text stringlengths 4 1.02M | meta dict |
|---|---|
import hashlib
import os.path
# Page template for rendered markdown docs.  Placeholders:
#   {{title}} / {% raw ... %} -- expanded by the (Tornado-style) template engine
#   css_version               -- replaced below with a content hash of the
#                                stylesheet, for browser cache busting.
TEMPLATE_HTML = """
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{{title}}</title>
<link rel="stylesheet" href="/static/css/github.css?v=css_version">
<script type="text/javascript" src="/static/js/jquery-3.2.1.min.js" ></script>
</head>
<body>
<div class="doc-tree js-doc-tree" >
{% raw html_tree %}
</div>
<div class="ui grid main-wrapper" >
<div class="row" >
<div class="doc-content js-doc-content codehilite" >
{% raw html %}
</div>
</div>
</div>
<script type="text/javascript">
var gDocCache = {};
$("a.js-has-sub").on("click",function (e) {
e.preventDefault();
$(this).toggleClass("close")
$(this).parent().find("ul:first").slideToggle();
});
$("div.js-doc-tree").on("click","a",function(e){
e.preventDefault();
var $this = $(this);
if($this.hasClass("js-has-sub")) return;
var path = $this.attr("href");
var title = $this.html();
document.title = title;
pushHistoryState(path, title, path);
fetchHtmlContent(path);
});
function pushHistoryState(state, title, url) {
if (window.history.pushState) {
history.pushState(state, title, url);
} else {
// BUG FIX: the fallback referenced "path", which is not defined in this
// scope (it would throw a ReferenceError on browsers without pushState);
// the intended variable is the "url" parameter.
window.location.href = window.location.host + url;
}
}
function fetchHtmlContent(path){
if(!path)return;
var key = path.replace(/\//gi,'_');
var cacheData = gDocCache[key];
var docContent = $("div.js-doc-content");
if(cacheData){
docContent.html(cacheData);
return;
}
$.get(path).done(function(data){
docContent.html(data);
gDocCache[key] = data;
}).fail(function(){
docContent.html("something wrong, please try");
});
}
onpopstate = function(e){
fetchHtmlContent(e.state);
}
</script>
</body>
</html>
"""
def _compute_css_version():
    """Return a short cache-busting token for the stylesheet.

    The token is the first 6 hex digits of the MD5 of the bundled
    ``github.css`` file's raw bytes.
    """
    css_path = os.path.join(os.path.dirname(__file__), 'static/css/github.css')
    # Open in binary mode inside a context manager: the original leaked the
    # file handle, and text mode both breaks on Python 3 (md5 requires
    # bytes) and makes the hash depend on platform newline translation.
    with open(css_path, 'rb') as css_file:
        return hashlib.md5(css_file.read()).hexdigest()[:6]


css_v = _compute_css_version()
template_html = TEMPLATE_HTML.replace('css_version', css_v)
| {
"content_hash": "5d13dd40cf91e8f547047b04b6698be7",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 80,
"avg_line_length": 24.25,
"alnum_prop": 0.5707591377694471,
"repo_name": "wecatch/turbo-markdown",
"id": "d82a201d386794866152989480721a972ab9ba6b",
"size": "2134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turbo_markdown/template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3515"
},
{
"name": "Makefile",
"bytes": "2313"
},
{
"name": "Python",
"bytes": "14389"
}
],
"symlink_target": ""
} |
from __future__ import division
import vistrails.core.db.action
from vistrails.core.db.locator import XMLFileLocator
from vistrails.core.db.io import serialize, unserialize
from vistrails.core import debug
from vistrails.core.interpreter.default import get_default_interpreter
from vistrails.core.log.group_exec import GroupExec
from vistrails.core.log.machine import Machine
from vistrails.core.log.module_exec import ModuleExec
from vistrails.core.modules.basic_modules import Constant
import vistrails.core.modules.module_registry
import vistrails.core.modules.utils
from vistrails.core.modules.vistrails_module import Module, ModuleError, \
InvalidOutput
from vistrails.core.vistrail.annotation import Annotation
from vistrails.core.vistrail.controller import VistrailController
from vistrails.core.vistrail.group import Group
from vistrails.core.vistrail.module_function import ModuleFunction
from vistrails.core.vistrail.module_param import ModuleParam
from vistrails.core.vistrail.pipeline import Pipeline
from vistrails.core.vistrail.vistrail import Vistrail
from vistrails.db.domain import IdScope
import vistrails.db.versions
import copy
import inspect
from itertools import izip
import os
import re
import sys
import tempfile
from IPython.parallel.error import CompositeError
from .api import get_client
# hashlib appeared in Python 2.5; fall back to the deprecated ``sha``
# module on older interpreters so that ``sha1_hash`` is always callable.
try:
    import hashlib
    sha1_hash = hashlib.sha1
except ImportError:
    import sha
    sha1_hash = sha.new
###############################################################################
# This function is sent to the engines which execute it
#
# It receives the workflow, and the list of targeted output ports
#
# It returns the corresponding computed outputs and the execution log
#
def execute_wf(wf, output_port):
    """Execute a serialized single-module workflow on an IPython engine.

    *wf* is an XML-serialized Pipeline (produced by ``Map.serialize_module``)
    and *output_port* is the name of the port whose value is wanted.

    Returns a dict with key ``errors`` (list of strings) and, on success,
    ``output`` (the port value), ``xml_log`` and ``machine_log`` (serialized
    execution logs).  This function runs remotely, so everything it returns
    must be picklable.
    """
    # Save the workflow in a temporary file
    temp_wf_fd, temp_wf = tempfile.mkstemp()
    try:
        f = open(temp_wf, 'w')
        f.write(wf)
        f.close()
        os.close(temp_wf_fd)
        # Clean the cache so stale results from a previous run are not reused
        interpreter = get_default_interpreter()
        interpreter.flush()
        # Load the Pipeline from the temporary file
        vistrail = Vistrail()
        locator = XMLFileLocator(temp_wf)
        workflow = locator.load(Pipeline)
        # Build a Vistrail from this single Pipeline
        action_list = []
        for module in workflow.module_list:
            action_list.append(('add', module))
        for connection in workflow.connection_list:
            action_list.append(('add', connection))
        action = vistrails.core.db.action.create_action(action_list)
        vistrail.add_action(action, 0L)
        vistrail.update_id_scope()
        tag = 'parallel flow'
        vistrail.addTag(tag, action.id)
        # Build a controller and execute the tagged version
        controller = VistrailController()
        controller.set_vistrail(vistrail, None)
        controller.change_selected_version(vistrail.get_version_number(tag))
        execution = controller.execute_current_workflow(
            custom_aliases=None,
            custom_params=None,
            extra_info=None,
            reason='API Pipeline Execution')
        # Build a list of errors, one "<module>: <error>" string per failure
        errors = []
        pipeline = vistrail.getPipeline(tag)
        execution_errors = execution[0][0].errors
        if execution_errors:
            for key in execution_errors:
                module = pipeline.modules[key]
                msg = '%s: %s' %(module.name, execution_errors[key])
                errors.append(msg)
        # Get the execution log from the controller; a missing log is
        # reported as an error rather than raised, since the caller only
        # sees the returned dict.
        try:
            module_log = controller.log.workflow_execs[0].item_execs[0]
        except IndexError:
            errors.append("Module log not found")
            return dict(errors=errors)
        else:
            machine = controller.log.workflow_execs[0].machines[
                module_log.machine_id]
            xml_log = serialize(module_log)
            machine_log = serialize(machine)
        # Get the output value (only meaningful when execution succeeded)
        output = None
        if not execution_errors:
            executed_module, = execution[0][0].executed
            executed_module = execution[0][0].objects[executed_module]
            try:
                output = executed_module.get_output(output_port)
            except ModuleError:
                errors.append("Output port not found: %s" % output_port)
                return dict(errors=errors)
            # A Module instance cannot be shipped back to the client
            if isinstance(output, Module):
                raise TypeError("Output value is a Module instance")
        # Return the dictionary, that will be sent back to the client
        return dict(errors=errors,
                    output=output,
                    xml_log=xml_log,
                    machine_log=machine_log)
    finally:
        os.unlink(temp_wf)
###############################################################################
# One ANSI escape: ESC followed either by a CSI sequence
# ("[" + parameter bytes + a final letter) or by any single non-"["
# character (two-character escapes such as ESC c).
_ansi_code = re.compile('\x1B' + r'(?:\[[^A-Za-z]*[A-Za-z]|[^\[])')


def strip_ansi_codes(s):
    """Return *s* with every ANSI terminal escape sequence removed."""
    return _ansi_code.sub('', s)
###############################################################################
# Map Operator
#
class Map(Module):
    """The Map Module executes a map operator in parallel on IPython engines.
    The FunctionPort should be connected to the 'self' output of the module you
    want to execute.
    The InputList is the list of values to be scattered on the engines.
    """
    def __init__(self):
        Module.__init__(self)

    def update_upstream(self):
        """A modified version of the update_upstream method."""
        # everything is the same except that we don't update anything
        # upstream of FunctionPort: that module is serialized and executed
        # remotely instead of locally.
        for port_name, connector_list in self.inputPorts.iteritems():
            if port_name == 'FunctionPort':
                for connector in connector_list:
                    connector.obj.update_upstream()
            else:
                for connector in connector_list:
                    connector.obj.update()
        # Drop connectors whose upstream produced InvalidOutput; iterate a
        # copy because remove_input_connector mutates inputPorts.
        for port_name, connectorList in copy.copy(self.inputPorts.items()):
            if port_name != 'FunctionPort':
                for connector in connectorList:
                    if connector.obj.get_output(connector.port) is \
                            InvalidOutput:
                        self.remove_input_connector(port_name, connector)

    @staticmethod
    def print_compositeerror(e):
        """Write every engine traceback from a CompositeError to stderr."""
        sys.stderr.write("Got %d exceptions from IPython engines:\n" %
                         len(e.elist))
        for e_type, e_msg, formatted_tb, infos in e.elist:
            sys.stderr.write("Error from engine %d (%r):\n" % (
                infos['engine_id'], infos['engine_uuid']))
            sys.stderr.write("%s\n" % strip_ansi_codes(formatted_tb))

    @staticmethod
    def list_exceptions(e):
        """Return a one-line-per-engine summary of a CompositeError."""
        return '\n'.join(
            "% 3d: %s: %s" % (infos['engine_id'],
                              e_type,
                              e_msg)
            for e_type, e_msg, tb, infos in e.elist)

    def updateFunctionPort(self):
        """
        Function to be used inside the updateUsptream method of the Map module. It
        updates the module connected to the FunctionPort port, executing it in
        parallel.
        """
        nameInput = self.get_input('InputPort')
        nameOutput = self.get_input('OutputPort')
        rawInputList = self.get_input('InputList')
        # Create inputList to always have iterable elements
        # to simplify code
        if len(nameInput) == 1:
            element_is_iter = False
            inputList = [[element] for element in rawInputList]
        else:
            element_is_iter = True
            inputList = rawInputList
        workflows = []
        module = None
        vtType = None
        # iterating through the connectors
        for connector in self.inputPorts.get('FunctionPort'):
            module = connector.obj
            # pipeline
            original_pipeline = connector.obj.moduleInfo['pipeline']
            # module
            module_id = connector.obj.moduleInfo['moduleId']
            vtType = original_pipeline.modules[module_id].vtType
            # serialize the module for each value in the list
            for i, element in enumerate(inputList):
                if element_is_iter:
                    self.element = element
                else:
                    self.element = element[0]
                # checking type and setting input in the module
                # (typeChecking/setInputValues presumably inherited -- not
                # defined in this class; TODO confirm against base Module)
                self.typeChecking(connector.obj, nameInput, inputList)
                self.setInputValues(connector.obj, nameInput, element, i)
                pipeline_db_module = original_pipeline.modules[module_id].do_copy()
                # transforming a subworkflow in a group
                # TODO: should we also transform inner subworkflows?
                if pipeline_db_module.is_abstraction():
                    group = Group(id=pipeline_db_module.id,
                                  cache=pipeline_db_module.cache,
                                  location=pipeline_db_module.location,
                                  functions=pipeline_db_module.functions,
                                  annotations=pipeline_db_module.annotations)
                    source_port_specs = pipeline_db_module.sourcePorts()
                    dest_port_specs = pipeline_db_module.destinationPorts()
                    for source_port_spec in source_port_specs:
                        group.add_port_spec(source_port_spec)
                    for dest_port_spec in dest_port_specs:
                        group.add_port_spec(dest_port_spec)
                    group.pipeline = pipeline_db_module.pipeline
                    pipeline_db_module = group
                # getting highest id between functions to guarantee unique ids
                # TODO: can get current IdScope here?
                if pipeline_db_module.functions:
                    high_id = max(function.db_id
                                  for function in pipeline_db_module.functions)
                else:
                    high_id = 0
                # adding function and parameter to module in pipeline
                # TODO: 'pos' should not be always 0 here
                id_scope = IdScope(beginId=long(high_id+1))
                for elementValue, inputPort in izip(element, nameInput):
                    p_spec = pipeline_db_module.get_port_spec(inputPort, 'input')
                    descrs = p_spec.descriptors()
                    if len(descrs) != 1:
                        raise ModuleError(
                            self,
                            "Tuple input ports are not supported")
                    if not issubclass(descrs[0].module, Constant):
                        raise ModuleError(
                            self,
                            "Module inputs should be Constant types")
                    # NOTE: shadows the builtin 'type'; kept for fidelity
                    type = p_spec.sigstring[1:-1]
                    mod_function = ModuleFunction(id=id_scope.getNewId(ModuleFunction.vtType),
                                                  pos=0,
                                                  name=inputPort)
                    mod_param = ModuleParam(id=0L,
                                            pos=0,
                                            type=type,
                                            val=elementValue)
                    mod_function.add_parameter(mod_param)
                    pipeline_db_module.add_function(mod_function)
                # serializing module
                wf = self.serialize_module(pipeline_db_module)
                workflows.append(wf)
            # getting first connector, ignoring the rest
            break
        # IPython stuff
        try:
            rc = get_client()
        except Exception, error:
            raise ModuleError(self, "Exception while loading IPython: %s" %
                              debug.format_exception(error))
        if rc is None:
            raise ModuleError(self, "Couldn't get an IPython connection")
        engines = rc.ids
        if not engines:
            raise ModuleError(
                self,
                "Exception while loading IPython: No IPython engines "
                "detected!")
        # initializes each engine
        # importing modules and initializing the VisTrails application
        # in the engines *only* in the first execution on this engine
        # (the 'init' key lookup raises on engines never initialized)
        uninitialized = []
        for eng in engines:
            try:
                rc[eng]['init']
            except Exception:
                uninitialized.append(eng)
        if uninitialized:
            init_view = rc[uninitialized]
            with init_view.sync_imports():
                import tempfile
                import inspect
                # VisTrails API
                import vistrails
                import vistrails.core
                import vistrails.core.db.action
                import vistrails.core.application
                import vistrails.core.modules.module_registry
                from vistrails.core.db.io import serialize
                from vistrails.core.vistrail.vistrail import Vistrail
                from vistrails.core.vistrail.pipeline import Pipeline
                from vistrails.core.db.locator import XMLFileLocator
                from vistrails.core.vistrail.controller import VistrailController
                from vistrails.core.interpreter.default import get_default_interpreter
            # initializing a VisTrails application
            try:
                init_view.execute(
                    'app = vistrails.core.application.init('
                    ' {"spawned": True},'
                    ' args=[])',
                    block=True)
            except CompositeError, e:
                self.print_compositeerror(e)
                raise ModuleError(self, "Error initializing application on "
                                  "IPython engines:\n"
                                  "%s" % self.list_exceptions(e))
            init_view['init'] = True
        # setting computing color
        module.logging.set_computing(module)
        # executing function in engines
        # each map returns a dictionary
        try:
            ldview = rc.load_balanced_view()
            map_result = ldview.map_sync(execute_wf, workflows, [nameOutput]*len(workflows))
        except CompositeError, e:
            self.print_compositeerror(e)
            raise ModuleError(self, "Error from IPython engines:\n"
                              "%s" % self.list_exceptions(e))
        # verifying errors
        errors = []
        for engine in range(len(map_result)):
            if map_result[engine]['errors']:
                msg = "ModuleError in engine %d: '%s'" % (
                    engine,
                    ', '.join(map_result[engine]['errors']))
                errors.append(msg)
        if errors:
            raise ModuleError(self, '\n'.join(errors))
        # setting success color
        module.logging.signalSuccess(module)
        # NOTE(review): 'reg' appears unused below -- kept for fidelity
        reg = vistrails.core.modules.module_registry.get_module_registry()
        self.result = []
        for map_execution in map_result:
            output = map_execution['output']
            self.result.append(output)
        # including execution logs
        for engine in range(len(map_result)):
            log = map_result[engine]['xml_log']
            exec_ = None
            if (vtType == 'abstraction') or (vtType == 'group'):
                exec_ = unserialize(log, GroupExec)
            elif (vtType == 'module'):
                exec_ = unserialize(log, ModuleExec)
            else:
                # something is wrong...
                continue
            # assigning new ids to existing annotations
            exec_annotations = exec_.annotations
            for i in range(len(exec_annotations)):
                exec_annotations[i].id = self.logging.log.log.id_scope.getNewId(Annotation.vtType)
            parallel_annotation = Annotation(key='parallel_execution', value=True)
            parallel_annotation.id = self.logging.log.log.id_scope.getNewId(Annotation.vtType)
            annotations = [parallel_annotation] + exec_annotations
            exec_.annotations = annotations
            # before adding the execution log, we need to get the machine information
            machine = unserialize(map_result[engine]['machine_log'], Machine)
            machine_id = self.logging.add_machine(machine)
            # recursively add machine information to execution items
            def add_machine_recursive(exec_):
                for item in exec_.item_execs:
                    if hasattr(item, 'machine_id'):
                        item.machine_id = machine_id
                    if item.vtType in ('abstraction', 'group'):
                        add_machine_recursive(item)
                exec_.machine_id = machine_id
            if (vtType == 'abstraction') or (vtType == 'group'):
                add_machine_recursive(exec_)
            self.logging.add_exec(exec_)

    def serialize_module(self, module):
        """
        Serializes a module to be executed in parallel.
        """
        def process_group(group):
            # Clear pipeline ids recursively so the serialized copy does not
            # collide with ids in the source vistrail.
            group.pipeline.id = None
            for module in group.pipeline.module_list:
                if module.is_group():
                    process_group(module)
        pipeline = Pipeline(version=vistrails.db.versions.currentVersion)
        if module.is_group():
            process_group(module)
        module = module.do_copy()
        pipeline.add_module(module)
        return serialize(pipeline)

    def compute(self):
        """The compute method for Map."""
        self.result = None
        self.updateFunctionPort()
        self.set_output('Result', self.result)
###############################################################################
class NewConstant(Constant):
    """Lightweight Constant used inside the Map module.

    Its value is injected directly with :meth:`setValue`, which also marks
    the module as up to date so the interpreter will not recompute it.
    """
    def setValue(self, v):
        """Publish *v* on the 'value' output port and mark the module clean."""
        self.upToDate = True
        self.set_output("value", v)
def create_constant(value):
    """Wrap *value* in a ready-to-use NewConstant (for the ModuleConnector)."""
    module = NewConstant()
    module.setValue(value)
    return module
def get_module(value, signature):
    """
    Creates a module (class) for *value*, in order to do the type checking.

    Returns the matching basic-module class, a tuple of classes for tuple
    values, or None when the type cannot be identified.  *signature* is
    only consulted element-wise for tuple values.
    """
    from vistrails.core.modules.basic_modules import Boolean, String, Integer, Float, List
    # bool must be tested before int: isinstance(True, int) is True
    if isinstance(value, Constant):
        return type(value)
    elif isinstance(value, bool):
        return Boolean
    elif isinstance(value, str):
        return String
    elif isinstance(value, int):
        return Integer
    elif isinstance(value, float):
        return Float
    elif isinstance(value, list):
        return List
    elif isinstance(value, tuple):
        v_modules = ()
        for element in xrange(len(value)):
            # BUG FIX: the original used "(get_module(...))", which is not a
            # tuple (parentheses alone do not build one), so the += raised
            # TypeError; the trailing comma builds the intended 1-tuple.
            v_modules += (get_module(value[element], signature[element]),)
        return v_modules
    else:
        from vistrails.core import debug
        debug.warning("Could not identify the type of the list element.")
        debug.warning("Type checking is not going to be done inside Map module.")
        return None
| {
"content_hash": "f0be219bd646351d608848a7c630fc45",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 98,
"avg_line_length": 37.90998043052838,
"alnum_prop": 0.561377245508982,
"repo_name": "VisTrails/VisTrails",
"id": "5866af7eeaa301882b9d5961722718f4ca732a6a",
"size": "21286",
"binary": false,
"copies": "2",
"ref": "refs/heads/v2.2",
"path": "vistrails/packages/parallelflow/map.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19779006"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "SQLPL",
"bytes": "2323"
},
{
"name": "Shell",
"bytes": "26542"
},
{
"name": "TeX",
"bytes": "147247"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
"""JSON Encoding Rules (JER) codec.
"""
import time
import json
import binascii
import math
import datetime
from ..parser import EXTENSION_MARKER
from . import BaseType, format_bytes, ErrorWithLocation
from . import EncodeError
from . import DecodeError
from . import compiler
from . import format_or
from . import utc_time_to_datetime
from . import utc_time_from_datetime
from . import generalized_time_to_datetime
from . import generalized_time_from_datetime
from .compiler import enum_values_as_dict
class Type(BaseType):
    """Base class for all JER codec types.

    JER carries no size information on the wire, so schema size
    constraints are accepted and deliberately ignored.
    """
    def set_size_range(self, minimum, maximum, has_extension_marker):
        # Intentional no-op: sizes are irrelevant in the JSON encoding.
        pass
class StringType(Type):
    """Base for character-string types; values map 1:1 to JSON strings."""
    def __init__(self, name):
        # Report the concrete subclass name (e.g. 'IA5String') as type name.
        super(StringType, self).__init__(name,
                                         self.__class__.__name__)
    def encode(self, data):
        return data
    def decode(self, data):
        return data
class MembersType(Type):
    """Base for SEQUENCE and SET: named members encoded as a JSON object."""

    def __init__(self,
                 name,
                 members,
                 type_name):
        super(MembersType, self).__init__(name, type_name)
        self.members = members

    def encode(self, data):
        """Encode dict *data* member by member; optional/defaulted members
        may be absent, any other missing member is an EncodeError."""
        values = {}
        for member in self.members:
            name = member.name
            if name in data:
                try:
                    value = member.encode(data[name])
                except ErrorWithLocation as e:
                    # Add member location
                    e.add_location(member)
                    raise e
            elif member.optional or member.has_default():
                continue
            else:
                raise EncodeError(
                    "{} member '{}' not found in {}.".format(
                        self.__class__.__name__,
                        name,
                        data))
            values[name] = value
        return values

    def decode(self, data):
        """Decode JSON object *data*; absent optional members are skipped,
        absent defaulted members get their default value."""
        values = {}
        for member in self.members:
            name = member.name
            if name in data:
                try:
                    value = member.decode(data[name])
                except ErrorWithLocation as e:
                    # Add member location
                    e.add_location(member)
                    raise e
                values[name] = value
            elif member.optional:
                pass
            elif member.has_default():
                values[name] = member.get_default()
        return values

    def __repr__(self):
        return '{}({}, [{}])'.format(
            self.__class__.__name__,
            self.name,
            ', '.join([repr(member) for member in self.members]))
class Boolean(Type):
    """ASN.1 BOOLEAN; maps directly to JSON true/false."""
    def __init__(self, name):
        super(Boolean, self).__init__(name, 'BOOLEAN')
    def encode(self, data):
        return data
    def decode(self, data):
        return data


class Integer(Type):
    """ASN.1 INTEGER; maps directly to a JSON number."""
    def __init__(self, name):
        super(Integer, self).__init__(name, 'INTEGER')
    def encode(self, data):
        return data
    def decode(self, data):
        return data
class Real(Type):
    """ASN.1 REAL.

    Finite values map to JSON numbers; infinity and NaN, which JSON cannot
    represent, are carried as the strings 'INF', '-INF' and 'NaN'.
    """

    def __init__(self, name):
        super(Real, self).__init__(name, 'REAL')

    def encode(self, data):
        if data == float('inf'):
            return 'INF'
        elif data == float('-inf'):
            return '-INF'
        elif math.isnan(data):
            return 'NaN'
        else:
            return data

    def decode(self, data):
        if isinstance(data, float):
            return data
        elif isinstance(data, int):
            # Robustness fix: json.loads() parses a number without a decimal
            # point (e.g. "1") as int, which previously fell through to the
            # special-value table and raised KeyError.  (Python 2 'long' is
            # not handled here -- TODO confirm whether py2 support matters.)
            return float(data)
        else:
            return {
                'INF': float('inf'),
                '-INF': float('-inf'),
                'NaN': float('nan'),
                '0': 0.0,
                '-0': 0.0
            }[data]
class Null(Type):
    """ASN.1 NULL; the value passes through unchanged (JSON null)."""
    def __init__(self, name):
        super(Null, self).__init__(name, 'NULL')
    def encode(self, data):
        return data
    def decode(self, data):
        return data
class BitString(Type):
    """ASN.1 BIT STRING, encoded as an upper-case hex string.

    When the schema does not fix the size, the bit length cannot be
    recovered from the hex digits alone, so a JSON object carrying both
    the value and the length is used instead.
    """
    def __init__(self, name, minimum, maximum):
        super(BitString, self).__init__(name, 'BIT STRING')
        # Size is only known statically for an exact constraint
        # (minimum == maximum); ranges still need an explicit length.
        if minimum is None and maximum is None:
            self.size = None
        elif minimum == maximum:
            self.size = minimum
        else:
            self.size = None
    def encode(self, data):
        # data is a (bytes, number_of_bits) tuple.
        value = format_bytes(data[0]).upper()
        if self.size is None:
            value = {
                "value": value,
                "length": data[1]
            }
        return value
    def decode(self, data):
        # Returns a (bytes, number_of_bits) tuple, mirroring encode().
        if self.size is None:
            return (binascii.unhexlify(data['value']), data['length'])
        else:
            return (binascii.unhexlify(data), self.size)
class OctetString(Type):
    """ASN.1 OCTET STRING, carried as an upper-case hex JSON string."""
    def __init__(self, name):
        super(OctetString, self).__init__(name, 'OCTET STRING')
    def encode(self, data):
        return format_bytes(data).upper()
    def decode(self, data):
        return binascii.unhexlify(data)
class ObjectIdentifier(Type):
    """ASN.1 OBJECT IDENTIFIER, carried as its dotted string form."""
    def __init__(self, name):
        super(ObjectIdentifier, self).__init__(name, 'OBJECT IDENTIFIER')
    def encode(self, data):
        return data
    def decode(self, data):
        # Coerce to str in case the JSON value was not already a string.
        return str(data)
class Enumerated(Type):
    """ASN.1 ENUMERATED.

    Values travel either as their names or, with *numeric*, as their
    numbers.  Both directions are identity maps, so ``self.values`` doubles
    as the membership check for encode and decode.
    """
    def __init__(self, name, values, numeric):
        super(Enumerated, self).__init__(name, 'ENUMERATED')
        if numeric:
            self.values = {k: k for k in enum_values_as_dict(values)}
        else:
            self.values = {
                v: v for v in enum_values_as_dict(values).values()
            }
        self.has_extension_marker = (EXTENSION_MARKER in values)
    def format_values(self):
        # Human-readable "a, b or c" list for error messages.
        return format_or(sorted(list(self.values)))
    def encode(self, data):
        try:
            value = self.values[data]
        except KeyError:
            raise EncodeError(
                "Expected enumeration value {}, but got '{}'.".format(
                    self.format_values(),
                    data))
        return value
    def decode(self, data):
        if data in self.values:
            return self.values[data]
        elif self.has_extension_marker:
            # Unknown value of an extensible enumeration: tolerated as None.
            return None
        else:
            raise DecodeError(
                "Expected enumeration value {}, but got '{}'.".format(
                    self.format_values(),
                    data))
class Sequence(MembersType):
    """ASN.1 SEQUENCE; all behavior lives in MembersType."""
    def __init__(self, name, members):
        super(Sequence, self).__init__(name, members, 'SEQUENCE')
class SequenceOf(Type):
    """ASN.1 SEQUENCE OF: a homogeneous list, encoded as a JSON array."""

    def __init__(self, name, element_type):
        super(SequenceOf, self).__init__(name, 'SEQUENCE OF')
        self.element_type = element_type

    def encode(self, data):
        """Encode every element of *data* with the element type."""
        encode_element = self.element_type.encode
        return [encode_element(entry) for entry in data]

    def decode(self, data):
        """Decode every element of the JSON array *data*."""
        decode_element = self.element_type.decode
        return [decode_element(entry) for entry in data]

    def __repr__(self):
        return 'SequenceOf({}, {})'.format(self.name,
                                           self.element_type)
class Set(MembersType):
    """ASN.1 SET; all behavior lives in MembersType."""
    def __init__(self, name, members):
        super(Set, self).__init__(name, members, 'SET')
class SetOf(Type):
    """ASN.1 SET OF: an unordered collection, encoded as a JSON array."""

    def __init__(self, name, element_type):
        super(SetOf, self).__init__(name, 'SET OF')
        self.element_type = element_type

    def encode(self, data):
        """Encode every element of *data* with the element type."""
        encode_element = self.element_type.encode
        return [encode_element(entry) for entry in data]

    def decode(self, data):
        """Decode every element of the JSON array *data*."""
        decode_element = self.element_type.decode
        return [decode_element(entry) for entry in data]

    def __repr__(self):
        return 'SetOf({}, {})'.format(self.name,
                                      self.element_type)
class Choice(Type):
    """ASN.1 CHOICE, encoded as a single-key JSON object {name: value}.

    The Python-side representation is a (name, value) tuple.
    """
    def __init__(self, name, members, has_extension_marker):
        super(Choice, self).__init__(name, 'CHOICE')
        self.members = members
        self.name_to_member = {member.name: member for member in self.members}
        self.has_extension_marker = has_extension_marker
    def format_names(self):
        # Human-readable "a, b or c" list for error messages.
        return format_or(sorted([member.name for member in self.members]))
    def encode(self, data):
        # data is a (member_name, value) tuple.
        try:
            member = self.name_to_member[data[0]]
        except KeyError:
            raise EncodeError(
                "Expected choice {}, but got '{}'.".format(
                    self.format_names(),
                    data[0]))
        try:
            return {member.name: member.encode(data[1])}
        except ErrorWithLocation as e:
            # Add member location
            e.add_location(member)
            raise e
    def decode(self, data):
        # The JSON object carries exactly one key: the chosen alternative.
        name, value = list(data.items())[0]
        if name in self.name_to_member:
            member = self.name_to_member[name]
        elif self.has_extension_marker:
            # Unknown alternative of an extensible choice: tolerated.
            return (None, None)
        else:
            raise DecodeError(
                "Expected choice {}, but got '{}'.".format(
                    self.format_names(),
                    name))
        try:
            return (name, member.decode(value))
        except ErrorWithLocation as e:
            # Add member location
            e.add_location(member)
            raise e
    def __repr__(self):
        return 'Choice({}, [{}])'.format(
            self.name,
            ', '.join([repr(member) for member in self.members]))
# The character-string flavours all share StringType's identity
# encode/decode; they differ only in the type name reported by the base
# class (taken from the subclass name).
class UTF8String(StringType):
    pass
class NumericString(StringType):
    pass
class PrintableString(StringType):
    pass
class IA5String(StringType):
    pass
class VisibleString(StringType):
    pass
class GeneralString(StringType):
    pass
class BMPString(StringType):
    pass
class GraphicString(StringType):
    pass
class UniversalString(StringType):
    pass
class TeletexString(StringType):
    pass
# ObjectDescriptor is defined as a GraphicString by the ASN.1 standard.
class ObjectDescriptor(GraphicString):
    pass
class UTCTime(StringType):
    """ASN.1 UTCTime, converted between datetime objects and the
    UTCTime string form by the shared codec helpers."""
    def encode(self, data):
        return utc_time_from_datetime(data)
    def decode(self, data):
        return utc_time_to_datetime(data)
class GeneralizedTime(StringType):
    """ASN.1 GeneralizedTime, converted between datetime objects and the
    GeneralizedTime string form by the shared codec helpers."""
    def encode(self, data):
        return generalized_time_from_datetime(data)
    def decode(self, data):
        return generalized_time_to_datetime(data)
class Date(StringType):
    """ASN.1 DATE, carried as an ISO 'YYYY-MM-DD' JSON string."""
    def encode(self, data):
        return str(data)
    def decode(self, data):
        parsed = time.strptime(data, '%Y-%m-%d')
        return datetime.date(parsed.tm_year, parsed.tm_mon, parsed.tm_mday)
class TimeOfDay(StringType):
    """ASN.1 TIME-OF-DAY, carried as an 'HH:MM:SS' JSON string."""
    def encode(self, data):
        return str(data)
    def decode(self, data):
        parsed = time.strptime(data, '%H:%M:%S')
        return datetime.time(parsed.tm_hour, parsed.tm_min, parsed.tm_sec)
class DateTime(StringType):
    """ASN.1 DATE-TIME, carried as ISO 'YYYY-MM-DDTHH:MM:SS'.

    str(datetime) uses a space separator, hence the replace on encode.
    """
    def encode(self, data):
        return str(data).replace(' ', 'T')
    def decode(self, data):
        parsed = time.strptime(data, '%Y-%m-%dT%H:%M:%S')
        return datetime.datetime(parsed.tm_year, parsed.tm_mon, parsed.tm_mday,
                                 parsed.tm_hour, parsed.tm_min, parsed.tm_sec)
class Any(Type):
    """ASN.1 ANY placeholder; this codec does not implement it."""
    def __init__(self, name):
        super(Any, self).__init__(name, 'ANY')
    def encode(self, _data):
        raise NotImplementedError('ANY is not yet implemented.')
    def decode(self, _data):
        raise NotImplementedError('ANY is not yet implemented.')
class Recursive(compiler.Recursive, Type):
    """Placeholder for a self-referential type.

    The real (inner) type is not known while the recursion is being
    compiled, so it is patched in later via set_inner_type(); encode and
    decode then just delegate to it.
    """
    def __init__(self, name, type_name, module_name):
        super(Recursive, self).__init__(name, 'RECURSIVE')
        self.type_name = type_name
        self.module_name = module_name
        self._inner = None
    def set_inner_type(self, inner):
        self._inner = inner
    def encode(self, data):
        return self._inner.encode(data)
    def decode(self, data):
        return self._inner.decode(data)
class CompiledType(compiler.CompiledType):
    """User-facing wrapper pairing a compiled type with JSON (de)serialization."""

    def encode(self, data, indent=None):
        """Encode *data* and return it as UTF-8 JSON bytes.

        *indent* selects pretty-printing; the default is the most compact
        separators.
        """
        try:
            dictionary = self._type.encode(data)
        except ErrorWithLocation as e:
            # Add member location
            e.add_location(self._type)
            raise e
        if indent is None:
            string = json.dumps(dictionary, separators=(',', ':'))
        else:
            string = json.dumps(dictionary, indent=indent)
        return string.encode('utf-8')

    def decode(self, data):
        """Decode UTF-8 JSON bytes *data* into the Python representation."""
        try:
            return self._type.decode(json.loads(data.decode('utf-8')))
        except ErrorWithLocation as e:
            # Add member location
            e.add_location(self._type)
            raise e
class Compiler(compiler.Compiler):
    """Compiles parsed ASN.1 type descriptors into JER codec Type objects."""

    def process_type(self, type_name, type_descriptor, module_name):
        compiled_type = self.compile_type(type_name,
                                          type_descriptor,
                                          module_name)
        return CompiledType(compiled_type)

    def compile_type(self, name, type_descriptor, module_name):
        """Map a descriptor to the matching Type subclass (recursively for
        constructed types); unknown names are resolved as user types, with
        Recursive placeholders breaking self-reference cycles."""
        module_name = self.get_module_name(type_descriptor, module_name)
        type_name = type_descriptor['type']
        if type_name == 'SEQUENCE':
            members, _ = self.compile_members(
                type_descriptor['members'],
                module_name)
            compiled = Sequence(name, members)
        elif type_name == 'SEQUENCE OF':
            compiled = SequenceOf(name,
                                  self.compile_type('',
                                                    type_descriptor['element'],
                                                    module_name))
        elif type_name == 'SET':
            members, _ = self.compile_members(
                type_descriptor['members'],
                module_name)
            compiled = Set(name, members)
        elif type_name == 'SET OF':
            compiled = SetOf(name,
                             self.compile_type('',
                                               type_descriptor['element'],
                                               module_name))
        elif type_name == 'CHOICE':
            # compile_members returns (members, has_extension_marker).
            compiled = Choice(name,
                              *self.compile_members(
                                  type_descriptor['members'],
                                  module_name))
        elif type_name == 'INTEGER':
            compiled = Integer(name)
        elif type_name == 'REAL':
            compiled = Real(name)
        elif type_name == 'ENUMERATED':
            compiled = Enumerated(name,
                                  self.get_enum_values(type_descriptor,
                                                       module_name),
                                  self._numeric_enums)
        elif type_name == 'BOOLEAN':
            compiled = Boolean(name)
        elif type_name == 'OBJECT IDENTIFIER':
            compiled = ObjectIdentifier(name)
        elif type_name == 'OCTET STRING':
            compiled = OctetString(name)
        elif type_name == 'TeletexString':
            compiled = TeletexString(name)
        elif type_name == 'NumericString':
            compiled = NumericString(name)
        elif type_name == 'PrintableString':
            compiled = PrintableString(name)
        elif type_name == 'IA5String':
            compiled = IA5String(name)
        elif type_name == 'VisibleString':
            compiled = VisibleString(name)
        elif type_name == 'GeneralString':
            compiled = GeneralString(name)
        elif type_name == 'DATE':
            compiled = Date(name)
        elif type_name == 'TIME-OF-DAY':
            compiled = TimeOfDay(name)
        elif type_name == 'DATE-TIME':
            compiled = DateTime(name)
        elif type_name == 'UTF8String':
            compiled = UTF8String(name)
        elif type_name == 'BMPString':
            compiled = BMPString(name)
        elif type_name == 'GraphicString':
            compiled = GraphicString(name)
        elif type_name == 'UTCTime':
            compiled = UTCTime(name)
        elif type_name == 'UniversalString':
            compiled = UniversalString(name)
        elif type_name == 'GeneralizedTime':
            compiled = GeneralizedTime(name)
        elif type_name == 'BIT STRING':
            minimum, maximum, _ = self.get_size_range(type_descriptor,
                                                      module_name)
            compiled = BitString(name, minimum, maximum)
        elif type_name == 'ANY':
            compiled = Any(name)
        elif type_name == 'ANY DEFINED BY':
            compiled = Any(name)
        elif type_name == 'NULL':
            compiled = Null(name)
        elif type_name == 'EXTERNAL':
            # EXTERNAL is modelled as the standard associated SEQUENCE.
            members, _ = self.compile_members(
                self.external_type_descriptor()['members'],
                module_name)
            compiled = Sequence(name, members)
        elif type_name == 'ObjectDescriptor':
            compiled = ObjectDescriptor(name)
        else:
            # Not a builtin: either a recursion back into a type currently
            # being compiled, or a reference to another user-defined type.
            if type_name in self.types_backtrace:
                compiled = Recursive(name,
                                     type_name,
                                     module_name)
                self.recursive_types.append(compiled)
            else:
                compiled = self.compile_user_type(name,
                                                  type_name,
                                                  module_name)
        return compiled
def compile_dict(specification, numeric_enums=False):
    """Compile a parsed ASN.1 *specification* dict into JER codec objects."""
    return Compiler(specification, numeric_enums).process()
def decode_full_length(_data):
    """Always raise: JSON frames carry no length prefix to inspect."""
    raise DecodeError('Decode length is not supported for this codec.')
| {
"content_hash": "cca2ee465cde2aa094774e40f9fed346",
"timestamp": "",
"source": "github",
"line_count": 649,
"max_line_length": 79,
"avg_line_length": 26.961479198767336,
"alnum_prop": 0.5268602125957252,
"repo_name": "eerimoq/asn1tools",
"id": "9c51da8d90221ce1e35e21867a95ff9792a5947d",
"size": "17498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asn1tools/codecs/jer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "837537"
},
{
"name": "CSS",
"bytes": "653"
},
{
"name": "HTML",
"bytes": "225"
},
{
"name": "JavaScript",
"bytes": "27250"
},
{
"name": "Makefile",
"bytes": "12405"
},
{
"name": "Python",
"bytes": "7686766"
},
{
"name": "Rust",
"bytes": "148013"
}
],
"symlink_target": ""
} |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
import re
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.utils2 import i18n
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
from salts_lib.constants import Q_ORDER
import scraper
# Module-level logger shared by this scraper.
logger = log_utils.Logger.get_logger()
# Default site root; can be overridden per-user via the "<name>-base_url"
# addon setting (see Scraper.__init__ below).
BASE_URL = 'http://rmz.cr'
class Scraper(scraper.Scraper):
    """Debrid scraper for rmz.cr (RMZ) release listings.

    Scrapes release pages for movie/TV-show/episode hoster links. All
    HTTP fetches go through the base class's _http_get with
    require_debrid=True, so this scraper only yields results for users
    with a debrid service configured.
    """
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        # User-configurable site root (settings key "<name>-base_url").
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        # Video types this scraper can produce sources for.
        return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])

    @classmethod
    def get_name(cls):
        return 'RMZ'

    def get_sources(self, video):
        """Return a list of hoster dicts for |video| (empty list on no match).

        Movies first resolve their listing page to a concrete release via
        __get_release(); episodes arrive here already resolved (see
        _get_episode_url). Quality is read from the "WxH" resolution in the
        release label, defaulting to HIGH when none is found.
        """
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
        if video.video_type == VIDEO_TYPES.MOVIE:
            page_url = self.__get_release(html, video)
            if page_url is None: return hosters
            page_url = scraper_utils.urljoin(self.base_url, page_url)
            html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
        hevc = False
        for _attrs, content in dom_parser2.parse_dom(html, 'span', {'class': 'releaselabel'}):
            if re.search('(hevc|x265)', content, re.I):
                hevc = 'x265'
            match = re.search('(\d+)x(\d+)', content)
            if match:
                _width, height = match.groups()
                quality = scraper_utils.height_get_quality(height)
                break
        else:
            # No "WxH" resolution found in any release label.
            quality = QUALITIES.HIGH
        streams = [attrs['href'] for attrs, _content in dom_parser2.parse_dom(html, 'a', {'class': 'links'}, req='href')]
        streams += [content for _attrs, content in dom_parser2.parse_dom(html, 'pre', {'class': 'links'})]
        for stream_url in streams:
            if scraper_utils.excluded_link(stream_url): continue
            host = urlparse.urlparse(stream_url).hostname
            hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'quality': quality, 'direct': False}
            if hevc: hoster['format'] = hevc
            hosters.append(hoster)
        return hosters

    def __get_release(self, html, video):
        """Pick the best release link from a listing page, or None.

        Honours the "<name>-select" setting: 0 means "first acceptable
        release", anything else means "highest quality release". Items
        older than the age filter terminate the scan (listing appears to
        be newest-first — confirm against the site).
        """
        # Narrowed from a bare "except:"; the setting may be missing or
        # non-numeric, in which case we fall back to "first result".
        try: select = int(kodi.get_setting('%s-select' % (self.get_name())))
        except Exception: select = 0
        ul_id = 'releases' if video.video_type == VIDEO_TYPES.MOVIE else 'episodes'
        fragment = dom_parser2.parse_dom(html, 'ul', {'id': ul_id})
        if fragment:
            best_qorder = 0
            best_page = None
            for _attrs, item in dom_parser2.parse_dom(fragment[0].content, 'li'):
                match = dom_parser2.parse_dom(item, 'span', req=['href', 'title'])
                if not match:
                    match = dom_parser2.parse_dom(item, 'a', req=['href', 'title'])
                if not match: continue
                page_url, release = match[0].attrs['href'], match[0].attrs['title']
                match = dom_parser2.parse_dom(item, 'span', {'class': 'time'})
                if match and self.__too_old(match[0].content): break
                # Strip a leading "[group]" tag from the release name.
                release = re.sub('^\[[^\]]*\]\s*', '', release)
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(release)
                else:
                    if not scraper_utils.release_check(video, release, require_title=False): continue
                    meta = scraper_utils.parse_episode_link(release)
                if select == 0:
                    best_page = page_url
                    break
                else:
                    quality = scraper_utils.height_get_quality(meta['height'])
                    logger.log('result: |%s|%s|%s|' % (page_url, quality, Q_ORDER[quality]), log_utils.LOGDEBUG)
                    if Q_ORDER[quality] > best_qorder:
                        logger.log('Setting best as: |%s|%s|%s|' % (page_url, quality, Q_ORDER[quality]), log_utils.LOGDEBUG)
                        best_page = page_url
                        best_qorder = Q_ORDER[quality]
            return best_page

    def __too_old(self, age):
        """Return True if |age| (site-formatted age string) exceeds the
        user's "<name>-filter" day limit; a limit of 0 disables the filter."""
        filter_days = int(kodi.get_setting('%s-filter' % (self.get_name())))
        if filter_days and scraper_utils.get_days(age) > filter_days:
            return True
        else:
            return False

    @classmethod
    def get_settings(cls):
        """Base scraper settings plus the age filter and auto-select options."""
        settings = super(cls, cls).get_settings()
        settings = scraper_utils.disable_sub_check(settings)
        name = cls.get_name()
        settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" %s" default="60" visible="eq(-3,true)"/>' % (name, i18n('filter_results_days')))
        settings.append(' <setting id="%s-select" type="enum" label=" %s" lvalues="30636|30637" default="0" visible="eq(-4,true)"/>' % (name, i18n('auto_select')))
        return settings

    def _get_episode_url(self, show_url, video):
        """Resolve a show page URL to the page URL of the matching episode."""
        show_url = scraper_utils.urljoin(self.base_url, show_url)
        html = self._http_get(show_url, require_debrid=True, cache_limit=.5)
        return self.__get_release(html, video)

    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        """Search the site, returning [{'url', 'title', 'year'}, ...].

        Bug fix: the show-detection regex was malformed ('\(d{4|-\)' —
        '{4' is a literal and '|' splits the pattern); the intended
        pattern matches an open-ended year range such as "(2014-)",
        which marks a TV-show listing.
        """
        results = []
        search_url = scraper_utils.urljoin(self.base_url, '/search/')
        search_url = scraper_utils.urljoin(search_url, urllib.quote_plus(title))
        html = self._http_get(search_url, require_debrid=True, cache_limit=8)
        for _attrs, fragment in dom_parser2.parse_dom(html, 'div', {'class': 'list'}):
            if not dom_parser2.parse_dom(fragment, 'div', {'class': 'lists_titles'}): continue
            for attrs, match_title_year in dom_parser2.parse_dom(fragment, 'a', {'class': 'title'}, req='href'):
                match_url = attrs['href']
                # Strip any markup embedded in the title text.
                match_title_year = re.sub('</?[^>]*>', '', match_title_year)
                is_show = re.search('\(\d{4}-\)', match_title_year)
                if (is_show and video_type == VIDEO_TYPES.MOVIE) or (not is_show and video_type == VIDEO_TYPES.TVSHOW): continue
                match_title, match_year = scraper_utils.extra_year(match_title_year)
                if not year or not match_year or year == match_year:
                    result = {'url': scraper_utils.pathify_url(match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
                    results.append(result)
        return results
| {
"content_hash": "b63f0693ffdad41981051529e63fb3ab",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 191,
"avg_line_length": 46.945454545454545,
"alnum_prop": 0.5806868060934676,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "cd9976e8cabbd46d2dec8c07eea6f0e83d40da2f",
"size": "7746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin.video.salts/scrapers/rmz_scraper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
from alembic.operations import ops
from alembic.util import Dispatcher
from alembic.util import rev_id as new_rev_id
from neutron._i18n import _
from neutron.db.migration import cli
# Module-wide dispatcher mapping alembic operation types to the phase-decider
# functions below (registered via the @_ec_dispatcher.dispatch_for decorators).
_ec_dispatcher = Dispatcher()
def process_revision_directives(context, revision, directives):
    """Alembic ``process_revision_directives`` hook.

    Replaces the autogenerated directives in place with their
    expand/contract phase-specific equivalents.
    """
    phased = _assign_directives(context, directives)
    directives[:] = list(phased)
def _assign_directives(context, directives, phase=None):
    """Yield phase-assigned replacements for each directive.

    For every directive, the decider registered for its type is asked for a
    phase-specific version; non-None results are yielded. When ``phase`` is
    None, every migration branch in cli.MIGRATION_BRANCHES is tried.

    Bug fix: the original computed ``phases`` inside the outer loop from the
    ``phase`` argument, but the inner ``for phase in phases`` rebound that
    same name — so every directive after the first saw only the last phase
    instead of all branches. ``phases`` is now computed once, up front, and
    the inner loop uses a distinct variable.
    """
    if phase is None:
        phases = cli.MIGRATION_BRANCHES
    else:
        phases = (phase,)
    for directive in directives:
        decider = _ec_dispatcher.dispatch(directive)
        for branch in phases:
            decided = decider(context, directive, branch)
            if decided:
                yield decided
@_ec_dispatcher.dispatch_for(ops.MigrationScript)
def _migration_script_ops(context, directive, phase):
    """Generate a new ops.MigrationScript() for a given phase.

    E.g. given an ops.MigrationScript() directive from a vanilla autogenerate
    and an expand/contract phase name, produce a new ops.MigrationScript()
    which contains only those sub-directives appropriate to "expand" or
    "contract". Also ensure that the branch directory exists and that
    the correct branch labels/depends_on/head revision are set up.
    """
    # Resolve the on-disk directory for the current release / branch pair.
    version_path = cli._get_version_branch_path(
        context.config, release=cli.CURRENT_RELEASE, branch=phase)
    autogen_kwargs = {}
    # Presumably seeds autogen_kwargs with branch labels/head/version_path
    # when the branch directory does not exist yet — confirm in cli.
    cli._check_bootstrap_new_branch(phase, version_path, autogen_kwargs)
    op = ops.MigrationScript(
        new_rev_id(),
        # Recurse into the upgrade ops, keeping only this phase's directives.
        ops.UpgradeOps(ops=list(
            _assign_directives(context, directive.upgrade_ops.ops, phase)
        )),
        # Downgrade scripts are not auto-generated.
        ops.DowngradeOps(ops=[]),
        message=directive.message,
        **autogen_kwargs
    )
    # Drop the script entirely if nothing survived the phase filter
    # (implicitly returns None, which alembic treats as "no script").
    if not op.upgrade_ops.is_empty():
        return op
@_ec_dispatcher.dispatch_for(ops.AddConstraintOp)
@_ec_dispatcher.dispatch_for(ops.CreateIndexOp)
@_ec_dispatcher.dispatch_for(ops.CreateTableOp)
@_ec_dispatcher.dispatch_for(ops.AddColumnOp)
def _expands(context, directive, phase):
    """Additive (create/add) operations belong to the expand phase only;
    return the directive there and None for any other phase."""
    return directive if phase == 'expand' else None
@_ec_dispatcher.dispatch_for(ops.DropConstraintOp)
@_ec_dispatcher.dispatch_for(ops.DropIndexOp)
@_ec_dispatcher.dispatch_for(ops.DropTableOp)
@_ec_dispatcher.dispatch_for(ops.DropColumnOp)
def _contracts(context, directive, phase):
    """Destructive (drop) operations belong to the contract phase only;
    return the directive there and None for any other phase."""
    return directive if phase == 'contract' else None
@_ec_dispatcher.dispatch_for(ops.AlterColumnOp)
def _alter_column(context, directive, phase):
    """Classify an ALTER COLUMN directive by nullability change.

    Loosening nullability (modify_nullable is True) is an expand;
    tightening it (modify_nullable is False) is a contract. Any other
    alteration cannot be classified and raises NotImplementedError.
    """
    loosens = directive.modify_nullable is True
    tightens = directive.modify_nullable is False
    if (phase == 'expand' and loosens) or (phase != 'expand' and tightens):
        return directive
    raise NotImplementedError(
        _("Don't know if operation is an expand or "
          "contract at the moment: %s") % directive)
@_ec_dispatcher.dispatch_for(ops.ModifyTableOps)
def _modify_table_ops(context, directive, phase):
    """Recurse into a table-scoped container of ops; keep the container
    only if any of its children survived the phase filter."""
    phase_ops = list(_assign_directives(context, directive.ops, phase))
    rewritten = ops.ModifyTableOps(
        directive.table_name, ops=phase_ops, schema=directive.schema)
    if not rewritten.is_empty():
        return rewritten
| {
"content_hash": "ca34932a5f1f3ccbc608d437e8591220",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 77,
"avg_line_length": 31.96039603960396,
"alnum_prop": 0.6833952912019826,
"repo_name": "openstack/neutron",
"id": "f0e91ea36488bc29ad00cfd97f36fb191e1ec2e2",
"size": "3832",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/db/migration/autogen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
import os
import sys
from sphinx.locale import _
import chemex
# Make the docs directory itself importable. NOTE(review): this insert
# happens after ``import chemex`` above, so it does not affect how chemex
# itself was resolved.
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "ChemEx"
# NOTE: assigning ``copyright`` shadows the builtin, but this is the
# conventional Sphinx setting name.
copyright = "2021, Guillaume Bouvignies"
author = "Guillaume Bouvignies"
# The full version, including alpha/beta/rc tags, read from the package
# so the docs always match the installed code.
version = chemex.__version__
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.intersphinx",
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.mathjax",
    "sphinx.ext.extlinks",
    "sphinx.ext.viewcode",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.ifconfig",
    "sphinx.ext.githubpages",
    "sphinx.ext.inheritance_diagram",
    "sphinx.ext.napoleon",
    "sphinxcontrib.httpdomain",
    "sphinx_rtd_theme",
    "recommonmark",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["build"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = "%B %d, %Y"
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ["chemex."]
# The default syntax to be highlighted in literal blocks.
highlight_language = "python"
# Napoleon: project docstrings follow the NumPy convention, not Google's.
napoleon_google_docstring = False
napoleon_numpy_docstring = True
# Group autodoc members by type (methods, attributes, ...) rather than
# alphabetically or by source order.
autodoc_member_order = "groupwise"
# Whether to include todos or not
todo_include_todos = True
# Sphinx document translation with sphinx gettext feature uses these settings:
gettext_compact = False
# Whether to number all figures or not
numfig = True
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}
def setup(app):
    """Sphinx extension hook: register a ``confval`` object type so that
    configuration values can be documented and cross-referenced, with
    ``:type:`` and ``:default:`` doc fields."""
    # Imported lazily: sphinx internals are only needed at build time.
    from sphinx.domains.python import PyField
    from sphinx.util.docfields import Field

    type_field = PyField(
        "type",
        label=_("Type"),
        has_arg=False,
        names=("type",),
        bodyrolename="class",
    )
    default_field = Field(
        "default",
        label=_("Default"),
        has_arg=False,
        names=("default",),
    )
    app.add_object_type(
        "confval",
        "confval",
        objname="configuration value",
        indextemplate="pair: %s; configuration value",
        doc_field_types=[type_field, default_field],
    )
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Read-the-Docs theme knobs; see the sphinx_rtd_theme docs for the full list.
html_theme_options = {
    "canonical_url": "",
    # 'analytics_id': 'UA-XXXXXXX-1', # Provided by Google in your dashboard
    "logo_only": False,
    "display_version": True,
    "prev_next_buttons_location": "bottom",
    "style_external_links": False,
    # 'vcs_pageview_mode': '',
    # 'style_nav_header_background': 'white',
    # Toc options
    "collapse_navigation": True,
    "sticky_navigation": True,
    "navigation_depth": 3,
    "includehidden": True,
    "titles_only": False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
html_logo = "_static/chemex_logo.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "chemexdoc"
# -- Options for LaTeX output ---------------------------------------------
# xelatex handles the Unicode characters appearing in the docs.
latex_engine = "xelatex"
# latex_elements = {
# # The paper size ('letterpaper' or 'a4paper').
# "papersize": "letterpaper",
# # 'papersize': 'a4',
# # The font size ('10pt', '11pt' or '12pt').
# "pointsize": "10pt",
# "releasename": " ",
# # Sonny, Lenny, Glenn, Conny, Rejne, Bjarne and Bjornstrup
# # 'fncychap': '\\usepackage[Lenny]{fncychap}',
# "fncychap": "\\usepackage{fncychap}",
# # Latex figure (float) alignment
# "figure_align": "htbp",
# # Start first page of each chapter on any page
# "extraclassoptions": "openany, oneside",
# # Additional stuff for the LaTeX preamble.
# #
# "preamble": r"""
# \setcounter{tocdepth}{2}
# \usepackage[depth=2]{bookmark}
# \usepackage[notbib,notindex]{tocbibind}
# \usepackage{charter}
# %\usepackage{crimson}
# \usepackage[defaultsans]{lato}
# %\usepackage{DejaVuSans}
# \usepackage{inconsolata}
# %\usepackage{sourcecodepro}
# %\usepackage{DejaVuSansMono}
# \usepackage[utf8]{inputenc}
# \DeclareUnicodeCharacter{03D6}{\ensuremath{\varpi}}
# """,
# }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [("index", "chemex.tex", "ChemEx Documentation", author, "manual")]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
latex_logo = "_static/chemex_logo.pdf"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        "index",
        "chemex",
        "an open source Python package for analysis of NMR chemical exchange data",
        author,
        1,
    )
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "chemex",
        "ChemEx Documentation",
        author,
        "ChemEx",
        "ChemEx Documentation.",
        "Miscellaneous",
    ),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_theme = "epub"
epub_basename = "chemex"
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_scheme = "url"
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
epub_identifier = epub_publisher
# A unique identification for the text.
epub_uid = "web-site"
# A list of files that should not be packed into the epub file.
epub_fix_images = False
epub_max_image_width = 0
epub_show_urls = "inline"
epub_use_index = True
epub_description = "ChemEx Documentation"
| {
"content_hash": "a0bece1bcd572f70abaeef8787904a39",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 85,
"avg_line_length": 29.789473684210527,
"alnum_prop": 0.6540822019713595,
"repo_name": "gbouvignies/chemex",
"id": "ac232808792270cd1834c6af961e8da2a1c47cb5",
"size": "11305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "415881"
}
],
"symlink_target": ""
} |
import copy
from django.db.models.base import Model, ModelBase
from django.template.defaultfilters import slugify
from django.contrib.auth.models import Permission as DjangoPermission
from django.contrib.contenttypes.models import ContentType
from authority.exceptions import NotAModel, UnsavedModelInstance
from authority.models import Permission
class PermissionMetaclass(type):
    """
    Metaclass for permission classes: guarantees that every class gets a
    slugified ``label`` (defaulting to "<classname>_permission") and a
    lower-cased list of ``checks``.
    """
    def __new__(cls, name, bases, attrs):
        klass = super(PermissionMetaclass, cls).__new__(cls, name, bases, attrs)
        if not klass.label:
            klass.label = "%s_permission" % klass.__name__.lower()
        klass.label = slugify(klass.label)
        # Check names are used case-insensitively, so normalize once here.
        klass.checks = [check.lower() for check in (klass.checks or [])]
        return klass
class BasePermission(object):
    """
    Base Permission class to be used to define app permissions.

    Subclasses are expected to provide:
      * ``model`` -- the model class this permission applies to (read by
        ``can()``; not declared here — TODO confirm subclass contract).
      * ``checks`` -- extra check names beyond the generic ones.
      * ``label`` -- optional; the metaclass defaults it to
        "<classname>_permission" and slugifies it.
    """
    __metaclass__ = PermissionMetaclass

    checks = ()
    label = None
    # Checks available to every permission; when used "generically" they are
    # suffixed with the model name (e.g. "add_mymodel").
    generic_checks = ['add', 'browse', 'change', 'delete']

    def __init__(self, user=None, group=None, *args, **kwargs):
        self.user = user
        self.group = group
        super(BasePermission, self).__init__(*args, **kwargs)

    def has_user_perms(self, perm, obj, approved, check_groups=True):
        """
        Check if self.user has ``perm`` for ``obj``.

        Superusers always pass; inactive users always fail. Otherwise a
        queryset of matching Permission rows is returned (truthy when
        non-empty), so callers may rely on its truth value only.
        """
        if self.user:
            if self.user.is_superuser:
                return True
            if not self.user.is_active:
                return False
            # check if a Permission object exists for the given params
            return Permission.objects.user_permissions(self.user, perm, obj,
                approved, check_groups).filter(object_id=obj.id)
        return False

    def has_group_perms(self, perm, obj, approved):
        """
        Check if group has the permission for the given object
        """
        if self.group:
            perms = Permission.objects.group_permissions(self.group, perm, obj,
                                                         approved)
            return perms.filter(object_id=obj.id)
        return False

    def has_perm(self, perm, obj, check_groups=True, approved=True):
        """
        Check if user has the permission for the given object
        (falling back to the group check when the user check fails).
        """
        if self.user:
            if self.has_user_perms(perm, obj, approved, check_groups):
                return True
        if self.group:
            return self.has_group_perms(perm, obj, approved)
        return False

    def requested_perm(self, perm, obj, check_groups=True):
        """
        Check if user requested a permission for the given object
        (i.e. an unapproved Permission row exists).
        """
        return self.has_perm(perm, obj, check_groups, False)

    def can(self, check, generic=False, *args, **kwargs):
        """
        Run ``check`` against each model/instance in ``args`` (defaulting to
        ``self.model``), consulting Django's permission system first and
        authority's per-object permissions second. Returns a truthy value as
        soon as either source grants the check.
        """
        if not args:
            args = [self.model]
        perms = False
        for obj in args:
            # skip this obj if it's not a model class or instance
            if not isinstance(obj, (ModelBase, Model)):
                continue
            # first check Django's permission system
            if self.user:
                try:
                    perm = self.get_django_codename(check, obj, generic)
                    app_label, pname = perm.split('.')
                    djPerm = DjangoPermission.objects.get(content_type=ContentType.objects.get_for_model(obj), codename=pname)
                    perms = perms or self.user.user_permissions.filter(pk=djPerm.pk)
                except Exception:
                    # Fall through to authority's per-object check — the
                    # Django permission may simply not exist. (Was a bare
                    # ``except:``, which also swallowed KeyboardInterrupt
                    # and SystemExit.)
                    pass
            perm = self.get_codename(check, obj, generic)
            # then check authority's per object permissions
            if not isinstance(obj, ModelBase) and isinstance(obj, self.model):
                # only check the authority if obj is not a model class
                perms = perms or self.has_perm(perm, obj)
        return perms

    def get_django_codename(self, check, model_or_instance, generic=False, without_left=False):
        """
        Build a Django-style codename: "<app_label>.<check>", optionally
        suffixed with "_<modelname>" when ``generic``; ``without_left``
        omits the "<app_label>." prefix.
        """
        if without_left:
            perm = check
        else:
            perm = '%s.%s' % (model_or_instance._meta.app_label, check.lower())
        if generic:
            perm = '%s_%s' % (perm, model_or_instance._meta.object_name.lower())
        return perm

    def get_codename(self, check, model_or_instance, generic=False):
        """
        Build an authority-style codename: "<label>.<check>", optionally
        suffixed with "_<modelname>" when ``generic``.
        """
        perm = '%s.%s' % (self.label, check.lower())
        if generic:
            perm = '%s_%s' % (perm, model_or_instance._meta.object_name.lower())
        return perm

    def assign(self, check=None, content_object=None, generic=False):
        """
        Assign a permission to a user.
        To assign permission for all checks: let check=None.
        To assign permission for all objects: let content_object=None.
        If generic is True then "check" will be suffixed with _modelname.

        Returns the list of Permission/DjangoPermission objects created or
        found.
        """
        result = []
        # Normalize both arguments to sequences.
        if not content_object:
            content_objects = (self.model,)
        elif not isinstance(content_object, (list, tuple)):
            content_objects = (content_object,)
        else:
            content_objects = content_object
        if not check:
            checks = self.generic_checks + getattr(self, 'checks', [])
        elif not isinstance(check, (list, tuple)):
            checks = (check,)
        else:
            checks = check
        for content_object in content_objects:
            # raise an exception before adding any permission
            # i think Django does not rollback by default
            if not isinstance(content_object, (Model, ModelBase)):
                raise NotAModel(content_object)
            elif isinstance(content_object, Model) and not content_object.pk:
                raise UnsavedModelInstance(content_object)
            content_type = ContentType.objects.get_for_model(content_object)
            for check in checks:
                if isinstance(content_object, Model):
                    # make an authority per object permission
                    codename = self.get_codename(check, content_object, generic)
                    try:
                        perm = Permission.objects.get(
                            user = self.user,
                            codename = codename,
                            approved = True,
                            content_type = content_type,
                            object_id = content_object.pk)
                    except Permission.DoesNotExist:
                        perm = Permission.objects.create(
                            user = self.user,
                            content_object = content_object,
                            codename = codename,
                            approved = True)
                    result.append(perm)
                elif isinstance(content_object, ModelBase):
                    # make a Django permission
                    codename = self.get_django_codename(check, content_object, generic, without_left=True)
                    try:
                        perm = DjangoPermission.objects.get(codename=codename)
                    except DjangoPermission.DoesNotExist:
                        # Use the part of the check before the first "_" as
                        # the human-readable name (e.g. "add" from "add_x").
                        name = check
                        if '_' in name:
                            name = name[0:name.find('_')]
                        perm = DjangoPermission(
                            name = name,
                            codename = codename,
                            content_type = content_type)
                        perm.save()
                    self.user.user_permissions.add(perm)
                    result.append(perm)
        return result
| {
"content_hash": "783a95c727a39a535f6f54ca875cf48a",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 126,
"avg_line_length": 40.182291666666664,
"alnum_prop": 0.5528191834089436,
"repo_name": "azizmb/django-authority",
"id": "aa59557777988c707078ea60a650cc0358165d10",
"size": "7715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "authority/permissions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31314"
}
],
"symlink_target": ""
} |
def main( array, value ):
    """Add ``value`` to every element of ``array`` in place and return it.

    The original looped ``for i in range(array.size)`` and assigned
    ``array[i]``, which only works for 1-D arrays (``size`` counts all
    elements while ``array[i]`` indexes along the first axis). The
    vectorized in-place slice assignment below is equivalent for 1-D
    input (including the same dtype-preserving cast on assignment) and
    additionally handles arrays of any dimensionality.
    """
    array[:] = array + value
    return array
| {
"content_hash": "3e7a387d5e11dd10867613ea0847b4f8",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 35,
"avg_line_length": 28.25,
"alnum_prop": 0.5752212389380531,
"repo_name": "naohisas/KVS",
"id": "c4f441d847e8a9930aec9b8ed8602fcd54e5aece",
"size": "113",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Example/SupportPython/Array/array.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1226831"
},
{
"name": "C++",
"bytes": "7219103"
},
{
"name": "CMake",
"bytes": "16176"
},
{
"name": "GLSL",
"bytes": "169334"
},
{
"name": "Makefile",
"bytes": "145951"
},
{
"name": "Python",
"bytes": "14182"
},
{
"name": "QMake",
"bytes": "5029"
},
{
"name": "Shell",
"bytes": "3023"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
"""
This module implements various transmuter classes.
Transmuters are essentially classes that generate TransformedStructures from
various data sources. They enable the high-throughput generation of new
structures and input files.
It also includes the helper function, batch_write_vasp_input to generate an
entire directory of vasp input files for running.
"""
from six.moves import filter, map
# Module authorship/maintenance metadata.
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 4, 2012"
import os
import re
import warnings
from multiprocessing import Pool
from pymatgen.alchemy.materials import TransformedStructure
class StandardTransmuter(object):
"""
An example of a Transmuter object, which performs a sequence of
transformations on many structures to generate TransformedStructures.
.. attribute: transformed_structures
List of all transformed structures.
"""
def __init__(self, transformed_structures, transformations=None,
extend_collection=0, ncores=None):
"""
Initializes a transmuter from an initial list of
:class:`pymatgen.alchemy.materials.TransformedStructure`.
Args:
transformed_structures ([TransformedStructure]): Input transformed
structures
transformations ([Transformations]): New transformations to be
applied to all structures.
extend_collection (int): Whether to use more than one output
structure from one-to-many transformations. extend_collection
can be an int, which determines the maximum branching for each
transformation.
ncores (int): Number of cores to use for applying transformations.
Uses multiprocessing.Pool. Default is None, which implies
serial.
"""
self.transformed_structures = transformed_structures
self.ncores = ncores
if transformations is not None:
for trans in transformations:
self.append_transformation(trans,
extend_collection=extend_collection)
def get_transformed_structures(self):
"""
Returns all TransformedStructures.
.. deprecated:: v2.1.0
Use transformed_structures attribute instead. Will be removed in
next version.
"""
warnings.warn("Use transformed_structures attribute instead.",
DeprecationWarning)
return self.transformed_structures
def __getitem__(self, index):
return self.transformed_structures[index]
def __getattr__(self, name):
return [getattr(x, name) for x in self.transformed_structures]
def undo_last_change(self):
"""
Undo the last transformation in the TransformedStructure.
Raises:
IndexError if already at the oldest change.
"""
for x in self.transformed_structures:
x.undo_last_change()
def redo_next_change(self):
"""
Redo the last undone transformation in the TransformedStructure.
Raises:
IndexError if already at the latest change.
"""
for x in self.transformed_structures:
x.redo_next_change()
    def __len__(self):
        """Return the number of transformed structures in this transmuter."""
        return len(self.transformed_structures)
    def append_transformation(self, transformation, extend_collection=False,
                              clear_redo=True):
        """
        Appends a transformation to all TransformedStructures.

        Args:
            transformation: Transformation to append
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.
            clear_redo (bool): Whether to clear the redo list. By default,
                this is True, meaning any appends clears the history of
                undoing. However, when using append_transformation to do a
                redo, the redo list should not be cleared to allow multiple
                redos.

        Returns:
            NOTE(review): despite the original "Returns: list of booleans"
            claim, neither branch below returns anything — this method
            mutates self.transformed_structures and returns None. Confirm
            against callers before restoring a return value.
        """
        # Parallel path: only when ncores is set AND the transformation
        # declares itself safe for multiprocessing.
        if self.ncores and transformation.use_multiprocessing:
            p = Pool(self.ncores)
            #need to condense arguments into single tuple to use map
            z = map(
                lambda x: (x, transformation, extend_collection, clear_redo),
                self.transformed_structures)
            new_tstructs = p.map(_apply_transformation, z, 1)
            # _apply_transformation (defined elsewhere in this module)
            # appears to return a list per input structure; flatten them.
            self.transformed_structures = []
            for ts in new_tstructs:
                self.transformed_structures.extend(ts)
        else:
            # Serial path: one-to-many transformations may return extra new
            # structures, collected and appended after the loop.
            new_structures = []
            for x in self.transformed_structures:
                new = x.append_transformation(transformation,
                                              extend_collection,
                                              clear_redo=clear_redo)
                if new is not None:
                    new_structures.extend(new)
            self.transformed_structures.extend(new_structures)
def extend_transformations(self, transformations):
"""
Extends a sequence of transformations to the TransformedStructure.
Args:
transformations: Sequence of Transformations
"""
for t in transformations:
self.append_transformation(t)
def apply_filter(self, structure_filter):
"""
Applies a structure_filter to the list of TransformedStructures
in the transmuter.
Args:
structure_filter: StructureFilter to apply.
"""
def test_transformed_structure(ts):
return structure_filter.test(ts.final_structure)
self.transformed_structures = list(filter(test_transformed_structure,
self.transformed_structures))
for ts in self.transformed_structures:
ts.append_filter(structure_filter)
def write_vasp_input(self, vasp_input_set, output_dir,
create_directory=True, subfolder=None,
include_cif=False):
"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{formula}_{number}.
Args:
vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to create
vasp input files from structures
output_dir: Directory to output files
create_directory (bool): Create the directory if not present.
Defaults to True.
subfolder: Callable to create subdirectory name from
transformed_structure. e.g.,
lambda x: x.other_parameters["tags"][0] to use the first tag.
include_cif (bool): Whether to output a CIF as well. CIF files
are generally better supported in visualization programs.
"""
batch_write_vasp_input(self.transformed_structures, vasp_input_set,
output_dir, create_directory, subfolder,
include_cif)
def set_parameter(self, key, value):
"""
Add parameters to the transmuter. Additional parameters are stored in
the as_dict() output.
Args:
key: The key for the parameter.
value: The value for the parameter.
"""
for x in self.transformed_structures:
x.other_parameters[key] = value
def add_tags(self, tags):
"""
Add tags for the structures generated by the transmuter.
Args:
tags: A sequence of tags. Note that this should be a sequence of
strings, e.g., ["My awesome structures", "Project X"].
"""
self.set_parameter("tags", tags)
def __str__(self):
output = ["Current structures", "------------"]
for x in self.transformed_structures:
output.append(str(x.final_structure))
return "\n".join(output)
    def append_transformed_structures(self, tstructs_or_transmuter):
        """
        Overloaded to accept either a list of transformed structures
        or a transmuter, in which case the second transmuter's
        structures are appended.

        Args:
            tstructs_or_transmuter: A list of transformed structures or a
                transmuter.
        """
        if isinstance(tstructs_or_transmuter, self.__class__):
            self.transformed_structures.extend(tstructs_or_transmuter
                                               .transformed_structures)
        else:
            # Validate every element eagerly so a bad element fails before
            # any mutation of self.transformed_structures.
            for ts in tstructs_or_transmuter:
                assert isinstance(ts, TransformedStructure)
            self.transformed_structures.extend(tstructs_or_transmuter)
@staticmethod
def from_structures(structures, transformations=None, extend_collection=0):
"""
Alternative constructor from structures rather than
TransformedStructures.
Args:
structures: Sequence of structures
transformations: New transformations to be applied to all
structures
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
Returns:
StandardTransmuter
"""
tstruct = [TransformedStructure(s, []) for s in structures]
return StandardTransmuter(tstruct, transformations, extend_collection)
class CifTransmuter(StandardTransmuter):
    """
    Generates a Transmuter from a cif string, possibly containing multiple
    structures.
    """

    def __init__(self, cif_string, transformations=None, primitive=True,
                 extend_collection=False):
        """
        Generates a Transmuter from a cif string, possibly
        containing multiple structures.

        Args:
            cif_string: A string containing a cif or a series of cifs
            transformations: New transformations to be applied to all
                structures
            primitive: Whether to generate the primitive cell from the cif.
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.
        """
        transformed_structures = []
        lines = cif_string.split("\n")
        structure_data = []
        read_data = False
        for line in lines:
            # A "data_" block header marks the start of a new structure.
            # NOTE: raw string used for the regex (was "^\s*data", which is
            # an invalid escape sequence in modern Python).
            if re.match(r"^\s*data", line):
                structure_data.append([])
                read_data = True
            if read_data:
                structure_data[-1].append(line)
        for data in structure_data:
            tstruct = TransformedStructure.from_cif_string("\n".join(data), [],
                                                           primitive)
            transformed_structures.append(tstruct)
        StandardTransmuter.__init__(self, transformed_structures,
                                    transformations, extend_collection)

    @staticmethod
    def from_filenames(filenames, transformations=None, primitive=True,
                       extend_collection=False):
        """
        Generates a TransformedStructureCollection from cif files, possibly
        containing multiple structures each.

        Args:
            filenames: List of strings of the cif files
            transformations: New transformations to be applied to all
                structures
            primitive: Same meaning as in __init__.
            extend_collection: Same meaning as in __init__.
        """
        allcifs = []
        for fname in filenames:
            with open(fname, "r") as f:
                allcifs.append(f.read())
        return CifTransmuter("\n".join(allcifs), transformations,
                             primitive=primitive,
                             extend_collection=extend_collection)
class PoscarTransmuter(StandardTransmuter):
    """
    Generates a transmuter from a sequence of POSCARs.

    Args:
        poscar_string: List of POSCAR strings
        transformations: New transformations to be applied to all
            structures.
        extend_collection: Whether to use more than one output structure
            from one-to-many transformations.
    """

    def __init__(self, poscar_string, transformations=None,
                 extend_collection=False):
        seed = TransformedStructure.from_poscar_string(poscar_string, [])
        StandardTransmuter.__init__(self, [seed], transformations,
                                    extend_collection=extend_collection)

    @staticmethod
    def from_filenames(poscar_filenames, transformations=None,
                       extend_collection=False):
        """
        Convenience constructor: build a POSCAR transmuter from a list of
        POSCAR filenames.

        Args:
            poscar_filenames: List of POSCAR filenames
            transformations: New transformations to be applied to all
                structures.
            extend_collection: Same meaning as in __init__.
        """
        tstructs = []
        for filename in poscar_filenames:
            with open(filename, "r") as f:
                poscar_text = f.read()
            tstructs.append(
                TransformedStructure.from_poscar_string(poscar_text, []))
        return StandardTransmuter(tstructs, transformations,
                                  extend_collection=extend_collection)
def batch_write_vasp_input(transformed_structures, vasp_input_set, output_dir,
                           create_directory=True, subfolder=None,
                           include_cif=False):
    """
    Batch write vasp input for a sequence of transformed structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.

    Args:
        transformed_structures: Sequence of TransformedStructures.
        vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet used to create
            vasp input files from structures.
        output_dir: Directory to output files
        create_directory (bool): Create the directory if not present.
            Defaults to True.
        subfolder: Function to create subdirectory name from
            transformed_structure.
            e.g., lambda x: x.other_parameters["tags"][0] to use the first
            tag.
        include_cif (bool): Whether to output a CIF as well. CIF files are
            generally better supported in visualization programs.
    """
    if include_cif:
        # Lazy import hoisted out of the loop (was re-imported on every
        # iteration); still deferred so pymatgen.io.cifio is only required
        # when CIF output is requested.
        from pymatgen.io.cifio import CifWriter
    for i, s in enumerate(transformed_structures):
        # Strip whitespace from the formula so it is filesystem-friendly.
        # NOTE: raw string used for the regex (was "\s+", an invalid escape
        # sequence in modern Python).
        formula = re.sub(r"\s+", "", s.final_structure.formula)
        if subfolder is not None:
            subdir = subfolder(s)
            dirname = os.path.join(output_dir, subdir,
                                   "{}_{}".format(formula, i))
        else:
            dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
        s.write_vasp_input(vasp_input_set, dirname,
                           create_directory=create_directory)
        if include_cif:
            writer = CifWriter(s.final_structure)
            writer.write_file(os.path.join(dirname, "{}.cif".format(formula)))
def _apply_transformation(inputs):
    """
    Helper for multiprocessing of apply_transformation. Must live at module
    level (not inside the class) so that it can be pickled by Pool.map.

    Args:
        inputs: Tuple of (transformed structure, transformation to apply,
            extend_collection flag, clear_redo flag).

    Returns:
        List of output structures: the modified initial structure, plus any
        new structures created by a one-to-many transformation.
    """
    tstruct, transformation, extend_collection, clear_redo = inputs
    extra = tstruct.append_transformation(transformation, extend_collection,
                                          clear_redo=clear_redo)
    outputs = [tstruct]
    if extra:
        outputs.extend(extra)
    return outputs
| {
"content_hash": "909cc0c035de735f79112f9f35d75f6a",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 79,
"avg_line_length": 38.780092592592595,
"alnum_prop": 0.6049662746970692,
"repo_name": "yanikou19/pymatgen",
"id": "8862fead5fb40deb17cd9cd80e970015ad0ac190",
"size": "16770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/alchemy/transmuters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7429"
},
{
"name": "JavaScript",
"bytes": "3638"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "3368797"
},
{
"name": "Shell",
"bytes": "5100"
}
],
"symlink_target": ""
} |
import os, subprocess, sys, shutil, argparse
sys.tracebacklimit = 3
## Reimplementation of the standard which tool in unix systems
#
# @param program The program whose existence in the PATH is to be checked.
# @return None, if the executable is available, the path and location of the executable if it is.
def which(program):
    """Reimplementation of the standard unix ``which`` tool.

    Args:
        program: Executable name, or a path (relative or absolute) to one.

    Returns:
        The path of the executable if it exists and is executable, searching
        PATH when a bare name is given; None otherwise.
    """
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    # Removed the redundant function-local "import os" (os is imported at
    # module level) and the unused fname from os.path.split.
    fpath = os.path.dirname(program)
    if fpath:
        # An explicit path was given: only check that location.
        if is_exe(program):
            return program
    else:
        # Bare name: search every entry of PATH.
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None
# add the lib folder containing the scripts imported below to the python search path
script_subfolder = os.path.join(os.getcwd(), "lib")
if script_subfolder not in sys.path:
    sys.path.insert(0, script_subfolder)
# import the required functions from the developed scripts
# (these modules live in ./lib, which was added to sys.path just above)
import prepare_vhdl
import create_sopc_model
import modify_arbiters
import create_nios_projects
# blessings module enables styles in the terminal output (bold print and colored output)
# if it is not found, exit the script
# NOTE(review): the bare "except:" below also swallows KeyboardInterrupt and
# SystemExit; "except ImportError:" would be more precise.
try:
    from blessings import Terminal
except:
    print "Module blessings not found..."
    sys.exit()
# create a terminal object from the blessings module to print styled text
t = Terminal()
# test if quartus_sh and sopc_builder are in the current path, otherwise complain and exit script
# NOTE(review): "which(...) == None" is more idiomatically written "is None".
if which("quartus_sh") == None:
    print t.color(1)("Quartus executable not found...")
    sys.exit()
if which("sopc_builder") == None:
    print t.color(1)("SOPC Builder executable not found, maybe you need to start a Nios2 shell with nios2_command_shell.sh...")
    sys.exit()
# add the command line arguments
# project_name: the name of the project to be created
parser = argparse.ArgumentParser(description='Automatically create Quartus projects with given name.')
# --project_name: name used for the project folder and Quartus project
parser.add_argument('--project_name',
                    action='store',
                    default="",
                    help='The Quartus project name',
                    required=True)
# --system_description: XML file describing the basic SOPC system to build
parser.add_argument('--system_description',
                    action='store',
                    default="",
                    help='Basic system description XML file',
                    required=True)
# --sopc: open the SOPC Builder GUI after generation for manual changes
parser.add_argument('--sopc',
                    action='store_true',
                    default=False,
                    help='Start SOPC Builder to modify system',
                    required=False)
# --no_tdma: skip the arbiter (TDMA) modification step entirely
parser.add_argument('--no_tdma',
                    action='store_true',
                    default=False,
                    help='Do not modify arbiters',
                    required=False)
# --source_code: directory containing the C sources for each Nios II CPU
parser.add_argument('--source_code',
                    action='store',
                    default="",
                    help='Location of C sources for each CPU',
                    required=True)
args = parser.parse_args()
# NOTE(review): hard-coded workstation path; consider making this configurable.
base_folder = "/home/psopc/Desktop/"
print "Creating project called: " + args.project_name
# set basic project properties
# get the project name from the command line arguments
# set the FPGA family and the device
project_name = args.project_name
fpga_family = "Cyclone IV E"
fpga_part = "EP4CE115F29C7"
# try to create the project folder
# if it already exists, delete it
try:
    os.makedirs(os.path.join(base_folder, project_name))
except os.error:
    # print t.color(1)("The project directory already exists...")
    print t.color(1)("The directory already exists, so it will be deleted.")
    shutil.rmtree(os.path.join(base_folder, project_name))
    os.makedirs(os.path.join(base_folder, project_name))
# copy the standard VHDL top level file into the newly created project folder
shutil.copyfile(os.path.join("lib", "common_files", "standard_top_level.vhd"), os.path.join(base_folder, project_name, project_name + ".vhd"))
# TODO: find out how to set the timing constraints, the line below does not have any effect
shutil.copyfile(os.path.join("lib", "common_files", "standard_constraints.tcl"), os.path.join(base_folder, project_name, project_name + ".sdc"))
print t.bold("Modifying top level VHDL file...\n")
# call the first script function to modify the top level VHDL file
# set the correct entity and architecture name as well as the name of the system component
prepare_vhdl.prepare_vhdl_file(os.path.join(base_folder, project_name, project_name + ".vhd"), project_name)
print t.bold("Creating Quartus project...\n")
# try to run quartus_sh --prepare, which creates an empty project with the properties set above
# if it returns a non-zero value, exit the script and print an error message
# in any case, the Quartus output is shown
try:
    p2 = subprocess.check_call(["quartus_sh", "--prepare","-f", fpga_family, "-d", fpga_part, os.path.join(base_folder, project_name, project_name)])
except subprocess.CalledProcessError:
    print t.color(1)("The Quartus project could not be created...")
    sys.exit()
print t.bold("Set assignments from file...")
# try to call the Quartus and run a TCL script which sets further project properties
# the output folder is set to output_files
# the pin location assignments are set
# (quartus_sh -t must run from inside the project folder, hence the chdir pair)
working_directory = os.getcwd()
os.chdir(os.path.join(base_folder, project_name))
try:
    p2 = subprocess.check_call(["quartus_sh", "-t", os.path.join(working_directory, "lib", "common_files", "standard_assignments.tcl"), project_name])
except subprocess.CalledProcessError:
    print t.color(1)("Assignments could not be set...")
    sys.exit()
os.chdir(working_directory)
print t.bold("Creating a SOPC Builder system and generating it...")
# call the second script function, which creates a SOPC Builder input file from a given basic architecture description
create_sopc_model.create(base_folder, project_name, args.system_description, fpga_part, fpga_family)
# call SOPC Builder to generate the newly created model
# if there is an error during generation, open SOPC Builder to fix it
print t.bold("SOPC Builder running in background...")
p = subprocess.Popen(["sopc_builder", os.path.join(base_folder, project_name, project_name + "_system.sopc"), "--no-splash", "--generate"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error_text = p.communicate()
print "Output"
print output
# if the SOPC generation encountered an error, open the GUI to be able to fix it by hand
if "Error" in error_text:
print error_text
subprocess.call(["sopc_builder", os.path.join(base_folder, project_name, project_name + "_system.sopc")])
# if requested, open the SOPC Builder GUI
# if an error occurred, SOPC Builder will have been opened already
if not "error" in error_text:
if args.sopc == True:
print t.bold("Starting SOPC Builder as requested...")
subprocess.call(["sopc_builder", os.path.join(base_folder, project_name, project_name + "_system.sopc")])
# if requested, do not modify the arbiters in the system
if args.no_tdma == False:
print t.bold("Modifying the system file generated by SOPC Builder...")
# call the third script, which modifies the system VHDL file generated by SOPC Builder
# it takes the basic architecture description and the system VHDL file as input values
# using the architecture description, it finds all slaves which are connected to more than one master
# and modifies their arbiters
modify_arbiters.modify(args.system_description, os.path.join(base_folder, project_name, project_name + "_system.vhd"))
print t.bold("Compiling and synthesizing the system...")
# try to run the Quartus compilation flow to fully compile the project and create the FPGA programming image in the output_files folder
try:
    p2 = subprocess.check_call(["quartus_sh", "--flow","compile", os.path.join(base_folder, project_name, project_name)])
except subprocess.CalledProcessError:
    print t.color(1)("The Quartus project could not be compiled...")
    sys.exit()
# NOTE(review): the commented-out block below is a disabled timing-constraints
# flow kept for reference; see the TODO near the .sdc copy earlier in this file.
# print t.bold("Setting clock constraints...")
# os.chdir(project_name)
# try:
#     p2 = subprocess.check_call(["quartus_sta", "-t", os.path.join("..", "input_files", "standard_constraints.tcl"), project_name])
# except subprocess.CalledProcessError:
#     print t.color(1)("Constraints could not be set...")
#     sys.exit()
# print t.bold("Running timing analyzer again...")
# try:
#     p2 = subprocess.check_call(["quartus_sta", project_name, '-c', project_name])
# except subprocess.CalledProcessError:
#     print t.color(1)("Timing analyzer failed to run...")
#     sys.exit()
# os.chdir("..")
print t.bold("Programming the FPGA...")
# download the generated .sof image to the FPGA; on failure, tell the user
# the exact command to retry by hand
try:
    p2 = subprocess.check_call(["nios2-configure-sof", os.path.join(base_folder, project_name, "output_files", project_name + ".sof")])
except subprocess.CalledProcessError:
    print t.color(1)("The programming did not succeed, you will have to run the following command manually:")
    print t.color(1)("\tnios2-configure-sof " + str(os.path.join(base_folder, project_name, "output_files", project_name + ".sof")))
print t.bold("Creating board support packages and projects for each CPU...")
# the fourth script creates BSP support packages and Nios2 projects from given C or C++ source files
create_nios_projects.create(base_folder, project_name, args.system_description, os.path.join(base_folder, project_name, project_name + "_system.sopc"), args.source_code)
| {
"content_hash": "b1e9a3ceed8248e188a5cbaee0332612",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 187,
"avg_line_length": 39.6359649122807,
"alnum_prop": 0.7292243000995906,
"repo_name": "mikulcak/forsyde_psopc",
"id": "d8627f29b4203f8db19fe23be9bdefde83b68dab",
"size": "10510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source_code/system_creation/create_quartus_project.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5731919"
},
{
"name": "C++",
"bytes": "122041"
},
{
"name": "Makefile",
"bytes": "1549"
},
{
"name": "Objective-C",
"bytes": "2103"
},
{
"name": "Python",
"bytes": "80491"
},
{
"name": "Tcl",
"bytes": "20644"
},
{
"name": "VHDL",
"bytes": "5227"
}
],
"symlink_target": ""
} |
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
def train_tfidf(comments, categories, class_weight=None):
    """Fit a tf-idf + hinge-loss SGD (linear SVM) pipeline.

    Args:
        comments: Iterable of raw text documents.
        categories: Target labels, one per document.
        class_weight: Optional class-weight setting forwarded to the
            SGDClassifier.

    Returns:
        The fitted sklearn Pipeline.
    """
    from sklearn.pipeline import Pipeline
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import SGDClassifier

    vectorizer = TfidfVectorizer(lowercase=True, ngram_range=(1, 3),
                                 analyzer="word", min_df=3)
    estimator = SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3,
                              random_state=42, class_weight=class_weight)
    pipeline = Pipeline([('vect', vectorizer), ('clf', estimator)])
    return pipeline.fit(comments, categories)
class NaiveClassifier(BaseEstimator, ClassifierMixin):
    """Baseline classifier flagging comments that contain known bad words.

    In ``fake`` mode (the default) it simply looks for the literal token
    "fakeinsult"; otherwise it performs a case-insensitive substring search
    for any of the configured ``badwords``.
    """

    def __init__(self, badwords=None, fake=True):
        # BUG FIX: the default used to be a mutable list literal (badwords=[]),
        # which is shared across every instance created without an explicit
        # value. Use None as the sentinel and normalize here instead.
        self.badwords = badwords if badwords is not None else []
        self.fake = fake

    def fit(self, X, y):
        """No-op fit; this classifier learns nothing from the data."""
        return self

    def predict(self, X, y=None):
        """Return a 0/1 numpy array, where 1 means "insult detected"."""
        predictions = []
        for x in X:
            if not self.fake:
                if any(badword in x.lower() for badword in self.badwords):
                    predictions.append(1)
                else:
                    predictions.append(0)
            else:
                if "fakeinsult" in x:
                    predictions.append(1)
                else:
                    predictions.append(0)
        return np.array(predictions)
def build_word_vector(w2vmodel, text, size):
    """Average the (component-sorted) embedding vectors of the given words.

    Words missing from the model are skipped; if no word is found, the zero
    vector is returned.

    Args:
        w2vmodel: Mapping from word to a 1-D embedding of length ``size``.
        text: Iterable of word tokens.
        size: Embedding dimensionality.

    Returns:
        A (1, size) numpy array.
    """
    acc = np.zeros(size).reshape((1, size))
    n_found = 0.
    for token in text:
        try:
            # Each vector is sorted component-wise before being accumulated,
            # matching the original implementation's behavior.
            acc += np.sort(w2vmodel[token]).reshape((1, size))
            n_found += 1.
        except KeyError:
            continue
    if n_found != 0:
        acc /= n_found
    return acc
def w2vectorize(collection, model, n_dim):
    """Embed every tokenized document and standardize the resulting matrix.

    Args:
        collection: Iterable of token lists (one list per document).
        model: Word-embedding mapping, as used by build_word_vector.
        n_dim: Embedding dimensionality.

    Returns:
        A (len(collection), n_dim) array, column-scaled to zero mean and
        unit variance.
    """
    from sklearn.preprocessing import scale
    matrix = np.concatenate(
        [build_word_vector(model, doc, n_dim) for doc in collection])
    return scale(matrix)
def train_word2vec(categories, comments, n_dim):
    """Train a word2vec embedding plus a logistic-loss SGD classifier on it.

    Args:
        categories: Target labels.
        comments: Raw text documents.
        n_dim: Embedding dimensionality.

    Returns:
        Tuple of (word2vec model, fitted SGDClassifier).
    """
    from feature_extraction import tokenize_document
    from feature_extraction import word2vec_model
    from sklearn.linear_model import SGDClassifier

    tokenized = [tokenize_document(doc) for doc in comments]
    embedding = word2vec_model(tokenized, n_dim)
    features = w2vectorize(tokenized, embedding, n_dim)
    clf = SGDClassifier(loss='log', penalty='l1')
    clf.fit(features, categories)
    return embedding, clf
def train_custom(comments, categories):
    """Fit a pipeline of hand-crafted features feeding a linear SVM.

    Args:
        comments: Raw text documents.
        categories: Target labels.

    Returns:
        The fitted sklearn Pipeline.
    """
    from sklearn.pipeline import Pipeline
    from sklearn.svm import LinearSVC
    from feature_extraction import CustomTransformer

    pipeline = Pipeline([
        ('vect', CustomTransformer()),
        ('clf', LinearSVC(random_state=42, dual=False)),
    ])
    return pipeline.fit(comments, categories)
def train_feature_union(comments, categories):
    """Fit a classifier on the union of tf-idf and hand-crafted features.

    Features from a word-level tf-idf vectorizer and a CustomTransformer are
    concatenated, reduced to the 70% highest-scoring columns by chi2, and
    fed to an L2-regularized logistic regression.

    Args:
        comments: Raw text documents.
        categories: Target labels.

    Returns:
        The fitted sklearn Pipeline.
    """
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline, FeatureUnion
    from sklearn.feature_selection import SelectPercentile, chi2
    from feature_extraction import CustomTransformer

    word_tfidf = TfidfVectorizer(lowercase=True, ngram_range=(1, 3),
                                 analyzer="word", binary=False, min_df=3)
    features = FeatureUnion([("custom", CustomTransformer()),
                             ("words", word_tfidf)])
    pipeline = Pipeline([
        ('vect', features),
        ('select', SelectPercentile(score_func=chi2, percentile=70)),
        ('clf', LogisticRegression(tol=1e-8, penalty='l2', C=4,
                                   max_iter=10000)),
    ])
    return pipeline.fit(comments, categories)
def train_assembling_voting(comments, categories):
    """Fit a soft-voting ensemble of a tf-idf model and a custom-feature model.

    The tf-idf branch receives three times the voting weight of the
    custom-feature branch.

    Args:
        comments: Raw text documents.
        categories: Target labels.

    Returns:
        The fitted VotingClassifier.
    """
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.pipeline import Pipeline
    from sklearn.linear_model import SGDClassifier
    from sklearn.ensemble import VotingClassifier
    from feature_extraction import CustomTransformer

    tfidf_branch = Pipeline([
        ('vect', TfidfVectorizer(lowercase=True, ngram_range=(1, 3),
                                 analyzer="word", min_df=3)),
        ('clf', SGDClassifier(loss='log', penalty='l2', alpha=1e-3,
                              random_state=42)),
    ])
    custom_branch = Pipeline([
        ('vect', CustomTransformer()),
        ('clf', SGDClassifier(loss='log', penalty='l2', alpha=1e-3,
                              random_state=42)),
    ])
    ensemble = VotingClassifier(
        estimators=[('text', tfidf_branch), ('custom', custom_branch)],
        voting='soft', weights=[3, 1])
    return ensemble.fit(comments, categories)
| {
"content_hash": "04ce003699cb0e34a60d794981f0418d",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 114,
"avg_line_length": 39.01652892561984,
"alnum_prop": 0.6390595212878627,
"repo_name": "rafaharo/trolling_detection",
"id": "69fb6ba1128fcbb5a071e157fb0a0ebe0bf53a79",
"size": "4721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/trolling_detection/training.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "188793"
},
{
"name": "Python",
"bytes": "12206"
}
],
"symlink_target": ""
} |
lambda a : 1
lambda : meta.lambda-function.python, source.python, storage.type.function.lambda.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
a : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python, variable.parameter.function.language.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
: : meta.lambda-function.python, punctuation.section.function.lambda.begin.python, source.python
: source.python
1 : constant.numeric.dec.python, source.python
| {
"content_hash": "0164e4a41c4d0d164bcd6b269a79fee9",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 143,
"avg_line_length": 65.1,
"alnum_prop": 0.7050691244239631,
"repo_name": "MagicStack/MagicPython",
"id": "710f5d5671a79368e231c6dec9e5dad199cfaec7",
"size": "651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functions/lambda4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6854"
},
{
"name": "JavaScript",
"bytes": "1338031"
},
{
"name": "Makefile",
"bytes": "2083"
},
{
"name": "Python",
"bytes": "666648"
},
{
"name": "Reason",
"bytes": "11395"
}
],
"symlink_target": ""
} |
"""Utilities for connecting to jupyter kernels
The :class:`ConnectionFileMixin` class in this module encapsulates the logic
related to writing and reading connections files.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
import glob
import json
import os
import socket
import stat
import tempfile
import warnings
from getpass import getpass
import zmq
from traitlets.config import LoggingConfigurable
from .localinterfaces import localhost
from ipython_genutils.path import filefind
from ipython_genutils.py3compat import (
bytes_to_str, cast_bytes, cast_bytes_py2, string_types,
)
from traitlets import (
Bool, Integer, Unicode, CaselessStrEnum, Instance, Type,
)
from jupyter_core.paths import jupyter_data_dir, jupyter_runtime_dir
def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0, hb_port=0,
                          control_port=0, ip='', key=b'', transport='tcp',
                          signature_scheme='hmac-sha256', kernel_name=''
                          ):
    """Generates a JSON config file, including the selection of random ports.

    Parameters
    ----------
    fname : unicode
        The path to the file to write

    shell_port : int, optional
        The port to use for ROUTER (shell) channel.

    iopub_port : int, optional
        The port to use for the SUB channel.

    stdin_port : int, optional
        The port to use for the ROUTER (raw input) channel.

    control_port : int, optional
        The port to use for the ROUTER (control) channel.

    hb_port : int, optional
        The port to use for the heartbeat REP channel.

    ip : str, optional
        The ip address the kernel will bind to.

    key : str, optional
        The Session key used for message authentication.

    signature_scheme : str, optional
        The scheme used for message authentication.
        This has the form 'digest-hash', where 'digest'
        is the scheme used for digests, and 'hash' is the name of the hash function
        used by the digest scheme.
        Currently, 'hmac' is the only supported digest scheme,
        and 'sha256' is the default hash function.

    kernel_name : str, optional
        The name of the kernel currently connected to.
    """
    if not ip:
        ip = localhost()
    # default to temporary connector file
    if not fname:
        fd, fname = tempfile.mkstemp('.json')
        os.close(fd)

    # Find open ports as necessary.
    ports = []
    ports_needed = int(shell_port <= 0) + \
                   int(iopub_port <= 0) + \
                   int(stdin_port <= 0) + \
                   int(control_port <= 0) + \
                   int(hb_port <= 0)
    if transport == 'tcp':
        # Bind throwaway sockets to port 0 to let the OS pick free ports,
        # then read the chosen port numbers back and close the sockets.
        for i in range(ports_needed):
            sock = socket.socket()
            # struct.pack('ii', (0,0)) is 8 null bytes
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, b'\0' * 8)
            sock.bind((ip, 0))
            ports.append(sock)
        for i, sock in enumerate(ports):
            port = sock.getsockname()[1]
            sock.close()
            ports[i] = port
    else:
        # ipc transport: "ports" are numeric suffixes of socket file paths;
        # pick the first N that do not already exist on disk.
        N = 1
        for i in range(ports_needed):
            while os.path.exists("%s-%s" % (ip, str(N))):
                N += 1
            ports.append(N)
            N += 1
    if shell_port <= 0:
        shell_port = ports.pop(0)
    if iopub_port <= 0:
        iopub_port = ports.pop(0)
    if stdin_port <= 0:
        stdin_port = ports.pop(0)
    if control_port <= 0:
        control_port = ports.pop(0)
    if hb_port <= 0:
        hb_port = ports.pop(0)

    cfg = dict( shell_port=shell_port,
                iopub_port=iopub_port,
                stdin_port=stdin_port,
                control_port=control_port,
                hb_port=hb_port,
              )
    cfg['ip'] = ip
    cfg['key'] = bytes_to_str(key)
    cfg['transport'] = transport
    cfg['signature_scheme'] = signature_scheme
    cfg['kernel_name'] = kernel_name

    with open(fname, 'w') as f:
        f.write(json.dumps(cfg, indent=2))

    if hasattr(stat, 'S_ISVTX'):
        # set the sticky bit on the file and its parent directory
        # to avoid periodic cleanup
        paths = [fname]
        runtime_dir = os.path.dirname(fname)
        if runtime_dir:
            paths.append(runtime_dir)
        for path in paths:
            permissions = os.stat(path).st_mode
            new_permissions = permissions | stat.S_ISVTX
            if new_permissions != permissions:
                try:
                    # BUG FIX: this previously called os.chmod(path, permissions),
                    # re-applying the OLD mode, so the sticky bit was never
                    # actually set.
                    os.chmod(path, new_permissions)
                except OSError as e:
                    # failed to set sticky bit,
                    # probably not a big deal
                    warnings.warn(
                        "Failed to set sticky bit on %r: %s" % (path, e),
                        RuntimeWarning,
                    )

    return fname, cfg
def find_connection_file(filename='kernel-*.json', path=None, profile=None):
    """find a connection file, and return its absolute path.

    The current working directory and optional search path
    will be searched for the file if it is not given by absolute path.

    If the argument does not match an existing file, it will be interpreted
    as a fileglob, and the matching file with the latest access time will be
    used.

    Parameters
    ----------
    filename : str
        The connection file or fileglob to search for.
    path : str or list of strs[optional]
        Paths in which to search for connection files.
    profile : deprecated
        Ignored; Jupyter has no profiles.

    Returns
    -------
    str : The absolute path of the connection file.
    """
    if profile is not None:
        warnings.warn("Jupyter has no profiles. profile=%s has been ignored." % profile)
    if path is None:
        path = ['.', jupyter_runtime_dir()]
    if isinstance(path, string_types):
        path = [path]

    try:
        # Fast path: the name resolves directly against the search path.
        return filefind(filename, path)
    except IOError:
        pass

    # Fall back to glob matching; a name without '*' becomes a substring glob.
    pat = filename if '*' in filename else '*%s*' % filename
    candidates = []
    for directory in path:
        candidates.extend(glob.glob(os.path.join(directory, pat)))
    candidates = [os.path.abspath(c) for c in candidates]

    if not candidates:
        raise IOError("Could not find %r in %r" % (filename, path))
    if len(candidates) == 1:
        return candidates[0]
    # Multiple matches: return the most recently accessed one.
    return sorted(candidates, key=lambda c: os.stat(c).st_atime)[-1]
def tunnel_to_kernel(connection_info, sshserver, sshkey=None):
    """tunnel connections to a kernel via ssh

    This will open four SSH tunnels from localhost on this machine to the
    ports associated with the kernel. They can be either direct
    localhost-localhost tunnels, or if an intermediate server is necessary,
    the kernel must be listening on a public IP.

    Parameters
    ----------
    connection_info : dict or str (path)
        Either a connection dict, or the path to a JSON connection file
    sshserver : str
        The ssh sever to use to tunnel to the kernel. Can be a full
        `user@server:port` string. ssh config aliases are respected.
    sshkey : str [optional]
        Path to file containing ssh key to use for authentication.
        Only necessary if your ssh config does not already associate
        a keyfile with the host.

    Returns
    -------
    (shell, iopub, stdin, hb) : ints
        The four ports on localhost that have been forwarded to the kernel.
    """
    from zmq.ssh import tunnel

    if isinstance(connection_info, string_types):
        # A path was given rather than a dict: load it from the JSON file.
        with open(connection_info) as f:
            connection_info = json.loads(f.read())

    info = connection_info
    local_ports = tunnel.select_random_ports(4)
    remote_ports = (info['shell_port'], info['iopub_port'],
                    info['stdin_port'], info['hb_port'])
    remote_ip = info['ip']

    if tunnel.try_passwordless_ssh(sshserver, sshkey):
        password = False
    else:
        password = getpass("SSH Password for %s: " % cast_bytes_py2(sshserver))

    for local_port, remote_port in zip(local_ports, remote_ports):
        tunnel.ssh_tunnel(local_port, remote_port, sshserver, remote_ip,
                          sshkey, password)

    return tuple(local_ports)
#-----------------------------------------------------------------------------
# Mixin for classes that work with connection files
#-----------------------------------------------------------------------------

# Map from channel name to the zmq socket type used when creating a socket
# for that channel.
channel_socket_types = {
    'hb' : zmq.REQ,
    'shell' : zmq.DEALER,
    'iopub' : zmq.SUB,
    'stdin' : zmq.DEALER,
    'control': zmq.DEALER,
}

# Trait names of the five port attributes, in canonical order (used by
# ConnectionFileMixin.ports below).
port_names = [ "%s_port" % channel for channel in ('shell', 'stdin', 'iopub', 'hb', 'control')]
class ConnectionFileMixin(LoggingConfigurable):
    """Mixin for configurable classes that work with connection files"""

    # Base directory for Jupyter data files; lazily defaulted below.
    data_dir = Unicode()

    def _data_dir_default(self):
        return jupyter_data_dir()

    # The addresses for the communication channels
    connection_file = Unicode('', config=True,
    help="""JSON file in which to store connection info [default: kernel-<pid>.json]
    This file will contain the IP, ports, and authentication key needed to connect
    clients to this kernel. By default, this file will be created in the security dir
    of the current profile, but can be specified by absolute path.
    """)

    # Set to True once the connection file has been written to disk.
    _connection_file_written = Bool(False)

    transport = CaselessStrEnum(['tcp', 'ipc'], default_value='tcp', config=True)

    ip = Unicode(config=True,
        help="""Set the kernel\'s IP address [default localhost].
        If the IP address is something other than localhost, then
        Consoles on other machines will be able to connect
        to the Kernel, so be careful!"""
    )
def _ip_default(self):
if self.transport == 'ipc':
if self.connection_file:
return os.path.splitext(self.connection_file)[0] + '-ipc'
else:
return 'kernel-ipc'
else:
return localhost()
def _ip_changed(self, name, old, new):
if new == '*':
self.ip = '0.0.0.0'
# protected traits
hb_port = Integer(0, config=True,
help="set the heartbeat port [default: random]")
shell_port = Integer(0, config=True,
help="set the shell (ROUTER) port [default: random]")
iopub_port = Integer(0, config=True,
help="set the iopub (PUB) port [default: random]")
stdin_port = Integer(0, config=True,
help="set the stdin (ROUTER) port [default: random]")
control_port = Integer(0, config=True,
help="set the control (ROUTER) port [default: random]")
@property
def ports(self):
return [ getattr(self, name) for name in port_names ]
# The Session to use for communication with the kernel.
session = Instance('jupyter_client.session.Session')
def _session_default(self):
from jupyter_client.session import Session
return Session(parent=self)
#--------------------------------------------------------------------------
# Connection and ipc file management
#--------------------------------------------------------------------------
def get_connection_info(self, session=False):
"""Return the connection info as a dict
Parameters
----------
session : bool [default: False]
If True, return our session object will be included in the connection info.
If False (default), the configuration parameters of our session object will be included,
rather than the session object itself.
Returns
-------
connect_info : dict
dictionary of connection information.
"""
info = dict(
transport=self.transport,
ip=self.ip,
shell_port=self.shell_port,
iopub_port=self.iopub_port,
stdin_port=self.stdin_port,
hb_port=self.hb_port,
control_port=self.control_port,
)
if session:
# add *clone* of my session,
# so that state such as digest_history is not shared.
info['session'] = self.session.clone()
else:
# add session info
info.update(dict(
signature_scheme=self.session.signature_scheme,
key=self.session.key,
))
return info
# factory for blocking clients
blocking_class = Type(klass=object, default_value='jupyter_client.BlockingKernelClient')
def blocking_client(self):
"""Make a blocking client connected to my kernel"""
info = self.get_connection_info()
info['parent'] = self
bc = self.blocking_class(**info)
bc.session.key = self.session.key
return bc
def cleanup_connection_file(self):
"""Cleanup connection file *if we wrote it*
Will not raise if the connection file was already removed somehow.
"""
if self._connection_file_written:
# cleanup connection files on full shutdown of kernel we started
self._connection_file_written = False
try:
os.remove(self.connection_file)
except (IOError, OSError, AttributeError):
pass
def cleanup_ipc_files(self):
"""Cleanup ipc files if we wrote them."""
if self.transport != 'ipc':
return
for port in self.ports:
ipcfile = "%s-%i" % (self.ip, port)
try:
os.remove(ipcfile)
except (IOError, OSError):
pass
def write_connection_file(self):
"""Write connection info to JSON dict in self.connection_file."""
if self._connection_file_written and os.path.exists(self.connection_file):
return
self.connection_file, cfg = write_connection_file(self.connection_file,
transport=self.transport, ip=self.ip, key=self.session.key,
stdin_port=self.stdin_port, iopub_port=self.iopub_port,
shell_port=self.shell_port, hb_port=self.hb_port,
control_port=self.control_port,
signature_scheme=self.session.signature_scheme,
kernel_name=self.kernel_name
)
# write_connection_file also sets default ports:
for name in port_names:
setattr(self, name, cfg[name])
self._connection_file_written = True
def load_connection_file(self, connection_file=None):
"""Load connection info from JSON dict in self.connection_file.
Parameters
----------
connection_file: unicode, optional
Path to connection file to load.
If unspecified, use self.connection_file
"""
if connection_file is None:
connection_file = self.connection_file
self.log.debug(u"Loading connection file %s", connection_file)
with open(connection_file) as f:
info = json.load(f)
self.load_connection_info(info)
def load_connection_info(self, info):
"""Load connection info from a dict containing connection info.
Typically this data comes from a connection file
and is called by load_connection_file.
Parameters
----------
info: dict
Dictionary containing connection_info.
See the connection_file spec for details.
"""
self.transport = info.get('transport', self.transport)
self.ip = info.get('ip', self._ip_default())
for name in port_names:
if getattr(self, name) == 0 and name in info:
# not overridden by config or cl_args
setattr(self, name, info[name])
if 'key' in info:
self.session.key = cast_bytes(info['key'])
if 'signature_scheme' in info:
self.session.signature_scheme = info['signature_scheme']
#--------------------------------------------------------------------------
# Creating connected sockets
#--------------------------------------------------------------------------
def _make_url(self, channel):
"""Make a ZeroMQ URL for a given channel."""
transport = self.transport
ip = self.ip
port = getattr(self, '%s_port' % channel)
if transport == 'tcp':
return "tcp://%s:%i" % (ip, port)
else:
return "%s://%s-%s" % (transport, ip, port)
def _create_connected_socket(self, channel, identity=None):
"""Create a zmq Socket and connect it to the kernel."""
url = self._make_url(channel)
socket_type = channel_socket_types[channel]
self.log.debug("Connecting to: %s" % url)
sock = self.context.socket(socket_type)
# set linger to 1s to prevent hangs at exit
sock.linger = 1000
if identity:
sock.identity = identity
sock.connect(url)
return sock
def connect_iopub(self, identity=None):
"""return zmq Socket connected to the IOPub channel"""
sock = self._create_connected_socket('iopub', identity=identity)
sock.setsockopt(zmq.SUBSCRIBE, b'')
return sock
def connect_shell(self, identity=None):
"""return zmq Socket connected to the Shell channel"""
return self._create_connected_socket('shell', identity=identity)
def connect_stdin(self, identity=None):
"""return zmq Socket connected to the StdIn channel"""
return self._create_connected_socket('stdin', identity=identity)
def connect_hb(self, identity=None):
"""return zmq Socket connected to the Heartbeat channel"""
return self._create_connected_socket('hb', identity=identity)
def connect_control(self, identity=None):
"""return zmq Socket connected to the Control channel"""
return self._create_connected_socket('control', identity=identity)
# Public API of this module: the connection-file helpers defined above.
__all__ = [
    'write_connection_file',
    'find_connection_file',
    'tunnel_to_kernel',
]
| {
"content_hash": "07efd9b7f17b8fd02a6503a69abc7123",
"timestamp": "",
"source": "github",
"line_count": 535,
"max_line_length": 100,
"avg_line_length": 34.16448598130841,
"alnum_prop": 0.5856220593062699,
"repo_name": "unnikrishnankgs/va",
"id": "042904f0e7483abf9f29fb8e94c947424b417cd3",
"size": "18278",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/jupyter_client/connect.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1836035"
},
{
"name": "C++",
"bytes": "12002305"
},
{
"name": "CMake",
"bytes": "128"
},
{
"name": "CSS",
"bytes": "64776"
},
{
"name": "Cuda",
"bytes": "78890"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "297329"
},
{
"name": "JavaScript",
"bytes": "4313047"
},
{
"name": "Jupyter Notebook",
"bytes": "603900"
},
{
"name": "Makefile",
"bytes": "7573"
},
{
"name": "Nginx",
"bytes": "544"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "PureBasic",
"bytes": "134"
},
{
"name": "Python",
"bytes": "51104955"
},
{
"name": "Shell",
"bytes": "71646"
},
{
"name": "Smarty",
"bytes": "28890"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
"""Example: run a web search for a fixed query and print the results."""
from google_search import crawler

QUERY = "united kingdom"


def main():
    """Search the web for QUERY and echo the result list, then each item."""
    results = crawler.search_web(QUERY)
    # results = crawler.search_news(QUERY)  # alternative: news search
    print(results)
    for entry in results:
        print(entry)


if __name__ == '__main__':
    main()
| {
"content_hash": "197d85e6d89ee92ff875edbe41b96f01",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 48,
"avg_line_length": 23.90909090909091,
"alnum_prop": 0.6463878326996197,
"repo_name": "xibowang/google-crawler",
"id": "e67f9c24e6eaec77dd1588378a1659a727ee0b07",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4939"
}
],
"symlink_target": ""
} |
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/policy_user -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_policy_user
short_description: Module to manage openshift policy for users
description:
- Manage openshift policy for users.
options:
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
namespace:
description:
- The namespace scope
required: false
default: None
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
user:
description:
- The name of the user
required: true
default: None
aliases: []
resource_kind:
description:
- The kind of policy to affect
required: true
default: None
choices: ["role", "cluster-role", "scc"]
aliases: []
resource_name:
description:
- The name of the policy
required: true
default: None
aliases: []
state:
description:
- Desired state of the policy
required: true
default: present
choices: ["present", "absent"]
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: oc adm policy remove-scc-from-user an-scc ausername
oc_adm_policy_user:
user: ausername
resource_kind: scc
resource_name: an-scc
state: absent
- name: oc adm policy add-cluster-role-to-user system:build-strategy-docker ausername
oc_adm_policy_user:
user: ausername
resource_kind: cluster-role
resource_name: system:build-strategy-docker
state: present
'''
# -*- -*- -*- End included fragment: doc/policy_user -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    ''' Exception class for Yedit

    Raised for invalid key paths, unexpected item types during traversal,
    and YAML/JSON load failures.
    '''
    pass
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-public-methods
class Yedit(object):  # pragma: no cover
    ''' Class to modify yaml files

    Wraps a YAML (or JSON) document and supports get/put/delete/append/update
    operations addressed by dotted key paths such as ``a.b[0].c``.  Works
    with ruamel.yaml when available (round-trip, format-preserving) and
    falls back to plain pyyaml otherwise.
    '''
    # Validates a whole key path.
    # NOTE(review): this pattern embeds '%s' but valid_key() expands it with
    # str.format(), so the extra separators are never interpolated here —
    # '%' and 's' are simply allowed as literal characters.  re_key (below,
    # with '{}') is interpolated as intended.
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    # Captures one path component: either a [index] or a bare key name.
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
    # Separator characters recognized across the codebase; the active one
    # is removed from this set so the others can appear inside key names.
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        # Eagerly parse file/content so yaml_dict is populated on creation.
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator '''
        self._separator = inc_sep

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key allowing the appropriate separator

        Returns a list of (index, name) tuples, one per path component;
        exactly one element of each tuple is non-empty.
        '''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key.format(''.join(common_separators)), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
            return False
        return True

    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key

        Returns True on success, None when the path cannot be resolved.
        An empty key clears the whole document.
        '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # Walk down to the parent of the target entry.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a#b
            return c

        Intermediate dicts are created as needed; raises YeditException
        when the existing structure conflicts with the requested path.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                # create the missing intermediate dict
                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c

        Returns None when the path cannot be resolved.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        return data

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        # Write to a sibling temp file, then rename for an atomic-ish update.
        tmp_filename = filename + '.yedit'

        with open(tmp_filename, 'w') as yfd:
            yfd.write(contents)

        os.rename(tmp_filename, filename)

    def write(self):
        ''' write to file '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')

        # Try to set format attributes if supported
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass

        # Try to use RoundTripDumper if supported.
        try:
            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except AttributeError:
            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))

        return (True, self.yaml_dict)

    def read(self):
        ''' read from file '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True

        return False

    def load(self, content_type='yaml'):
        ''' return yaml file '''
        contents = self.read()

        if not contents and not self.content:
            return None

        # `content` passed to the constructor takes precedence over the file.
        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

                # Try to use RoundTripLoader if supported.
                # NOTE(review): pyyaml has no RoundTripLoader attribute, so
                # the attribute lookup raises AttributeError and we fall back
                # to the plain safe_load below; under ruamel the extra
                # positional argument's effect should be verified.
                try:
                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)

                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))

        return self.yaml_dict

    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None

        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list

        Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path):
        ''' remove path from a dict

        Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # value is a subset-check: every key of `value` must match
                # in `entry` (for/else: rval=True only if no mismatch broke).
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        '''append value to a list

        Creates the list at `path` if it does not exist yet.
        Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict

        For a dict entry, merges `value` in; for a list entry, replaces the
        element identified by `curr_value` or `index`, or appends if absent.
        Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' put path, value into a dict

        Mutates a copy first so a failed add leaves yaml_dict untouched.
        Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        # (a dump/load round trip preserves ruamel's format metadata,
        # which copy.deepcopy did not).
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)

        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)

            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' create a yaml file

        Only applies when the backing file does not exist yet.
        Returns (changed, yaml_dict).
        '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)

            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result is not None:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    @staticmethod
    def get_curr_value(invalue, val_type):
        '''return the current value'''
        if invalue is None:
            return None

        curr_value = invalue
        if val_type == 'yaml':
            # NOTE(review): plain yaml.load without an explicit Loader —
            # unsafe/deprecated for untrusted input; verify callers only
            # pass trusted data.
            curr_value = yaml.load(invalue)
        elif val_type == 'json':
            curr_value = json.loads(invalue)

        return curr_value

    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''determine value type passed'''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']

        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)

        # There is a special case where '' will turn into None after yaml loading it so skip
        if isinstance(inc_value, str) and inc_value == '':
            pass
        # If vtype is not str then go ahead and attempt to yaml load it.
        elif isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.safe_load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming value. ' +
                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))

        return inc_value

    @staticmethod
    def process_edits(edits, yamlfile):
        '''run through a list of edits and process them one-by-one'''
        results = []
        for edit in edits:
            value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
            if edit.get('action') == 'update':
                # pylint: disable=line-too-long
                curr_value = Yedit.get_curr_value(
                    Yedit.parse_value(edit.get('curr_value')),
                    edit.get('curr_value_format'))

                rval = yamlfile.update(edit['key'],
                                       value,
                                       edit.get('index'),
                                       curr_value)

            elif edit.get('action') == 'append':
                rval = yamlfile.append(edit['key'], value)

            else:
                rval = yamlfile.put(edit['key'], value)

            if rval[0]:
                results.append({'key': edit['key'], 'edit': rval[1]})

        return {'changed': len(results) > 0, 'results': results}

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(params):
        '''perform the idempotent crud operations

        Dispatches on params['state'] ('list'/'absent'/'present') and
        returns an Ansible-style result dict.
        '''
        yamlfile = Yedit(filename=params['src'],
                         backup=params['backup'],
                         separator=params['separator'])

        state = params['state']

        if params['src']:
            rval = yamlfile.load()

            if yamlfile.yaml_dict is None and state != 'present':
                return {'failed': True,
                        'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                               'file exists, that it is has correct permissions, and is valid yaml.'}

        if state == 'list':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content

            if params['key']:
                rval = yamlfile.get(params['key']) or {}

            return {'changed': False, 'result': rval, 'state': state}

        elif state == 'absent':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content

            if params['update']:
                rval = yamlfile.pop(params['key'], params['value'])
            else:
                rval = yamlfile.delete(params['key'])

            if rval[0] and params['src']:
                yamlfile.write()

            return {'changed': rval[0], 'result': rval[1], 'state': state}

        elif state == 'present':
            # check if content is different than what is in the file
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])

                # We had no edits to make and the contents are the same
                if yamlfile.yaml_dict == content and \
                   params['value'] is None:
                    return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}

                yamlfile.yaml_dict = content

            # If we were passed a key, value then
            # we encapsulate it in a list and process it
            # Key, Value passed to the module : Converted to Edits list #
            edits = []
            _edit = {}
            if params['value'] is not None:
                _edit['value'] = params['value']
                _edit['value_type'] = params['value_type']
                _edit['key'] = params['key']

                if params['update']:
                    _edit['action'] = 'update'
                    _edit['curr_value'] = params['curr_value']
                    _edit['curr_value_format'] = params['curr_value_format']
                    _edit['index'] = params['index']

                elif params['append']:
                    _edit['action'] = 'append'

                edits.append(_edit)

            elif params['edits'] is not None:
                edits = params['edits']

            if edits:
                results = Yedit.process_edits(edits, yamlfile)

                # if there were changes and a src provided to us we need to write
                if results['changed'] and params['src']:
                    yamlfile.write()

                return {'changed': results['changed'], 'result': results['results'], 'state': state}

            # no edits to make
            if params['src']:
                # pylint: disable=redefined-variable-type
                rval = yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1],
                        'state': state}

            # We were passed content but no src, key or value, or edits. Return contents in memory
            return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Exception class for openshiftcli

    Raised for invalid `oc` invocations (e.g. a delete with neither a
    name nor a selector).
    '''
    pass
# Directories searched in addition to $PATH when looking for `oc`.
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Find and return oc binary file '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can live in /usr/local/bin, which ansible/sudo may drop from
    # $PATH, so search $PATH plus a couple of extra well-known dirs.
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep)
    search_dirs = search_dirs + ADDITIONAL_PATH_LOOKUPS

    binary = 'oc'

    try:
        # Python 3: shutil.which accepts an explicit search path.
        found = shutil.which(binary, path=os.pathsep.join(search_dirs))
    except AttributeError:
        # Python 2 has no shutil.which; scan the directories by hand and
        # keep the first existing candidate.
        for directory in search_dirs:
            candidate = os.path.join(directory, binary)
            if os.path.exists(candidate):
                binary = candidate
                break
    else:
        if found is not None:
            binary = found

    return binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are retuned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node scheduable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
    ''' Actually executes the command. This makes mocking easier.

        cmds: full argv list for the child process.
        input_data: data piped to the child's stdin (bytes pipe, so this
            should be bytes or None — TODO confirm callers never pass str).

        Returns (returncode, stdout, stderr) with the streams decoded as
        UTF-8 text.
    '''
    # Inject the configured kubeconfig without mutating os.environ itself.
    curr_env = os.environ.copy()
    curr_env.update({'KUBECONFIG': self.kubeconfig})
    proc = subprocess.Popen(cmds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=curr_env)

    stdout, stderr = proc.communicate(input_data)

    return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
    '''Base command for oc

       cmd: argument list appended after the binary (and 'adm').
       oadm: when True run ``oc adm ...`` instead of plain ``oc``.
       output: when True capture stdout into rval['results'].
       output_type: 'json' to decode stdout as JSON, 'raw' to keep it as-is.
       input_data: optional data piped to the child's stdin.

       Returns a dict with 'returncode', 'cmd' and 'results'; on failure
       (or JSON decode error) 'stdout'/'stderr' are included as well.
    '''
    cmds = [self.oc_binary]

    if oadm:
        cmds.append('adm')

    cmds.extend(cmd)

    # A namespace of None, or the placeholder strings 'none'/'empty',
    # means "do not pass -n".  (Fixed: previously misspelled 'emtpy',
    # so the 'empty' placeholder was passed through as a real namespace.)
    if self.all_namespaces:
        cmds.extend(['--all-namespaces'])
    elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
        cmds.extend(['-n', self.namespace])

    if self.verbose:
        print(' '.join(cmds))

    try:
        returncode, stdout, stderr = self._run(cmds, input_data)
    except OSError as ex:
        # The binary itself could not be executed (missing, not executable, ...).
        returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

    rval = {"returncode": returncode,
            "cmd": ' '.join(cmds)}

    if output_type == 'json':
        rval['results'] = {}
        if output and stdout:
            try:
                rval['results'] = json.loads(stdout)
            except ValueError as verr:
                # Always surface the decode failure.  (The old exact-message
                # check, "No JSON object could be decoded", never matched on
                # Python 3, silently hiding parse errors.)
                rval['err'] = verr.args
    elif output_type == 'raw':
        rval['results'] = stdout if output else ''

    if self.verbose:
        print("STDOUT: {0}".format(stdout))
        print("STDERR: {0}".format(stderr))

    if 'err' in rval or returncode != 0:
        rval.update({"stderr": stderr,
                     "stdout": stdout})

    return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(contents)

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''
        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            # Prefer ruamel.yaml's round-trip dumper when available so the
            # original document formatting/ordering is preserved.
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            # Any other ftype is written verbatim; data must already be text.
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])

        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        # delete=False: only the name is wanted here; the caller writes to
        # it and arranges removal via Utils.cleanup.
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        # Accept a single item or a list of items uniformly.
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name

            Returns the first result whose metadata.name matches, or None.
        '''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break
        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output

            Parses `oc version` output into {tool: version} for the tools
            listed in version_search.
        '''
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    # The version is the last whitespace-separated token.
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version" in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings '''
        versions_dict = {}
        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" is what we have, we want "3.3"
                # NOTE(review): the fixed [1:4] slice assumes single-digit
                # major/minor components (e.g. misbehaves for "v3.10.x").
                versions_dict[tech + '_short'] = version[1:4]

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        # Imported lazily so hosts without rpm bindings only fail if this
        # check is actually used.
        import rpm

        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")

        return rpmquery.count() > 0

    # Disabling too-many-branches. This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

            Recursively compares dicts/lists; keys listed in skip_keys (plus
            'metadata' and 'status') are ignored.  Returns True when equal.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print('list compare returned false')
                            return False

                    # NOTE(review): this elif compares the whole lists on each
                    # non-dict element pair — preserved as-is.
                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config'''

    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string

           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string

            Options are emitted sorted by key as --some-key=value flags.
            An option is included when its 'include' flag is set and its
            value is truthy (or an int, so 0/False still appear).  When
            ascommalist names a key whose value is a dict, that dict is
            rendered as comma-delimited key=value pairs.
        '''
        flags = []
        for key, data in sorted(self.config_options.items()):
            if not data['include']:
                continue
            value = data['value']
            # Falsy non-int values (empty string/list/None) are omitted.
            if not value and not isinstance(value, int):
                continue
            if key == ascommalist:
                value = ','.join('{}={}'.format(kk, vv)
                                 for kk, vv in sorted(value.items()))
            flags.append('--{}={}'.format(key.replace('_', '-'), value))
        return flags
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/rolebinding.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class RoleBindingConfig(object):
    ''' Handle rolebinding config

        Builds a v1 RoleBinding resource dict (self.data) from the supplied
        names, role reference, subjects and user names.
    '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 name,
                 namespace,
                 kubeconfig,
                 group_names=None,
                 role_ref=None,
                 subjects=None,
                 usernames=None):
        ''' constructor for handling rolebinding options '''
        self.kubeconfig = kubeconfig
        self.name = name
        self.namespace = namespace
        self.group_names = group_names
        self.role_ref = role_ref
        self.subjects = subjects
        self.usernames = usernames
        self.data = {}

        self.create_dict()

    def create_dict(self):
        ''' create a default rolebinding as a dict '''
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'RoleBinding'
        self.data['groupNames'] = self.group_names
        # BUG FIX: 'metadata' must be initialized before assigning into it;
        # previously this raised KeyError on every construction.
        self.data['metadata'] = {}
        self.data['metadata']['name'] = self.name
        self.data['metadata']['namespace'] = self.namespace
        self.data['roleRef'] = self.role_ref
        self.data['subjects'] = self.subjects
        self.data['userNames'] = self.usernames
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class RoleBinding(Yedit):
    ''' Class to model a rolebinding openshift object

        Wraps a RoleBinding document in Yedit; the *_path class attributes
        are the Yedit key paths into the document.  The subjects/role_ref/
        group_names/user_names properties lazily fetch and cache the
        corresponding sections.

        NOTE(review): the add_*/remove_*/update_* helpers mutate the cached
        list objects in place; whether that writes through to the underlying
        document depends on Yedit returning live references from get() —
        verify against the Yedit implementation.
    '''
    group_names_path = "groupNames"
    role_ref_path = "roleRef"
    subjects_path = "subjects"
    user_names_path = "userNames"

    kind = 'RoleBinding'

    def __init__(self, content):
        '''RoleBinding constructor'''
        super(RoleBinding, self).__init__(content=content)
        self._subjects = None
        self._role_ref = None
        self._group_names = None
        self._user_names = None

    @property
    def subjects(self):
        ''' subjects property '''
        if self._subjects is None:
            self._subjects = self.get_subjects()
        return self._subjects

    @subjects.setter
    def subjects(self, data):
        ''' subjects property setter'''
        self._subjects = data

    @property
    def role_ref(self):
        ''' role_ref property '''
        if self._role_ref is None:
            self._role_ref = self.get_role_ref()
        return self._role_ref

    @role_ref.setter
    def role_ref(self, data):
        ''' role_ref property setter'''
        self._role_ref = data

    @property
    def group_names(self):
        ''' group_names property '''
        if self._group_names is None:
            self._group_names = self.get_group_names()
        return self._group_names

    @group_names.setter
    def group_names(self, data):
        ''' group_names property setter'''
        self._group_names = data

    @property
    def user_names(self):
        ''' user_names property '''
        if self._user_names is None:
            self._user_names = self.get_user_names()
        return self._user_names

    @user_names.setter
    def user_names(self, data):
        ''' user_names property setter'''
        self._user_names = data

    def get_group_names(self):
        ''' return groupNames from the document, or [] when absent '''
        return self.get(RoleBinding.group_names_path) or []

    def get_user_names(self):
        ''' return userNames from the document, or [] when absent '''
        return self.get(RoleBinding.user_names_path) or []

    def get_role_ref(self):
        ''' return roleRef from the document, or {} when absent '''
        return self.get(RoleBinding.role_ref_path) or {}

    def get_subjects(self):
        ''' return subjects from the document, or [] when absent '''
        return self.get(RoleBinding.subjects_path) or []

    #### ADD #####
    def add_subject(self, inc_subject):
        ''' add a subject '''
        if self.subjects:
            # pylint: disable=no-member
            self.subjects.append(inc_subject)
        else:
            # No existing list: write a fresh one into the document.
            self.put(RoleBinding.subjects_path, [inc_subject])

        return True

    def add_role_ref(self, inc_role_ref):
        ''' add a role_ref; no-op (returns False) when one already exists '''
        if not self.role_ref:
            self.put(RoleBinding.role_ref_path, {"name": inc_role_ref})
            return True

        return False

    def add_group_names(self, inc_group_names):
        ''' add a group_names '''
        if self.group_names:
            # pylint: disable=no-member
            self.group_names.append(inc_group_names)
        else:
            self.put(RoleBinding.group_names_path, [inc_group_names])

        return True

    def add_user_name(self, inc_user_name):
        ''' add a username '''
        if self.user_names:
            # pylint: disable=no-member
            self.user_names.append(inc_user_name)
        else:
            self.put(RoleBinding.user_names_path, [inc_user_name])

        return True
    #### /ADD #####

    #### Remove #####
    def remove_subject(self, inc_subject):
        ''' remove a subject; returns False when it was not present '''
        try:
            # pylint: disable=no-member
            self.subjects.remove(inc_subject)
        except ValueError as _:
            return False

        return True

    def remove_role_ref(self, inc_role_ref):
        ''' remove a role_ref (by deleting its name key) when it matches '''
        if self.role_ref and self.role_ref['name'] == inc_role_ref:
            del self.role_ref['name']
            return True

        return False

    def remove_group_name(self, inc_group_name):
        ''' remove a groupname; returns False when it was not present '''
        try:
            # pylint: disable=no-member
            self.group_names.remove(inc_group_name)
        except ValueError as _:
            return False

        return True

    def remove_user_name(self, inc_user_name):
        ''' remove a username; returns False when it was not present '''
        try:
            # pylint: disable=no-member
            self.user_names.remove(inc_user_name)
        except ValueError as _:
            return False

        return True
    #### /REMOVE #####

    #### UPDATE #####
    def update_subject(self, inc_subject):
        ''' update a subject; falls back to add when it is not present '''
        try:
            # pylint: disable=no-member
            index = self.subjects.index(inc_subject)
        except ValueError as _:
            return self.add_subject(inc_subject)

        self.subjects[index] = inc_subject

        return True

    def update_group_name(self, inc_group_name):
        ''' update a groupname; falls back to add when it is not present '''
        try:
            # pylint: disable=no-member
            index = self.group_names.index(inc_group_name)
        except ValueError as _:
            return self.add_group_names(inc_group_name)

        self.group_names[index] = inc_group_name

        return True

    def update_user_name(self, inc_user_name):
        ''' update a username; falls back to add when it is not present '''
        try:
            # pylint: disable=no-member
            index = self.user_names.index(inc_user_name)
        except ValueError as _:
            return self.add_user_name(inc_user_name)

        self.user_names[index] = inc_user_name

        return True

    def update_role_ref(self, inc_role_ref):
        ''' update a role_ref '''
        self.role_ref['name'] = inc_role_ref

        return True
    #### /UPDATE #####

    #### FIND ####
    def find_subject(self, inc_subject):
        ''' find a subject; returns its index or None '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.subjects.index(inc_subject)
        except ValueError as _:
            return index

        return index

    def find_group_name(self, inc_group_name):
        ''' find a group_name; returns its index or None '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.group_names.index(inc_group_name)
        except ValueError as _:
            return index

        return index

    def find_user_name(self, inc_user_name):
        ''' find a user_name; returns its index or None '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.user_names.index(inc_user_name)
        except ValueError as _:
            return index

        return index

    def find_role_ref(self, inc_role_ref):
        ''' find a role_ref; returns it when its name matches, else None '''
        if self.role_ref and self.role_ref['name'] == inc_role_ref['name']:
            return self.role_ref

        return None
# -*- -*- -*- End included fragment: lib/rolebinding.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/scc.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecurityContextConstraintsConfig(object):
    ''' Handle scc options

        Builds a v1 SecurityContextConstraints resource dict (self.data)
        from the supplied strategy types, users, groups and annotations.
    '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 kubeconfig,
                 options=None,
                 fs_group='MustRunAs',
                 default_add_capabilities=None,
                 groups=None,
                 priority=None,
                 required_drop_capabilities=None,
                 run_as_user='MustRunAsRange',
                 se_linux_context='MustRunAs',
                 supplemental_groups='RunAsAny',
                 users=None,
                 annotations=None):
        ''' constructor for handling scc options '''
        self.kubeconfig = kubeconfig
        self.name = sname
        self.options = options
        self.fs_group = fs_group
        self.default_add_capabilities = default_add_capabilities
        self.groups = groups
        self.priority = priority
        self.required_drop_capabilities = required_drop_capabilities
        self.run_as_user = run_as_user
        self.se_linux_context = se_linux_context
        self.supplemental_groups = supplemental_groups
        self.users = users
        self.annotations = annotations
        self.data = {}

        self.create_dict()

    def create_dict(self):
        ''' assign the correct properties for a scc dict '''
        # allow options: caller-supplied options win; otherwise everything
        # is locked down by default.
        if self.options:
            for key, value in self.options.items():
                self.data[key] = value
        else:
            self.data['allowHostDirVolumePlugin'] = False
            self.data['allowHostIPC'] = False
            self.data['allowHostNetwork'] = False
            self.data['allowHostPID'] = False
            self.data['allowHostPorts'] = False
            self.data['allowPrivilegedContainer'] = False
            self.data['allowedCapabilities'] = None

        # version
        self.data['apiVersion'] = 'v1'
        # kind
        self.data['kind'] = 'SecurityContextConstraints'
        # defaultAddCapabilities
        self.data['defaultAddCapabilities'] = self.default_add_capabilities
        # fsGroup
        # BUG FIX: assign a fresh dict (mirroring runAsUser below); the old
        # ``self.data['fsGroup']['type'] = ...`` raised KeyError because the
        # 'fsGroup' key was never initialized.
        self.data['fsGroup'] = {'type': self.fs_group}
        # groups
        self.data['groups'] = []
        if self.groups:
            self.data['groups'] = self.groups
        # metadata
        self.data['metadata'] = {}
        self.data['metadata']['name'] = self.name
        if self.annotations:
            # Annotations are merged directly into the metadata dict.
            for key, value in self.annotations.items():
                self.data['metadata'][key] = value
        # priority
        self.data['priority'] = self.priority
        # requiredDropCapabilities
        self.data['requiredDropCapabilities'] = self.required_drop_capabilities
        # runAsUser
        self.data['runAsUser'] = {'type': self.run_as_user}
        # seLinuxContext
        self.data['seLinuxContext'] = {'type': self.se_linux_context}
        # supplementalGroups
        self.data['supplementalGroups'] = {'type': self.supplemental_groups}
        # users
        self.data['users'] = []
        if self.users:
            self.data['users'] = self.users
# pylint: disable=too-many-instance-attributes,too-many-public-methods,no-member
class SecurityContextConstraints(Yedit):
    ''' Class to wrap the oc command line tools

        Wraps an SCC document in Yedit; the *_path class attributes are the
        Yedit key paths into the document.  users/groups are lazily fetched
        and cached by the properties below.
    '''
    default_add_capabilities_path = "defaultAddCapabilities"
    fs_group_path = "fsGroup"
    groups_path = "groups"
    priority_path = "priority"
    required_drop_capabilities_path = "requiredDropCapabilities"
    run_as_user_path = "runAsUser"
    se_linux_context_path = "seLinuxContext"
    supplemental_groups_path = "supplementalGroups"
    users_path = "users"
    kind = 'SecurityContextConstraints'

    def __init__(self, content):
        '''SecurityContextConstraints constructor'''
        super(SecurityContextConstraints, self).__init__(content=content)
        self._users = None
        self._groups = None

    @property
    def users(self):
        ''' users property getter '''
        if self._users is None:
            self._users = self.get_users()
        return self._users

    @property
    def groups(self):
        ''' groups property getter '''
        if self._groups is None:
            self._groups = self.get_groups()
        return self._groups

    @users.setter
    def users(self, data):
        ''' users property setter'''
        self._users = data

    @groups.setter
    def groups(self, data):
        ''' groups property setter'''
        self._groups = data

    def get_users(self):
        '''get scc users; [] when the document has none'''
        return self.get(SecurityContextConstraints.users_path) or []

    def get_groups(self):
        '''get scc groups; [] when the document has none'''
        return self.get(SecurityContextConstraints.groups_path) or []

    def add_user(self, inc_user):
        ''' add a user '''
        if self.users:
            # Mutates the cached list in place.
            self.users.append(inc_user)
        else:
            # No existing list: write a fresh one into the document.
            self.put(SecurityContextConstraints.users_path, [inc_user])

        return True

    def add_group(self, inc_group):
        ''' add a group '''
        if self.groups:
            self.groups.append(inc_group)
        else:
            self.put(SecurityContextConstraints.groups_path, [inc_group])

        return True

    def remove_user(self, inc_user):
        ''' remove a user; returns False when it was not present '''
        try:
            self.users.remove(inc_user)
        except ValueError as _:
            return False

        return True

    def remove_group(self, inc_group):
        ''' remove a group; returns False when it was not present '''
        try:
            self.groups.remove(inc_group)
        except ValueError as _:
            return False

        return True

    def update_user(self, inc_user):
        ''' update a user; falls back to add when it is not present '''
        try:
            index = self.users.index(inc_user)
        except ValueError as _:
            return self.add_user(inc_user)

        self.users[index] = inc_user

        return True

    def update_group(self, inc_group):
        ''' update a group; falls back to add when it is not present '''
        try:
            index = self.groups.index(inc_group)
        except ValueError as _:
            return self.add_group(inc_group)

        self.groups[index] = inc_group

        return True

    def find_user(self, inc_user):
        ''' find a user; returns its index or None '''
        index = None
        try:
            index = self.users.index(inc_user)
        except ValueError as _:
            return index

        return index

    def find_group(self, inc_group):
        ''' find a group; returns its index or None '''
        index = None
        try:
            index = self.groups.index(inc_group)
        except ValueError as _:
            return index

        return index
# -*- -*- -*- End included fragment: lib/scc.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_adm_policy_user.py -*- -*- -*-
class PolicyUserException(Exception):
    '''Exception raised for PolicyUser errors.'''
class PolicyUserConfig(OpenShiftCLIConfig):
    ''' PolicyUserConfig is a DTO for user related policy. '''

    # Maps the resource_kind option onto the kind of object we manage.
    _KIND_MAP = {
        'role': 'rolebinding',
        'cluster-role': 'clusterrolebinding',
        'scc': 'scc',
    }

    def __init__(self, namespace, kubeconfig, policy_options):
        super(PolicyUserConfig, self).__init__(policy_options['name']['value'],
                                               namespace, kubeconfig, policy_options)
        self.kind = self.get_kind()
        self.namespace = namespace

    def get_kind(self):
        ''' return the kind we are working with (None for unknown kinds) '''
        return self._KIND_MAP.get(self.config_options['resource_kind']['value'])
# pylint: disable=too-many-return-statements
class PolicyUser(OpenShiftCLI):
    ''' Class to handle attaching policies to users '''

    def __init__(self,
                 policy_config,
                 verbose=False):
        ''' Constructor for PolicyUser

            policy_config: a PolicyUserConfig describing the action, user,
                resource kind and resource name.
        '''
        super(PolicyUser, self).__init__(policy_config.namespace, policy_config.kubeconfig, verbose)
        self.config = policy_config
        self.verbose = verbose
        self._rolebinding = None
        self._scc = None
        self._cluster_policy_bindings = None
        self._policy_bindings = None

    @property
    def policybindings(self):
        ''' Lazily fetch and cache the namespace's policybindings object. '''
        if self._policy_bindings is None:
            results = self._get('policybindings', None)
            if results['returncode'] != 0:
                raise OpenShiftCLIError('Could not retrieve policybindings')
            # First item of the first result is the binding document.
            self._policy_bindings = results['results'][0]['items'][0]

        return self._policy_bindings

    @property
    def clusterpolicybindings(self):
        ''' Lazily fetch and cache the clusterpolicybindings object. '''
        if self._cluster_policy_bindings is None:
            results = self._get('clusterpolicybindings', None)
            if results['returncode'] != 0:
                raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
            self._cluster_policy_bindings = results['results'][0]['items'][0]

        return self._cluster_policy_bindings

    @property
    def role_binding(self):
        ''' role_binding property '''
        return self._rolebinding

    @role_binding.setter
    def role_binding(self, binding):
        ''' setter for role_binding property '''
        self._rolebinding = binding

    @property
    def security_context_constraint(self):
        ''' security_context_constraint property '''
        return self._scc

    @security_context_constraint.setter
    def security_context_constraint(self, scc):
        ''' setter for security_context_constraint property '''
        self._scc = scc

    def get(self):
        '''fetch the desired kind

           This is only used for scc objects.
           The {cluster}rolebindings happen in exists.
        '''
        resource_name = self.config.config_options['name']['value']
        if resource_name == 'cluster-reader':
            # Special case: the binding object is pluralized upstream.
            resource_name += 's'

        return self._get(self.config.kind, resource_name)

    def exists_role_binding(self):
        ''' return whether role_binding exists '''
        bindings = None
        if self.config.config_options['resource_kind']['value'] == 'cluster-role':
            bindings = self.clusterpolicybindings
        else:
            bindings = self.policybindings

        if bindings is None:
            return False

        # A binding "exists" when one references the requested role AND
        # already lists the requested user.
        for binding in bindings['roleBindings']:
            _rb = binding['roleBinding']
            if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
                    _rb['userNames'] is not None and \
                    self.config.config_options['user']['value'] in _rb['userNames']:
                self.role_binding = binding
                return True

        return False

    def exists_scc(self):
        ''' return whether scc exists

            On lookup failure the raw result dict is returned instead of a
            bool (the caller inspects 'returncode').
        '''
        results = self.get()
        if results['returncode'] == 0:
            self.security_context_constraint = SecurityContextConstraints(results['results'][0])

            if self.security_context_constraint.find_user(self.config.config_options['user']['value']) != None:
                return True

            return False

        return results

    def exists(self):
        '''does the object exist?'''
        if self.config.config_options['resource_kind']['value'] == 'cluster-role':
            return self.exists_role_binding()

        elif self.config.config_options['resource_kind']['value'] == 'role':
            return self.exists_role_binding()

        elif self.config.config_options['resource_kind']['value'] == 'scc':
            return self.exists_scc()

        return False

    def perform(self):
        '''perform action on resource'''
        cmd = ['policy',
               self.config.config_options['action']['value'],
               self.config.config_options['name']['value'],
               self.config.config_options['user']['value']]
        return self.openshift_cmd(cmd, oadm=True)

    @staticmethod
    def run_ansible(params, check_mode):
        '''run the idempotent ansible code'''

        state = params['state']

        action = None
        # Translate the desired state into the oadm policy verb.
        if state == 'present':
            action = 'add-' + params['resource_kind'] + '-to-user'
        else:
            action = 'remove-' + params['resource_kind'] + '-from-user'

        nconfig = PolicyUserConfig(params['namespace'],
                                   params['kubeconfig'],
                                   {'action': {'value': action, 'include': False},
                                    'user': {'value': params['user'], 'include': False},
                                    'resource_kind': {'value': params['resource_kind'], 'include': False},
                                    'name': {'value': params['resource_name'], 'include': False},
                                   })

        policyuser = PolicyUser(nconfig, params['debug'])

        # Run the oc adm policy user related command

        ########
        # Delete
        ########
        if state == 'absent':
            if not policyuser.exists():
                return {'changed': False, 'state': 'absent'}

            if check_mode:
                return {'changed': False, 'msg': 'CHECK_MODE: would have performed a delete.'}

            api_rval = policyuser.perform()

            if api_rval['returncode'] != 0:
                return {'msg': api_rval}

            return {'changed': True, 'results' : api_rval, state:'absent'}

        if state == 'present':
            ########
            # Create
            ########
            results = policyuser.exists()
            # exists_scc may hand back a failure dict instead of a bool.
            if isinstance(results, dict) and 'returncode' in results and results['returncode'] != 0:
                return {'msg': results}

            if not results:

                if check_mode:
                    return {'changed': False, 'msg': 'CHECK_MODE: would have performed a create.'}

                api_rval = policyuser.perform()

                if api_rval['returncode'] != 0:
                    return {'msg': api_rval}

                return {'changed': True, 'results': api_rval, state: 'present'}

            return {'changed': False, state: 'present'}

        return {'failed': True, 'changed': False, 'results': 'Unknown state passed. %s' % state, state: 'unknown'}
# -*- -*- -*- End included fragment: class/oc_adm_policy_user.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_adm_policy_user.py -*- -*- -*-
def main():
    '''
    ansible oc adm module for user policy

    Parses the module arguments, delegates to PolicyUser.run_ansible and
    reports the result back to Ansible via exit_json/fail_json.
    '''

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', type='str',
                       choices=['present', 'absent']),
            debug=dict(default=False, type='bool'),
            resource_name=dict(required=True, type='str'),
            namespace=dict(default='default', type='str'),
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            user=dict(required=True, type='str'),
            resource_kind=dict(required=True, choices=['role', 'cluster-role', 'scc'], type='str'),
        ),
        supports_check_mode=True,
    )

    results = PolicyUser.run_ansible(module.params, module.check_mode)

    # run_ansible signals errors with a 'failed' key rather than raising.
    if 'failed' in results:
        module.fail_json(**results)

    module.exit_json(**results)
# -*- -*- -*- End included fragment: ansible/oc_adm_policy_user.py -*- -*- -*-
| {
"content_hash": "194477c2618d37a465d956ade9246e62",
"timestamp": "",
"source": "github",
"line_count": 2144,
"max_line_length": 118,
"avg_line_length": 32.88106343283582,
"alnum_prop": 0.5407747847426132,
"repo_name": "ivanhorvath/openshift-tools",
"id": "4c55827682d2b4921ea26b0827d6a344f8fb6c79",
"size": "71659",
"binary": false,
"copies": "5",
"ref": "refs/heads/prod",
"path": "openshift/installer/vendored/openshift-ansible-3.6.173.0.59/roles/lib_openshift/library/oc_adm_policy_user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Dockerfile",
"bytes": "70267"
},
{
"name": "Go",
"bytes": "382164"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "146500"
},
{
"name": "JavaScript",
"bytes": "2380"
},
{
"name": "Makefile",
"bytes": "3324"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "37739486"
},
{
"name": "Shell",
"bytes": "1643890"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
} |
"""Utility methods for customization args of interactions."""
from __future__ import annotations
from core import schema_utils
from core import utils
def get_full_customization_args(customization_args, ca_specs):
    """Populates the given customization_args dict with default values
    if any of the expected customization_args are missing.

    Args:
        customization_args: dict. Maps customization arg names to dicts of
            the form {'value': ...}.
        ca_specs: list. Spec objects exposing at least `name` and
            `default_value` attributes.

    Returns:
        dict. The same customization_args dict (mutated in place), with an
        entry for every spec; missing names are populated with
        {'value': <spec default>}.
    """
    for spec in ca_specs:
        customization_args.setdefault(spec.name, {'value': spec.default_value})
    return customization_args
def validate_customization_args_and_values(
        item_name, item_type, customization_args,
        ca_specs_to_validate_against, fail_on_validation_errors=False):
    """Validates the given `customization_args` dict against the specs set
    out in 'ca_specs_to_validate_against'. 'item_name' and 'item_type' are
    used to populate any error messages that arise during validation.

    Note that this may modify the given customization_args dict, if it has
    extra keys. It also normalizes any HTML in the customization_args dict.

    Args:
        item_name: str. This is always 'interaction'.
        item_type: str. The item_type is the ID of the interaction.
        customization_args: dict. Maps customization arg names to dicts of
            the form {'value': *}.
        ca_specs_to_validate_against: list(dict). Spec dicts, each with a
            name, description and default_value.
        fail_on_validation_errors: bool. Whether to raise errors if
            validation fails for customization args.

    Raises:
        ValidationError. The given 'customization_args' is not valid.
        ValidationError. The given 'customization_args' is missing at least
            one key.
    """
    if not isinstance(customization_args, dict):
        raise utils.ValidationError(
            'Expected customization args to be a dict, received %s'
            % customization_args)

    declared_names = [
        spec.name for spec in ca_specs_to_validate_against]

    # Reject any keys that are not strings or are not declared by a spec.
    for arg_name in customization_args.keys():
        if not isinstance(arg_name, str):
            raise utils.ValidationError(
                'Invalid customization arg name: %s' % arg_name)
        if arg_name not in declared_names:
            raise utils.ValidationError(
                '%s %s does not support customization arg %s.'
                % (item_name.capitalize(), item_type, arg_name))

    # Normalize each declared arg's value against its schema.
    for spec in ca_specs_to_validate_against:
        if spec.name not in customization_args:
            raise utils.ValidationError(
                'Customization argument is missing key: %s' % spec.name)
        try:
            normalized_value = schema_utils.normalize_against_schema(
                customization_args[spec.name]['value'], spec.schema)
            customization_args[spec.name]['value'] = normalized_value
        except Exception as e:
            # TODO(sll): Raise an actual exception here if parameters are
            # not involved (If they are, can we get sample values for the
            # state context parameters?).
            if fail_on_validation_errors:
                raise utils.ValidationError(e)
| {
"content_hash": "d6b307cecafa9dcb4be90db733ac714b",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 44.990196078431374,
"alnum_prop": 0.6391370668991065,
"repo_name": "kevinlee12/oppia",
"id": "7e4f739eaaa786b9cd050324d5bbe031964755f6",
"size": "5212",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "core/domain/customization_args_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "205771"
},
{
"name": "HTML",
"bytes": "1835761"
},
{
"name": "JavaScript",
"bytes": "1182599"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "13670639"
},
{
"name": "Shell",
"bytes": "2239"
},
{
"name": "TypeScript",
"bytes": "13024194"
}
],
"symlink_target": ""
} |
import sys
import os
if len(sys.argv) < 5:
print 'Usage: ipinyou.folder 25 train.log.txt test.log.txt'
# python splitadvertisers.py ../ 25 ../all/train.log.txt ../all/test.log.txt
exit(-1)
ifolder = sys.argv[1]
if not ifolder.endswith('/'):
ifolder = ifolder + '/'
adidx = int(sys.argv[2])
for i in range(3, len(sys.argv)):
fi = open(sys.argv[i], 'r')
first = True
advertiserFos = {}
hearder = ""
for line in fi:
if first:
first = False
header = line
continue
advertiser = line.split('\t')[adidx]
if advertiser not in advertiserFos:
if not os.path.exists(ifolder + advertiser):
os.makedirs(ifolder + advertiser)
fname = sys.argv[i][(sys.argv[i].rfind('/') + 1):]
advertiserFos[advertiser] = open(ifolder + advertiser + '/' + fname, 'w')
advertiserFos[advertiser].write(header)
advertiserFos[advertiser].write(line)
for advertiser in advertiserFos:
advertiserFos[advertiser].close()
| {
"content_hash": "4b134636bb184ffb1736b39531515089",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 85,
"avg_line_length": 31.38235294117647,
"alnum_prop": 0.5866916588566073,
"repo_name": "orenov/make-ipinyou-data",
"id": "43bf20a62b75bb7554510de17c8dba1d641253e8",
"size": "1085",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/splitadvertisers.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1270"
},
{
"name": "Python",
"bytes": "8624"
},
{
"name": "Shell",
"bytes": "277"
}
],
"symlink_target": ""
} |
from sqlalchemy import select, Column
from sqlalchemy import MetaData, Integer, String, Table
from migrate import ForeignKeyConstraint
from nova import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
    """Replace volumes.instance_id (int FK) with instance_uuid (str).

    Adds the instance_uuid column, copies the matching instances.uuid value
    into it, then drops the old foreign key and the instance_id column.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    volumes = Table('volumes', meta, autoload=True)
    instance_uuid_column = Column('instance_uuid', String(36))

    instance_uuid_column.create(volumes)
    try:
        volumes.update().values(
            instance_uuid=select(
                [instances.c.uuid],
                instances.c.id == volumes.c.instance_id)
            ).execute()
    except Exception:
        # Roll back the schema change, then abort the migration.
        # Bug fix: without the re-raise, a failed data copy was silently
        # ignored and the code below still dropped instance_id, losing data.
        instance_uuid_column.drop()
        raise

    fkeys = list(volumes.c.instance_id.foreign_keys)
    if fkeys:
        try:
            fk_name = fkeys[0].constraint.name
            ForeignKeyConstraint(
                columns=[volumes.c.instance_id],
                refcolumns=[instances.c.id],
                name=fk_name).drop()
        except Exception:
            LOG.error(_("foreign key could not be dropped"))
            raise

    volumes.c.instance_id.drop()
def downgrade(migrate_engine):
    """Restore volumes.instance_id (int FK), reversing upgrade().

    Adds the instance_id column back, repopulates it from instances.uuid
    matches, recreates the foreign key, and drops instance_uuid.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    volumes = Table('volumes', meta, autoload=True)
    instance_id_column = Column('instance_id', Integer)

    instance_id_column.create(volumes)
    try:
        volumes.update().values(
            instance_id=select(
                [instances.c.id],
                instances.c.uuid == volumes.c.instance_uuid)
            ).execute()
    except Exception:
        # Roll back the schema change, then abort the migration.
        # Bug fix: without the re-raise, a failed data copy was silently
        # ignored and instance_uuid was still dropped below, losing data.
        instance_id_column.drop()
        raise

    fkeys = list(volumes.c.instance_id.foreign_keys)
    if fkeys:
        try:
            fk_name = fkeys[0].constraint.name
            ForeignKeyConstraint(
                columns=[volumes.c.instance_id],
                refcolumns=[instances.c.id],
                name=fk_name).create()
        except Exception:
            LOG.error(_("foreign key could not be created"))
            raise

    volumes.c.instance_uuid.drop()
| {
"content_hash": "c86eb444273305707ef9dda0fd9a7501",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 62,
"avg_line_length": 28.56578947368421,
"alnum_prop": 0.5946568401658222,
"repo_name": "josephsuh/extra-specs",
"id": "ea8684f3df1ac4894ce2606aa430d6396e87c092",
"size": "2877",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "6005171"
},
{
"name": "Shell",
"bytes": "26155"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.tests.testutils import *
class TestOneToOne3(unittest.TestCase):
    # Tests for an optional one-to-one relation with cascade delete:
    # Person.passport is Optional (cascade_delete=True) while Passport.person
    # is Required, so a Passport can never be left without a Person.

    def setUp(self):
        # Fresh in-memory SQLite DB per test, seeded with one
        # Person('John') owning Passport('123').
        self.db = Database('sqlite', ':memory:')

        class Person(self.db.Entity):
            name = Required(unicode)
            passport = Optional("Passport", cascade_delete=True)

        class Passport(self.db.Entity):
            code = Required(unicode)
            person = Required("Person")

        self.db.generate_mapping(create_tables=True)

        with db_session:
            p1 = Person(name='John')
            Passport(code='123', person=p1)

    def tearDown(self):
        # Drop the reference so each test builds its schema from scratch.
        self.db = None

    @db_session
    def test_1(self):
        # Navigating through the one-to-one attribute inside a query filter.
        obj = select(p for p in self.db.Person if p.passport.id).first()
        self.assertEqual(obj.name, 'John')
        self.assertEqual(obj.passport.code, '123')

    @db_session
    def test_2(self):
        # `p.passport is None` should translate to LEFT JOIN + IS NULL.
        select(p for p in self.db.Person if p.passport is None)[:]
        sql = self.db.last_sql
        self.assertEqual(sql, '''SELECT "p"."id", "p"."name"
FROM "Person" "p"
LEFT JOIN "Passport" "passport"
ON "p"."id" = "passport"."person"
WHERE "passport"."id" IS NULL''')

    @db_session
    def test_3(self):
        # `not p.passport` is expected to produce the same SQL as test_2.
        select(p for p in self.db.Person if not p.passport)[:]
        sql = self.db.last_sql
        self.assertEqual(sql, '''SELECT "p"."id", "p"."name"
FROM "Person" "p"
LEFT JOIN "Passport" "passport"
ON "p"."id" = "passport"."person"
WHERE "passport"."id" IS NULL''')

    @db_session
    def test_4(self):
        # Truthiness of the relation attribute -> IS NOT NULL.
        select(p for p in self.db.Person if p.passport)[:]
        sql = self.db.last_sql
        self.assertEqual(sql, '''SELECT "p"."id", "p"."name"
FROM "Person" "p"
LEFT JOIN "Passport" "passport"
ON "p"."id" = "passport"."person"
WHERE "passport"."id" IS NOT NULL''')

    @db_session
    def test_5(self):
        # Deleting the Person; the last SQL issued is the Person row delete
        # (the dependent Passport is removed via cascade_delete).
        p = self.db.Person.get(name='John')
        p.delete()
        flush()
        sql = self.db.last_sql
        self.assertEqual(sql, '''DELETE FROM "Person"
WHERE "id" = ?
AND "name" = ?''')

    @raises_exception(ConstraintError, 'Cannot unlink Passport[1] from previous Person[1] object, because Passport.person attribute is required')
    @db_session
    def test_6(self):
        # Creating a second Passport for the same Person must fail: it would
        # implicitly unlink the existing (required) Passport.person.
        p = self.db.Person.get(name='John')
        self.db.Passport(code='456', person=p)

    @raises_exception(ConstraintError, 'Cannot unlink Passport[1] from previous Person[1] object, because Passport.person attribute is required')
    @db_session
    def test7(self):
        # Reassigning a passport to a person who already has one must fail
        # for the same reason as test_6.
        p2 = self.db.Person(name='Mike')
        pas2 = self.db.Passport(code='456', person=p2)
        commit()
        p1 = self.db.Person.get(name='John')
        pas2.person = p1
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "71db27c8f219980e0c3e970554fa1809",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 145,
"avg_line_length": 32.02197802197802,
"alnum_prop": 0.5816746739876458,
"repo_name": "Ahmad31/Web_Flask_Cassandra",
"id": "5e8c7582e6c75b49f3253b70044d4202b273f090",
"size": "2914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/lib/python2.7/site-packages/pony/orm/tests/test_relations_one2one3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "34860"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "HTML",
"bytes": "86875"
},
{
"name": "JavaScript",
"bytes": "7232"
},
{
"name": "Jupyter Notebook",
"bytes": "181"
},
{
"name": "Python",
"bytes": "12265503"
},
{
"name": "Shell",
"bytes": "3248"
}
],
"symlink_target": ""
} |
"""Policy abstraction for TFModels."""
import abc
import gin
import numpy as np
import six
from tensor2robot.meta_learning import meta_tf_models
from tensor2robot.policies import policies
@six.add_metaclass(abc.ABCMeta)
class MetaLearningPolicy(policies.Policy):
    """Abstract class for Tensorflow-based Meta-learning policies.
    """

    def reset_task(self):
        # Default no-op; subclasses override to clear per-task state.
        pass

    @abc.abstractmethod
    def adapt(self, episode_data):
        # Subclasses must consume episode_data to adapt to the current task.
        raise NotImplementedError()
@gin.configurable
class MAMLCEMPolicy(MetaLearningPolicy, policies.CEMPolicy):
    """CEM Policy that uses MAML/gradient descent for fast adaptation."""

    # TODO(T2R_CONTRIBUTORS) Replace t2r_model with pack feature function.
    def __init__(self,
                 t2r_model,
                 action_size=2,
                 cem_iters=3,
                 cem_samples=64,
                 num_elites=10,
                 **parent_kwargs):
        self._cem_iters = cem_iters
        self._cem_samples = cem_samples
        self._action_size = action_size
        self._num_elites = num_elites
        super(MAMLCEMPolicy, self).__init__(
            t2r_model, action_size, cem_iters, cem_samples, num_elites,
            **parent_kwargs)

    def reset_task(self):
        # Forget the conditioning episode when a new task begins.
        self._prev_episode_data = None

    def adapt(self, episode_data):
        # Store the conditioning episode used for fast adaptation.
        self._prev_episode_data = episode_data

    def SelectAction(self, state, context, timestep):
        # TODO(T2R_CONTRIBUTORS)
        # 'val_output' is the post-adaptation head; before any adaptation
        # data exists only 'train_output' is meaningful.
        prediction_key = (
            'val_output' if self._prev_episode_data else 'train_output')

        def objective_fn(candidate_actions):
            """Scores a batch of candidate action samples with the network.

            Args:
                candidate_actions: Batch of action samples to evaluate.

            Returns:
                The predicted Q values for the samples.
            """
            # TODO(chelseaf): This is the inefficient way to do it. Would be
            # faster to tile after the conv layers.
            tiled_state = np.tile(
                np.expand_dims(state, 0), [candidate_actions.shape[0], 1, 1, 1])
            np_inputs = self._t2r_model.pack_features(
                tiled_state, self._prev_episode_data, timestep,
                candidate_actions)
            q_values = self._predictor.predict(np_inputs)[prediction_key]
            if not self._prev_episode_data:
                # No conditioning episode yet: zero all scores so CEM has no
                # preference (presumably yields an uninformed sample; confirm).
                q_values *= 0
            return q_values[0]

        best_action, _ = self.get_cem_action(objective_fn)
        return best_action
@gin.configurable
class MAMLRegressionPolicy(MetaLearningPolicy, policies.RegressionPolicy):
    """Actor network policy that uses MAML/gradient descent for fast
    adaptation.

    This is basically the same as the RL2 policy (only changed t2r_model)
    """

    def reset_task(self):
        # Forget the conditioning episode when a new task begins.
        self._prev_episode_data = None

    def adapt(self, episode_data):
        # Store the conditioning episode used for fast adaptation.
        self._prev_episode_data = episode_data

    def sample_action(self, obs, explore_prob):
        del explore_prob
        chosen_action = self.SelectAction(obs, None, None)
        # Replay writers require the is_demo flag when forming MetaExamples.
        return chosen_action, {'is_demo': False}

    def SelectAction(self, state, context, timestep):
        np_features = self._t2r_model.pack_features(
            state, self._prev_episode_data, timestep)
        # This key must be 'inference_output' b.c. MAMLModel performs a check
        # for this key.
        prediction = self._predictor.predict(np_features)['inference_output']
        # TODO(allanz): Rank 4 actions are due to VRGripperRegressionModel
        # having an additional time dimension. Remove this once we have a
        # better way to handle multiple timesteps.
        rank = len(prediction.shape)
        if rank == 4:
            return prediction[0, 0, 0]
        if rank == 3:
            return prediction[0, 0]
        raise ValueError('Invalid action rank.')
@gin.configurable
class FixedLengthSequentialRegressionPolicy(
        MetaLearningPolicy, policies.RegressionPolicy):
    """Fixed Episode Length sequential policy. a_t is t'th output of model."""

    def reset_task(self):
        # prev_episode_data is the conditioning episode, e.g. a demo.
        self._prev_episode_data = None

    def adapt(self, episode_data):
        self._prev_episode_data = episode_data

    def reset(self):
        # current episode data is the temporal context for the current
        # episode; _t indexes the next timestep's action.
        self._current_episode_data = None
        self._t = 0

    def SelectAction(self, state, context, timestep):
        np_features = self._t2r_model.pack_features(
            state, self._prev_episode_data, self._current_episode_data,
            self._t)
        # Action is [batch, inference_episode, T, action_dim].
        predicted = self._predictor.predict(np_features)['inference_output']
        self._current_episode_data = np_features
        assert len(predicted.shape) == 4
        step_action = predicted[0, 0, self._t]
        self._t += 1
        return step_action
@gin.configurable
class ScheduledExplorationMAMLRegressionPolicy(
        MetaLearningPolicy, policies.ScheduledExplorationRegressionPolicy):
    """Like MAMLRegressionPolicy, but with scheduled action noise for
    exploration.
    """

    def reset_task(self):
        # Forget the conditioning episode when a new task begins.
        self._prev_episode_data = None

    def adapt(self, episode_data):
        self._prev_episode_data = episode_data

    def sample_action(self, obs, explore_prob):
        del explore_prob
        noisy_action = self.SelectAction(obs, None, None)
        # Replay writers require the is_demo flag when forming MetaExamples.
        return noisy_action, {'is_demo': False}

    def SelectAction(self, state, context, timestep):
        del context
        np_features = self._t2r_model.pack_features(
            state, self._prev_episode_data, timestep)
        raw_action = self._predictor.predict(np_features)['inference_output']
        # TODO(allanz): Rank 4 actions are due to VRGripperRegressionModel
        # having an additional time dimension. Remove this once we have a
        # better way to handle multiple timesteps.
        rank = len(raw_action.shape)
        if rank == 4:
            action = raw_action[0, 0, 0]
        elif rank == 3:
            action = raw_action[0, 0]
        else:
            raise ValueError('Invalid action rank.')
        # Exploration noise from the parent's schedule.
        return action + self.get_noise()
| {
"content_hash": "ccd9110a10a7e213227192015b011af9",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 80,
"avg_line_length": 32.93010752688172,
"alnum_prop": 0.6435918367346939,
"repo_name": "google-research/tensor2robot",
"id": "60c75b9c4f12ba560962a00af54f25d85b952b34",
"size": "6730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meta_learning/meta_policies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1124212"
}
],
"symlink_target": ""
} |
import httplib
try:
import simplejson as json
except ImportError:
import json
import os
import sys
from urlparse import urljoin
try:
import requests
except ImportError:
raise ImportError('Missing dependency "requests". Do ``pip install requests``.')
try:
import yaml
except ImportError:
raise ImportError('Missing dependency "pyyaml". Do ``pip install pyyaml``.')
# ST2 configuration
# Defaults below; the credential/URL values are overwritten from
# ST2_CONFIG_FILE when this module runs as a script (see __main__).
ST2_CONFIG_FILE = './config.yaml'

ST2_API_BASE_URL = 'http://localhost:9101/v1'
ST2_AUTH_BASE_URL = 'http://localhost:9100'
ST2_USERNAME = None
ST2_PASSWORD = None
ST2_AUTH_TOKEN = None
ST2_AUTH_PATH = 'tokens'

ST2_WEBHOOKS_PATH = 'webhooks/st2/'
ST2_TRIGGERS_PATH = 'triggertypes/'
ST2_TRIGGERTYPE_PACK = 'sensu'
ST2_TRIGGERTYPE_NAME = 'event_handler'
ST2_TRIGGERTYPE_REF = '.'.join([ST2_TRIGGERTYPE_PACK, ST2_TRIGGERTYPE_NAME])

# Flipped to True once _register_with_st2() completes without error.
REGISTERED_WITH_ST2 = False
# HTTP statuses treated as success (CONFLICT included — presumably means
# "already registered"; confirm against the st2 API).
OK_CODES = [httplib.OK, httplib.CREATED, httplib.ACCEPTED, httplib.CONFLICT]
def _create_trigger_type():
    """Register the sensu event_handler trigger type with st2.

    Raises:
        Exception: if the POST cannot be made or returns a non-OK status.
    """
    try:
        url = _get_st2_triggers_url()
        payload = {
            'name': ST2_TRIGGERTYPE_NAME,
            'pack': ST2_TRIGGERTYPE_PACK,
            'description': 'Trigger type for sensu event handler.'
        }
        # sys.stdout.write('POST: %s: Body: %s\n' % (url, payload))
        headers = {}
        headers['Content-Type'] = 'application/json; charset=utf-8'
        if ST2_AUTH_TOKEN:
            headers['X-Auth-Token'] = ST2_AUTH_TOKEN
        post_resp = requests.post(url, data=json.dumps(payload), headers=headers)
    except:
        sys.stderr.write('Unable to register trigger type with st2.')
        raise
    else:
        status = post_resp.status_code
        if status not in OK_CODES:
            sys.stderr.write('Failed to register trigger type with st2. HTTP_CODE: %d\n' %
                             status)
            # Bug fix: this was a bare `raise`, but there is no active
            # exception inside this `else:` branch, so it would re-raise a
            # stale exception or fail outright. Raise an explicit error.
            raise Exception(
                'Failed to register trigger type with st2. HTTP_CODE: %d' %
                status)
        else:
            sys.stdout.write('Registered trigger type with st2.\n')
def _get_auth_url():
    """Return the full URL of the st2 auth tokens endpoint."""
    auth_endpoint = urljoin(ST2_AUTH_BASE_URL, ST2_AUTH_PATH)
    return auth_endpoint
def _get_auth_token():
    """Fetch an auth token from st2 and store it in ST2_AUTH_TOKEN.

    Raises:
        Exception: if the token request fails for any reason.
    """
    global ST2_AUTH_TOKEN
    auth_url = _get_auth_url()
    try:
        resp = requests.post(auth_url, json.dumps({'ttl': 5 * 60}),
                             auth=(ST2_USERNAME, ST2_PASSWORD))
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate instead of being rewrapped.
    except Exception:
        raise Exception('Cannot get auth token from st2. Will try unauthed.')
    else:
        ST2_AUTH_TOKEN = resp.json()['token']
def _register_with_st2():
    """Ensure the sensu trigger type exists in st2, creating it if absent.

    Sets REGISTERED_WITH_ST2 on success; any failure propagates to the
    caller.
    """
    global REGISTERED_WITH_ST2
    # Idiom fix: the original wrapped this whole body in a no-op
    # `try: ... except: raise`, which re-raises everything unchanged; the
    # wrapper has been removed with identical behavior.
    url = urljoin(_get_st2_triggers_url(), ST2_TRIGGERTYPE_REF)
    # sys.stdout.write('GET: %s\n' % url)
    if not ST2_AUTH_TOKEN:
        _get_auth_token()

    if ST2_AUTH_TOKEN:
        get_resp = requests.get(url, headers={'X-Auth-Token': ST2_AUTH_TOKEN})
    else:
        get_resp = requests.get(url)

    if get_resp.status_code != httplib.OK:
        _create_trigger_type()
    else:
        body = json.loads(get_resp.text)
        if len(body) == 0:
            _create_trigger_type()
    REGISTERED_WITH_ST2 = True
def _get_st2_triggers_url():
    """Return the st2 API URL for trigger type resources."""
    return urljoin(ST2_API_BASE_URL, ST2_TRIGGERS_PATH)
def _get_st2_webhooks_url():
    """Return the st2 API URL for the sensu webhook endpoint."""
    return urljoin(ST2_API_BASE_URL, ST2_WEBHOOKS_PATH)
def _post_event_to_st2(url, body):
    """POST a sensu event payload to the st2 webhook endpoint (best-effort).

    Args:
        url: The st2 webhook URL.
        body: JSON-serializable payload to send.

    Failures are logged to stderr; nothing is raised.
    """
    headers = {}
    headers['X-ST2-Integration'] = 'sensu.'
    headers['Content-Type'] = 'application/json; charset=utf-8'
    if ST2_AUTH_TOKEN:
        headers['X-Auth-Token'] = ST2_AUTH_TOKEN
    try:
        sys.stdout.write('POST: url: %s, body: %s\n' % (url, body))
        r = requests.post(url, data=json.dumps(body), headers=headers)
    # Narrowed from a bare `except:`; this is a deliberate best-effort
    # send — log and move on.
    except Exception:
        sys.stderr.write('Cannot connect to st2 endpoint.')
    else:
        status = r.status_code
        if status not in OK_CODES:
            sys.stderr.write('Failed posting sensu event to st2. HTTP_CODE: %d\n' % status)
        else:
            sys.stdout.write('Sent sensu event to st2. HTTP_CODE: %d\n' % status)
def main(args):
    """Read one sensu event from stdin and forward it to st2 as a webhook."""
    payload = json.loads(sys.stdin.read().strip())
    body = {'trigger': ST2_TRIGGERTYPE_REF, 'payload': payload}
    _post_event_to_st2(_get_st2_webhooks_url(), body)
if __name__ == '__main__':
    try:
        # Load connection settings and credentials from the sidecar YAML file.
        if not os.path.exists(ST2_CONFIG_FILE):
            sys.stderr.write('Configuration file not found. Exiting.\n')
            sys.exit(1)
        with open(ST2_CONFIG_FILE) as f:
            config = yaml.safe_load(f)
        ST2_USERNAME = config['st2_username']
        ST2_PASSWORD = config['st2_password']
        ST2_API_BASE_URL = config['st2_api_base_url']
        ST2_AUTH_BASE_URL = config['st2_auth_base_url']
        # Make sure the trigger type exists before forwarding the event.
        if not REGISTERED_WITH_ST2:
            _register_with_st2()
    except:
        sys.stderr.write('Failed registering with st2. Won\'t post event.\n')
    else:
        main(sys.argv)
| {
"content_hash": "63394f3ee30605701f298ec174eeca8b",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 91,
"avg_line_length": 29.259036144578314,
"alnum_prop": 0.6001647107267861,
"repo_name": "lmEshoo/st2contrib",
"id": "a58753017f13e373805a049aa43b016262f98ef3",
"size": "4880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packs/sensu/etc/st2_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8530"
},
{
"name": "Makefile",
"bytes": "2262"
},
{
"name": "Python",
"bytes": "444890"
},
{
"name": "Shell",
"bytes": "3635"
}
],
"symlink_target": ""
} |
import logging
from collections import defaultdict
from dvc.repo import locked
from dvc.repo.scm_context import scm_context
from dvc.scm.base import RevError
from .utils import (
exp_refs,
exp_refs_by_baseline,
remote_exp_refs,
remote_exp_refs_by_baseline,
)
logger = logging.getLogger(__name__)
@locked
@scm_context
def ls(repo, *args, rev=None, git_remote=None, all_=False, **kwargs):
    """List experiment refs, grouped by their baseline commit.

    Args:
        repo: The DVC repo.
        rev: Optional revision to list experiments for; defaults to the
            current HEAD unless `all_` is set.
        git_remote: Optional git remote to query instead of the local repo.
        all_: List experiments for all baselines instead of a single rev.

    Returns:
        dict mapping baseline SHA -> list of experiment ref names.
    """
    from dvc.scm.git import Git

    if rev:
        try:
            rev = repo.scm.resolve_rev(rev)
        except RevError:
            # This could be a remote rev that has not been fetched yet
            if not (git_remote and Git.is_sha(rev)):
                raise
    elif not all_:
        rev = repo.scm.get_rev()

    results = defaultdict(list)
    if rev:
        ref_infos = (
            remote_exp_refs_by_baseline(repo.scm, git_remote, rev)
            if git_remote
            else exp_refs_by_baseline(repo.scm, rev))
        for ref_info in ref_infos:
            results[rev].append(ref_info.name)
    elif all_:
        ref_infos = (
            remote_exp_refs(repo.scm, git_remote)
            if git_remote
            else exp_refs(repo.scm))
        for ref_info in ref_infos:
            results[ref_info.baseline_sha].append(ref_info.name)

    return results
| {
"content_hash": "68dbec10f0a415f275ef0970943b4e06",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 74,
"avg_line_length": 25.24,
"alnum_prop": 0.5966719492868463,
"repo_name": "dmpetrov/dataversioncontrol",
"id": "812a39d6fc4a7bbac8dcf69d27681b16497df448",
"size": "1262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dvc/repo/experiments/ls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "127601"
},
{
"name": "Shell",
"bytes": "1677"
}
],
"symlink_target": ""
} |
from IPython.core.display import display_html
from IPython import get_ipython
comm = None
listeners = {}
def setListener(topic, callback):
    """Register callback to be invoked for comm messages on this topic."""
    listeners[topic] = callback
def dispatch(msg):
    """Route an incoming comm message to the listener for its topic.

    Messages with no registered listener are silently ignored.
    """
    # Note: because of the way comms are run, you can't print in these functions.
    # You can, however, hijack the comm_manager's logger, which prints to the kernel
    # stdout (wherever you have it running.)
    # e.g. get_ipython().kernel.comm_manager.log.error(...)
    msg = msg['content']['data']
    # Idiom fix: `in` instead of dict.has_key(), which is deprecated and
    # removed entirely in Python 3.
    if msg['topic'] in listeners:
        listeners[msg['topic']](msg['data'])
def handle_open(_comm, msg):
    # Called by the comm manager when the frontend opens our channel; store
    # the comm globally and hook up message dispatching.
    global comm
    # NOTE(review): `assert` is stripped under `python -O`; if open-data
    # validation matters in production, raise explicitly instead.
    assert msg['content']['data'] == 'ipycomms.opened'
    comm = _comm
    _comm.on_msg(dispatch)
def send(topic, data):
    """Publish data on the given topic over the open comm channel."""
    message = {'topic': topic, 'data': data}
    comm.send(message)
# The channel name is arbitrary.
get_ipython().kernel.comm_manager.register_target('ipycomms.channel', handle_open)

# Inject the client-side half: an IPython.ipycomms JS object that opens the
# comm and mirrors the Python setListener/send API in the notebook frontend.
display_html('''
<script>
IPython.ipycomms = {
init: function(){
console.log("[ipycomms] initializing")
this.comm = IPython.notebook.kernel.comm_manager.new_comm(
'ipycomms.channel',
"ipycomms.opened", // Initial data sent with open (arbitrary)
function(){}, // Callbacks
"ipycomms.meta") // Metadata (arbitrary)
this.comm.on_msg(_.bind(this.dispatch, this))
},
dispatch: function(msg){
msg = msg.content.data;
if (!this.topicHandlers.hasOwnProperty(msg.topic)){
console.log("[ipycomms] topic with no callback", msg.topic)
return;
}
this.topicHandlers[msg.topic](msg.data)
},
topicHandlers: {},
setListener: function(topic, callback){
this.topicHandlers[topic] = callback
},
send: function(topic, data){
this.comm.send({topic: topic, data: data})
}
}
IPython.ipycomms.init()
</script>
''', raw=True)
print "Injected script."
| {
"content_hash": "53430701ea57455331837a0b6907bda5",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 84,
"avg_line_length": 28.91176470588235,
"alnum_prop": 0.6230925737538149,
"repo_name": "sbirch/ipycomms",
"id": "af4155d299e3741f6529eb85da5ed2716fc7b456",
"size": "1966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipycomms/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2336"
}
],
"symlink_target": ""
} |
"""Keras legacy SavedModel saving."""
import os
import tensorflow.compat.v2 as tf
from absl import logging
from keras import backend
from keras.protobuf import saved_metadata_pb2
from keras.protobuf import versions_pb2
from keras.saving.legacy import saving_utils
from keras.saving.legacy import serialization
from keras.saving.legacy.saved_model import constants
from keras.saving.legacy.saved_model import save_impl
from keras.saving.legacy.saved_model import utils
from keras.utils.generic_utils import LazyLoader
from keras.utils.io_utils import ask_to_proceed_with_overwrite
# isort: off
from tensorflow.python.saved_model import save as save_lib
# To avoid circular dependencies between keras/engine and keras/saving,
# code in keras/saving must delay imports.
base_layer = LazyLoader("base_layer", globals(), "keras.engine.base_layer")
training_lib = LazyLoader("training_lib", globals(), "keras.engine.training")
def save(
    model,
    filepath,
    overwrite,
    include_optimizer,
    signatures=None,
    options=None,
    save_traces=True,
):
    """Saves a model as a SavedModel to the filepath.

    Args:
      model: Keras model instance to be saved.
      filepath: String path to save the model.
      overwrite: whether to overwrite the existing filepath.
      include_optimizer: If True, save the model's optimizer state.
      signatures: Signatures to save with the SavedModel. Applicable to the
        'tf' format only. Please see the `signatures` argument in
        `tf.saved_model.save` for details.
      options: (only applies to SavedModel format) `tf.saved_model.SaveOptions`
        object that specifies options for saving to SavedModel.
      save_traces: (only applies to SavedModel format) When enabled, the
        SavedModel will store the function traces for each layer. This
        can be disabled, so that only the configs of each layer are stored.
        Defaults to `True`. Disabling this will decrease serialization time
        and reduce file size, but it requires that all custom layers/models
        implement a `get_config()` method.

    Raises:
      ValueError: if the model's inputs have not been defined.
    """
    # If file exists and should not be overwritten.
    if not overwrite and os.path.exists(filepath):
        proceed = ask_to_proceed_with_overwrite(filepath)
        if not proceed:
            return

    if save_traces:
        # Tracing requires defined model inputs; raise a helpful error now.
        if save_impl.should_skip_serialization(model):
            saving_utils.raise_model_input_error(model)

    if not include_optimizer:
        # Temporarily detach the optimizer so it is not traced/saved;
        # it is reattached at the end of this function.
        orig_optimizer = model.optimizer
        model.optimizer = None
        # TODO(b/180760306) Change to del model.optimizer if Layer's __delattr__
        # calls AutoTrackable's __delattr__.
        model._delete_tracking("optimizer")

    # Trace all functions and signatures with `training=0` instead of using an
    # already-set learning phase placeholder.
    # This is needed for compatibility reasons until learning phase setting
    # is removed from the public apis.
    with serialization.SharedObjectSavingScope():
        with backend.deprecated_internal_learning_phase_scope(0):
            with utils.keras_option_scope(save_traces):
                saved_nodes, node_paths = save_lib.save_and_return_nodes(
                    model, filepath, signatures, options
                )

    # Save all metadata to a separate file in the SavedModel directory.
    metadata = generate_keras_metadata(saved_nodes, node_paths)

    with tf.io.gfile.GFile(
        tf.io.gfile.join(filepath, constants.SAVED_METADATA_PATH), "wb"
    ) as w:
        # Deterministic serialization keeps the metadata file byte-stable
        # across identical saves.
        w.write(metadata.SerializeToString(deterministic=True))

    if not include_optimizer:
        # Reattach the optimizer so the in-memory model remains usable.
        model.optimizer = orig_optimizer
def generate_keras_metadata(saved_nodes, node_paths):
    """Constructs a KerasMetadata proto with the metadata of each object.

    Only nodes that are Keras layers contribute entries; other saved nodes
    are skipped.
    """
    metadata = saved_metadata_pb2.SavedMetadata()

    for node_id, node in enumerate(saved_nodes):
        if not isinstance(node, base_layer.Layer):
            continue

        path = node_paths[node]
        if path:
            node_path = "root.{}".format(".".join(ref.name for ref in path))
        else:
            node_path = "root"

        metadata.nodes.add(
            node_id=node_id,
            node_path=node_path,
            version=versions_pb2.VersionDef(
                producer=2, min_consumer=1, bad_consumers=[]
            ),
            identifier=node._object_identifier,
            metadata=node._tracking_metadata,
        )

        # Log warning if the node's class name conflicts with a Keras
        # built-in object.
        class_name = node.__class__.__name__

        from keras.layers import serialization as layers_serialization

        builtin_layer = layers_serialization.get_builtin_layer(class_name)
        if builtin_layer and not isinstance(node, builtin_layer):
            logging.warning(
                "%s has the same name '%s' as a built-in Keras "
                "object. Consider renaming %s to avoid naming "
                "conflicts when loading with "
                "`tf.keras.models.load_model`. "
                "If renaming is not possible, pass "
                "the object in the `custom_objects` "
                "parameter of the load "
                "function.",
                node,
                class_name,
                node.__class__,
            )

    return metadata
| {
"content_hash": "16605bbff029c6286eb965dbb1756703",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 80,
"avg_line_length": 39.32167832167832,
"alnum_prop": 0.6343588831584563,
"repo_name": "keras-team/keras",
"id": "601f4c089ab4d51f4d81824c7020dd8501350efe",
"size": "6312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras/saving/legacy/saved_model/save.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "900"
},
{
"name": "Python",
"bytes": "11342063"
},
{
"name": "Shell",
"bytes": "11489"
},
{
"name": "Starlark",
"bytes": "273139"
}
],
"symlink_target": ""
} |
To install a new module, do this:
1. Download the package and make sure it's in your Downloads folder.
2. Navigate into that folder through the terminal.
3. Run ' pip3.4 install matplotlib ',
where you replace matplotlib with the name of the package.
| {
"content_hash": "928851a333055a0908f8d93cef1d761a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 38.5,
"alnum_prop": 0.7575757575757576,
"repo_name": "mannion9/Intro-to-Python",
"id": "b7f7421064071519f18ee496c8b72832f0777d53",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Install Module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64621"
}
],
"symlink_target": ""
} |
"""Metadiscourse.
---
layout: post
source: Pinker's book on writing
source_url: ???
title: metadiscourse
date: 2014-06-10 12:31:19
categories: writing
---
Points out metadiscourse.
"""
from proselint.tools import existence_check, memoize
@memoize
def check(text):
    """Flag excessive metadiscourse phrases in the text.

    Args:
        text: The document text to scan.

    Returns:
        The list of error tuples produced by `existence_check`.
    """
    err = "pinker.metadiscourse"
    msg = "Excessive metadiscourse."

    metadiscourse = [
        # Bug fix: was "The preceeding discussion" (misspelled), which could
        # never match correctly spelled prose; spelling now matches the
        # "The preceding paragraph demonstrated" entry below.
        "The preceding discussion",
        "The rest of this article",
        "This chapter discusses",
        "The preceding paragraph demonstrated",
        "The previous section analyzed",
    ]

    return existence_check(text, metadiscourse, err, msg)
| {
"content_hash": "4e09142747b02947f1ba9b5660e79a3a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 57,
"avg_line_length": 21.28125,
"alnum_prop": 0.6549192364170338,
"repo_name": "amperser/proselint",
"id": "477a5812ecc936b868c1477b762f55d679ef1815",
"size": "681",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "proselint/checks/misc/metadiscourse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2558"
},
{
"name": "HTML",
"bytes": "241413"
},
{
"name": "JavaScript",
"bytes": "249832"
},
{
"name": "Procfile",
"bytes": "83"
},
{
"name": "Python",
"bytes": "333207"
},
{
"name": "Ruby",
"bytes": "364"
},
{
"name": "SCSS",
"bytes": "30668"
},
{
"name": "Shell",
"bytes": "1830"
}
],
"symlink_target": ""
} |
"""
WSGI config for protocolle project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os

# Must be set before importing anything from django so settings resolve.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "protocolle.settings")

from django.core.wsgi import get_wsgi_application

# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()

# Optional static-file serving via dj-static; intentionally left disabled.
# from dj_static import Cling
# application = Cling(get_wsgi_application())
| {
"content_hash": "6bb3b91f86cc88d6944b23f7d8f2d127",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 26.27777777777778,
"alnum_prop": 0.7695560253699789,
"repo_name": "klebercode/protocolle",
"id": "9ef3f2e76d3edd0e494b707c135db599bc1e852c",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protocolle/wsgi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "511"
},
{
"name": "HTML",
"bytes": "25408"
},
{
"name": "JavaScript",
"bytes": "3379"
},
{
"name": "Python",
"bytes": "70100"
}
],
"symlink_target": ""
} |
import os
import re
import subprocess
from django.utils.text import slugify
from django.conf import settings
from django.core.cache import cache
# Options recognized by fab itself rather than by a task.  They are passed to
# Fabric as: fab task --abort-on-prompts=True --user=root ...
# Note: 'key_filename' is handled specially in build_command (emitted as
# `-i <path>` instead of a long option).
fabric_special_options = ['no_agent', 'forward-agent', 'config', 'disable-known-hosts', 'keepalive',
                          'password', 'parallel', 'no-pty', 'reject-unknown-hosts', 'skip-bad-hosts', 'timeout',
                          'command-timeout', 'user', 'warn-only', 'pool-size', 'key_filename']
def check_output(command, shell=False):
    """Run *command* and return its stdout (subprocess.check_output wrapper).

    When shell=True, the optional ``SHELL`` Django setting (default
    ``/bin/sh``) is used as the shell executable; otherwise no executable
    override is applied.
    """
    exe = getattr(settings, 'SHELL', '/bin/sh') if shell else None
    return subprocess.check_output(command, shell=shell, executable=exe)
def check_output_with_ssh_key(command):
    """Run a shell command, first loading the deploy SSH key if configured.

    When ``GIT_SSH_KEY_LOCATION`` is set, *command* is wrapped in an
    ssh-agent session with that key added; otherwise it runs directly
    through the shell.
    """
    key_file = getattr(settings, 'GIT_SSH_KEY_LOCATION', None)
    if key_file:
        wrapped = 'ssh-agent bash -c "ssh-add {};{}"'.format(key_file, command)
        return check_output(wrapped, shell=True)
    return check_output([command], shell=True)
def update_project_git(project, cache_dir, repo_dir):
    """Clone the project's repository into *repo_dir*, or refresh it.

    First run: create the cache directory if needed and clone.  Existing
    checkout: stash any local noise, then pull.
    """
    if os.path.exists(repo_dir):
        check_output_with_ssh_key(
            'cd {0};git stash;git pull'.format(repo_dir)
        )
    else:
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        check_output_with_ssh_key('git clone {} {}'.format(project.repo_url, repo_dir))
def setup_virtual_env_if_needed(repo_dir):
    """Create a virtualenv under <repo_dir>/env the first time through."""
    env_dir = os.path.join(repo_dir, 'env')
    if os.path.exists(env_dir):
        return
    os.makedirs(env_dir)
    check_output("virtualenv {}".format(env_dir), shell=True)
def update_project_requirements(project, repo_dir, activate_loc):
    """pip-install the project's fabfile requirements inside its virtualenv."""
    packages = ' '.join(project.fabfile_requirements.splitlines())
    check_output_with_ssh_key(
        'source {} && cd {};pip install {}'.format(activate_loc, repo_dir, packages))
def get_fabfile_path(project):
    """Return (fabfile_path, activate_script_path) for *project*.

    When the project hosts its own fabfile, clone/refresh its repository,
    ensure a virtualenv exists with the fabfile's requirements installed,
    and cache the resulting pair.  Otherwise fall back to the global
    FABFILE_PATH setting with no virtualenv (activate path is None).
    """
    if project.use_repo_fabfile:
        cache_key = 'project_{}_fabfile_path'.format(project.pk)
        cached_result = cache.get(cache_key)
        if cached_result:
            return cached_result

        cache_dir = os.path.join(settings.PUBLIC_DIR, '.repo_caches')
        repo_dir = os.path.join(cache_dir, slugify(project.name))

        update_project_git(project, cache_dir, repo_dir)
        setup_virtual_env_if_needed(repo_dir)
        activate_loc = os.path.join(repo_dir, 'env', 'bin', 'activate')
        update_project_requirements(project, repo_dir, activate_loc)

        result = os.path.join(repo_dir, 'fabfile.py'), activate_loc
        cache.set(cache_key, result, settings.FABRIC_TASK_CACHE_TIMEOUT)
        return result
    else:
        return settings.FABFILE_PATH, None
def parse_task_details(name, task_output):
    """Parse ``fab --display=<task>`` output into (name, docstring, arguments).

    The docstring is the stripped body between the header and the trailing
    "Arguments:" line; the sentinel 'No docstring provided' becomes None.
    Arguments are plain names, or (name, default) pairs when the default is
    a quoted string (fab passes everything as strings, so non-string
    defaults are ignored).
    """
    raw_lines = task_output.splitlines()
    doc = '\n'.join(l.strip() for l in raw_lines[2:-2]).strip()
    if doc == 'No docstring provided':
        doc = None
    # Drop the 11-character "Arguments: " prefix from the second-to-last line.
    argspec = raw_lines[-2].strip()[11:].strip()

    parsed = []
    if argspec:
        pattern = re.compile(r"^([^=]+)(=(\'?)([^']*)\3)?$")
        for token in argspec.split(', '):
            match = pattern.match(token)
            arg_name, default_part, quote, default_val = match.group(1, 2, 3, 4)
            if default_part and quote == "'":
                # String default: keep the (name, default) pair.
                parsed.append((arg_name, default_val))
            else:
                # No default, or a non-string default (meaningless to fab).
                parsed.append(arg_name)
    return name, doc, parsed
def get_fabric_tasks(project):
    """Return the list of fabric tasks available for *project*.

    Each entry is a (name, docstring, arguments) tuple from
    parse_task_details().  Results are cached.  Listing is best-effort:
    on any failure an empty list is returned — but the failure is now
    logged instead of being silently discarded (the previous code bound
    the exception to an unused variable and dropped it).
    """
    cache_key = 'project_{}_fabfile_tasks'.format(project.pk)
    cached_result = cache.get(cache_key)
    if cached_result:
        return cached_result

    try:
        fabfile_path, activate_loc = get_fabfile_path(project)

        # List the short task names, inside the project virtualenv if any.
        if activate_loc:
            output = check_output('source {};fab --list --list-format=short --fabfile={}'.format(activate_loc, fabfile_path), shell=True)
        else:
            output = check_output(['fab', '--list', '--list-format=short', '--fabfile={}'.format(fabfile_path)])

        lines = output.splitlines()
        tasks = []
        for line in lines:
            name = line.strip()
            # Fetch each task's docstring/argument details.
            if activate_loc:
                o = check_output(
                    'source {};fab --display={} --fabfile={}'.format(activate_loc, name, fabfile_path),
                    shell=True
                )
            else:
                o = check_output(
                    ['fab', '--display={}'.format(name), '--fabfile={}'.format(fabfile_path)]
                )
            tasks.append(parse_task_details(name, o))
        cache.set(cache_key, tasks, settings.FABRIC_TASK_CACHE_TIMEOUT)
    except Exception:
        # Best-effort, but record why the task list could not be built.
        import logging
        logging.getLogger(__name__).exception(
            'Failed to list fabric tasks for project %s', project.pk)
        tasks = []
    return tasks
def get_task_details(project, task_name):
    """Return the (name, docstring, arguments) tuple for *task_name*.

    Returns None when the project's fabfile defines no such task.
    """
    return next(
        (details for details in get_fabric_tasks(project) if details[0] == task_name),
        None,
    )
def clean_key_string(key):
    """Escape a configuration key for use inside a fab ``--set "k=v,..."`` list.

    Double quotes are backslash-escaped for the surrounding shell quoting;
    commas and equals signs are backslash-escaped so Fabric does not treat
    them as extra key/value separators.

    Fix: the comma/equals replacements used '\\,' and '\\=' spelled without
    raw strings ('\,' / '\='), which are invalid escape sequences —
    deprecated today and a SyntaxError in future Python versions.  Raw
    strings produce the identical characters.
    """
    key = key.replace('"', '\\"')   # escape double quotes
    key = key.replace(',', r'\,')   # escape commas, which would add a new value
    key = key.replace('=', r'\=')   # escape =, which would set a new key
    return key
def clean_value_string(value):
    """Escape a configuration value for use inside a fab ``--set`` list.

    Same escaping as clean_key_string: double quotes for shell quoting,
    commas/equals so Fabric does not split the entry.

    Fix: '\\,' and '\\=' were spelled as invalid escape sequences
    ('\,' / '\='); raw strings produce the identical characters without
    the deprecation (future SyntaxError).
    """
    value = value.replace('"', '\\"')   # escape double quotes
    value = value.replace(',', r'\,')   # escape commas, which would add a new value
    value = value.replace('=', r'\=')   # escape =, which would set a new key
    return value
def clean_arg_key_string(key):
    """Reduce *key* to characters valid in a Python function argument name.

    Task arguments become Python identifiers, so be strict: strip anything
    that is not a letter, digit, or underscore.
    """
    return re.sub(r'[^0-9a-zA-Z_]', '', key)
def get_key_value_string(key, config):
    """Render one ``--set`` entry for *config* under *key*.

    Booleans: bare key for True, "key=" for False.  Numbers: stringified.
    Everything else: escaped string value.
    """
    key = clean_key_string(key)
    value = config.get_value()
    if config.data_type == config.BOOLEAN_TYPE:
        return key if value else key + '='
    if config.data_type == config.NUMBER_TYPE:
        return '{}={}'.format(key, value)
    return '{}={}'.format(key, clean_value_string(value))
def update_config_values_from_session(configs, session):
    """Overlay per-run values the user entered (held in the session) onto a
    copy of *configs*.

    Consumed values are deleted from session['configuration_values']; any
    leftovers (keys with no matching configuration) are returned separately
    so they can still become task arguments.

    :returns: (configs_copy, leftover_values_dict)
    """
    configs = configs.copy()  # don't mutate the caller's dict
    for key, config in configs.iteritems():  # Python 2 codebase
        if session.get('configuration_values', {}).get(key, None) is not None:
            config.set_value(session['configuration_values'][key])
            del session['configuration_values'][key]
    # Whatever remains had no matching configuration object.
    arg_values = session.get('configuration_values', {})
    return configs, arg_values
def build_command(deployment, session, abort_on_prompts=True):
    """Assemble the full ``fab`` shell command line for *deployment*.

    Stage configuration values (overlaid with per-run values from
    *session*) are split into task arguments (``task:key="value"``),
    generic ``--set`` entries, and Fabric's own command-line options;
    hosts and the fabfile location are appended at the end.
    """
    # Get the dictionary of configurations for this stage
    configs = deployment.stage.get_configurations()

    configs, arg_values = update_config_values_from_session(configs, session)

    # Keys that are arguments to this deployment's task vs. generic --set keys.
    task_args = [key for key, config in configs.iteritems() if config.task_argument and config.task_name == deployment.task.name]

    task_configs = [key for key, config in configs.iteritems() if not config.task_argument]

    # Map underscore spellings back to fabric's dashed option names.
    command_to_config = {x.replace('-', '_'): x for x in fabric_special_options}

    # Take the special env variables out
    normal_task_configs = list(set(task_configs) - set(command_to_config.keys()))

    # Special ones get set a different way
    special_task_configs = list(set(task_configs) & set(command_to_config.keys()))

    command = 'fab ' + deployment.task.name

    task_details = get_task_details(deployment.stage.project, deployment.task.name)

    # Merge configured argument keys with those declared by the fabfile itself
    # (task_details[2] holds names or (name, default) pairs).
    task_args = list(set(task_args + [x[0] if isinstance(x, tuple) else x for x in task_details[2]]))

    if task_args:
        key_value_strings = []
        for key in task_args:
            # Prefer a configuration value; fall back to a raw session value.
            if key in configs:
                value = unicode(configs[key].get_value())
            elif key in arg_values:
                value = unicode(arg_values[key])
            else:
                continue

            cleaned_key = clean_arg_key_string(key)
            value = clean_value_string(value)
            key_value_strings.append('{}="{}"'.format(cleaned_key, value))

        if key_value_strings:
            # fab task:arg="v",arg2="v2"
            command += ':'
            command += ','.join(key_value_strings)

    if normal_task_configs:
        command += ' --set '
        command += '"' + ','.join(get_key_value_string(key, configs[key]) for key in normal_task_configs) + '"'

    if special_task_configs:
        for key in special_task_configs:
            if key == 'key_filename':
                # SSH identity file uses fab's -i flag, not a long option.
                command += ' -i ' + configs[key].get_value()
            else:
                command += ' --' + get_key_value_string(command_to_config[key], configs[key])

    if abort_on_prompts:
        command += ' --abort-on-prompts'

    hosts = deployment.stage.hosts.values_list('name', flat=True)
    if hosts:
        command += ' --hosts=' + ','.join(hosts)

    fabfile_path, active_loc = get_fabfile_path(deployment.stage.project)
    command += ' --fabfile={}'.format(fabfile_path)

    # Run inside the project's virtualenv when one exists.
    if active_loc:
        return 'source {};'.format(active_loc) + ' ' + command
    else:
        return command
"content_hash": "2db4abb07538bee1c4c4ed830acb0a33",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 137,
"avg_line_length": 34.8544776119403,
"alnum_prop": 0.6065731720372551,
"repo_name": "gvangool/fabric-bolt",
"id": "aff975c0c9e94ffa3dcc8f2be0f26e2bca6b7f0c",
"size": "9341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabric_bolt/projects/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "838"
},
{
"name": "HTML",
"bytes": "63357"
},
{
"name": "JavaScript",
"bytes": "102422"
},
{
"name": "Python",
"bytes": "200391"
}
],
"symlink_target": ""
} |
from splinter.exceptions import ElementDoesNotExist
class ElementList:
    """Collection of elements.

    Each member is normally an instance of
    :class:`ElementAPI <splinter.driver.ElementAPI>`.

    Indexing never raises ``IndexError``: a missing item raises
    :class:`ElementDoesNotExist <splinter.exceptions.ElementDoesNotExist>`
    instead.  Unknown attributes are delegated first to the first element,
    then to the underlying list.

    >>> element_list = ElementList([])
    >>> element_list[0]  # raises ElementDoesNotExist
    """

    def __init__(self, list, driver=None, find_by=None, query=None) -> None:  # NOQA: A002
        self._container = [element for element in list]
        self.driver = driver
        self.find_by = find_by
        self.query = query

    def __getitem__(self, index):
        if isinstance(index, (int, slice)):
            try:
                return self._container[index]
            except IndexError:
                raise ElementDoesNotExist(
                    u'no elements could be found with {0} "{1}"'.format(
                        self.find_by, self.query
                    )
                )
        # Non-numeric keys are forwarded to the first element
        # (e.g. element_list["href"]).
        return self.first[index]

    @property
    def first(self):
        """The first element of the list.

        >>> assert element_list[0] == element_list.first
        """
        return self[0]

    @property
    def last(self):
        """The last element of the list.

        >>> assert element_list[-1] == element_list.last
        """
        return self[-1]

    def is_empty(self) -> bool:
        """Return True when the list holds no elements."""
        return not len(self)

    def __getattr__(self, name):
        try:
            return getattr(self.first, name)
        except AttributeError:
            try:
                return getattr(self._container, name)
            except AttributeError:
                raise AttributeError(
                    u"'{0}' object has no attribute '{1}'".format(
                        self.__class__.__name__, name
                    )
                )

    def __iter__(self):
        yield from self._container

    def __len__(self) -> int:
        """Delegate to the internal container."""
        return len(self._container)

    def __repr__(self) -> str:
        """Delegate to the internal container."""
        return repr(self._container)
| {
"content_hash": "95d8fba712d9e4a183259eabee89d4be",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 90,
"avg_line_length": 28.978494623655912,
"alnum_prop": 0.5573283858998145,
"repo_name": "cobrateam/splinter",
"id": "81d0f6c6a283e7b67520fdf2c0dded873091689d",
"size": "2879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "splinter/element_list.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "15004"
},
{
"name": "Makefile",
"bytes": "809"
},
{
"name": "Python",
"bytes": "271187"
},
{
"name": "Shell",
"bytes": "615"
}
],
"symlink_target": ""
} |
import os
import six
from tests import base
from girder import events
from girder.constants import ROOT_DIR
from girder.utility.model_importer import ModelImporter
from PIL import Image
def setUpModule():
    # Register the thumbnails plugin before the test server starts so its
    # routes and event bindings are loaded.
    base.enabledPlugins.append('thumbnails')
    base.startServer()
def tearDownModule():
    # Shut the test server down after all tests in this module have run.
    base.stopServer()
class ThumbnailsTestCase(base.TestCase):
    """Integration tests for the thumbnails plugin's /thumbnail endpoint."""

    def setUp(self):
        """Create an admin and a regular user and locate the admin's folders."""
        base.TestCase.setUp(self)

        # Create some test documents with an item
        admin = {
            'email': 'admin@email.com',
            'login': 'adminlogin',
            'firstName': 'Admin',
            'lastName': 'Last',
            'password': 'adminpassword',
            'admin': True
        }
        self.admin = self.model('user').createUser(**admin)

        user = {
            'email': 'good@email.com',
            'login': 'goodlogin',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'goodpassword',
            'admin': False
        }
        self.user = self.model('user').createUser(**user)

        folders = self.model('folder').childFolders(
            parent=self.admin, parentType='user', user=self.admin)
        for folder in folders:
            if folder['public'] is True:
                self.publicFolder = folder
            else:
                self.privateFolder = folder

        # Reset any creation-override binding left by a previous test.
        events.unbind('thumbnails.create', 'test')

    def testThumbnailCreation(self):
        """End-to-end: upload an image, attach/detach thumbnails, check ACLs."""
        path = os.path.join(ROOT_DIR, 'clients', 'web', 'static', 'img',
                            'Girder_Mark.png')
        with open(path, 'rb') as file:
            data = file.read()

        # Upload the girder logo to the admin's public folder
        resp = self.request(
            path='/file', method='POST', user=self.admin, params={
                'parentType': 'folder',
                'parentId': self.publicFolder['_id'],
                'name': 'test.png',
                'size': len(data)
            })
        self.assertStatusOk(resp)
        uploadId = resp.json['_id']

        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'test.png', data)]
        resp = self.multipartRequest(
            path='/file/chunk', fields=fields, files=files, user=self.admin)
        self.assertStatusOk(resp)
        itemId = resp.json['itemId']
        fileId = resp.json['_id']

        params = {
            'fileId': fileId,
            'width': 64,
            'attachToId': str(self.admin['_id']),
            'attachToType': 'user'
        }

        # We shouldn't be able to add thumbnails without write access to the
        # target resource.
        resp = self.request(
            path='/thumbnail', method='POST', user=self.user, params=params)
        self.assertStatus(resp, 403)

        # Should complain if we don't pass a width or a height
        del params['width']
        params['attachToId'] = str(self.user['_id'])
        resp = self.request(
            path='/thumbnail', method='POST', user=self.user, params=params)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'], 'You must specify a valid width,'
                         ' height, or both.')

        # Set a width, we should now correctly have a thumbnail
        params['width'] = 64
        resp = self.request(
            path='/thumbnail', method='POST', user=self.user, params=params)
        self.assertStatusOk(resp)
        job = resp.json

        # Thumbnail creation runs as a job; it should have completed inline.
        from girder.plugins.jobs.constants import JobStatus
        self.assertEqual(job['status'], JobStatus.SUCCESS)

        self.user = self.model('user').load(self.user['_id'], force=True)
        self.assertEqual(len(self.user['_thumbnails']), 1)

        thumbnailId = self.user['_thumbnails'][0]

        resp = self.request('/file/%s/download' % str(thumbnailId),
                            isJson=False)
        data = self.getBody(resp, text=False)

        # Width-only request preserves the square aspect of the source image.
        image = Image.open(six.BytesIO(data))
        self.assertEqual(image.size, (64, 64))

        # Delete the thumbnail, it should be removed from the user thumb list
        resp = self.request('/file/%s' % str(thumbnailId), method='DELETE',
                            user=self.user)
        self.assertStatusOk(resp)
        self.assertEqual(self.model('file').load(thumbnailId), None)
        self.user = self.model('user').load(self.user['_id'], force=True)
        self.assertEqual(len(self.user['_thumbnails']), 0)

        # Attach a thumbnail to the admin's public folder
        resp = self.request(
            path='/thumbnail', method='POST', user=self.admin, params={
                'width': 64,
                'height': 32,
                'crop': True,
                'attachToId': str(self.publicFolder['_id']),
                'attachToType': 'folder',
                'fileId': fileId
            })
        self.assertStatusOk(resp)
        self.publicFolder = self.model('folder').load(
            self.publicFolder['_id'], force=True)
        self.assertEqual(len(self.publicFolder['_thumbnails']), 1)

        thumbnailId = self.publicFolder['_thumbnails'][0]
        resp = self.request('/file/%s/download' % str(thumbnailId),
                            isJson=False)
        data = self.getBody(resp, text=False)

        # Explicit width+height with crop yields the exact requested size.
        image = Image.open(six.BytesIO(data))
        self.assertEqual(image.size, (64, 32))

        # Deleting the public folder should delete the thumbnail as well
        self.model('folder').remove(self.publicFolder)
        self.assertEqual(self.model('file').load(thumbnailId), None)

    def testCreateThumbnailOverride(self):
        """A thumbnails.create handler can replace the generated thumbnail."""
        def override(event):
            # Override thumbnail creation -- just grab the first 4 bytes
            file = event.info['file']
            streamFn = event.info['streamFn']
            stream = streamFn()
            contents = b''.join(stream())
            uploadModel = ModelImporter.model('upload')
            upload = uploadModel.createUpload(
                user=self.admin, name='magic', parentType=None, parent=None,
                size=4)
            thumbnail = uploadModel.handleChunk(upload, contents[:4])
            event.addResponse({
                'file': thumbnail
            })
            event.preventDefault()

        events.bind('thumbnails.create', 'test', override)

        path = os.path.join(ROOT_DIR, 'clients', 'web', 'static', 'img',
                            'Girder_Mark.png')
        with open(path, 'rb') as file:
            data = file.read()

        # Upload the girder logo to the admin's public folder
        resp = self.request(
            path='/file', method='POST', user=self.admin, params={
                'parentType': 'folder',
                'parentId': self.publicFolder['_id'],
                'name': 'test.png',
                'size': len(data)
            })
        self.assertStatusOk(resp)
        uploadId = resp.json['_id']

        fields = [('offset', 0), ('uploadId', uploadId)]
        files = [('chunk', 'test.png', data)]
        resp = self.multipartRequest(
            path='/file/chunk', fields=fields, files=files, user=self.admin)
        self.assertStatusOk(resp)
        itemId = resp.json['itemId']
        fileId = resp.json['_id']

        # Attach a thumbnail to the admin's public folder
        resp = self.request(
            path='/thumbnail', method='POST', user=self.admin, params={
                'width': 64,
                'height': 32,
                'crop': True,
                'attachToId': str(self.publicFolder['_id']),
                'attachToType': 'folder',
                'fileId': fileId
            })
        self.assertStatusOk(resp)

        # Download the new thumbnail
        folder = self.model('folder').load(self.publicFolder['_id'], force=True)
        self.assertEqual(len(folder['_thumbnails']), 1)

        thumbnail = self.model('file').load(folder['_thumbnails'][0], force=True)
        self.assertEqual(thumbnail['attachedToType'], 'folder')
        self.assertEqual(thumbnail['attachedToId'], folder['_id'])

        # Its contents should be the PNG magic number
        stream = self.model('file').download(thumbnail, headers=False)
        self.assertEqual(b'\x89PNG', b''.join(stream()))
| {
"content_hash": "9643fa936ed22edc640c01803550364d",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 81,
"avg_line_length": 36.34361233480176,
"alnum_prop": 0.5536969696969697,
"repo_name": "chrismattmann/girder",
"id": "ab904f1aa5a724b9b895217d212037007334547d",
"size": "9039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/thumbnails/plugin_tests/thumbnail_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "36635"
},
{
"name": "CSS",
"bytes": "156740"
},
{
"name": "HTML",
"bytes": "161646"
},
{
"name": "JavaScript",
"bytes": "1358011"
},
{
"name": "Mako",
"bytes": "1483"
},
{
"name": "Python",
"bytes": "1202964"
},
{
"name": "Ruby",
"bytes": "9923"
},
{
"name": "Shell",
"bytes": "3298"
}
],
"symlink_target": ""
} |
from . import nxadapter
from . import community
from . import centrality
from _NetworKit import ParallelPartitionCoarsening
# external imports
import networkx
def save(name, dir="."):
    """Save the current matplotlib figure as <dir>/<name>.pdf."""
    # Fix: `os` was never imported at module level in this file, so the
    # path join raised NameError; import it locally.
    import os
    # NOTE(review): `savefig` is also undefined in this module -- presumably
    # this helper is meant to run after a pylab/matplotlib star-import.
    # Confirm and import it explicitly.
    savefig(os.path.join(dir, "{0}.pdf".format(name)), bbox_inches="tight", transparent=True)
def coloringToColorList(G, coloring):
    """Convert a node->color dict into a per-node list of floats in [0, 1).

    Values are normalized by the number of entries in *coloring* (i.e. the
    number of colored nodes, not the number of distinct colors).
    """
    total = len(coloring)
    return [float(coloring[node]) / total for node in G.nodes()]
def drawGraph(G, **kwargs):
    """Draw a graph via networkx.draw, forwarding any extra arguments.

    By default, node sizes are scaled between 30 and 300 by degree
    centrality.
    """
    nxG = nxadapter.nk2nx(G)
    if "node_size" not in kwargs:
        # Bug fix: a trailing comma here previously wrapped the size list in
        # a 1-tuple, so networkx did not receive a per-node size list.
        scores = centrality.DegreeCentrality(G, True).run().scores()
        kwargs["node_size"] = [30 + 270 * s for s in scores]
    networkx.draw(nxG, **kwargs)
def drawCommunityGraph(G, zeta, **kwargs):
    """Draw the community graph for a graph and partition, forwarding any
    extra arguments to networkx.draw.

    By default, node sizes are scaled between 30 and 500 by community size.
    """
    coarsening = ParallelPartitionCoarsening(G, zeta)
    coarsening.run()  # communities become the nodes of the coarse graph
    communityGraph = nxadapter.nk2nx(coarsening.getCoarseGraph())
    if "node_size" not in kwargs:
        sizes = list(zeta.subsetSizeMap().values())
        biggest = max(sizes)
        kwargs["node_size"] = [30 + 470 * (s / biggest) for s in sizes]
    networkx.draw(communityGraph, **kwargs)
| {
"content_hash": "a361f412883e6100572c1744972ed589",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 118,
"avg_line_length": 31.51063829787234,
"alnum_prop": 0.712356515867657,
"repo_name": "fmaschler/networkit",
"id": "b8766baf1055354eac5594f646bc2538a5efc51c",
"size": "1497",
"binary": false,
"copies": "2",
"ref": "refs/heads/SCD-weighted",
"path": "networkit/viztasks.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10112"
},
{
"name": "C++",
"bytes": "2589116"
},
{
"name": "CSS",
"bytes": "16109"
},
{
"name": "HTML",
"bytes": "10110"
},
{
"name": "JavaScript",
"bytes": "4583"
},
{
"name": "Jupyter Notebook",
"bytes": "35441"
},
{
"name": "Matlab",
"bytes": "238"
},
{
"name": "Python",
"bytes": "606841"
},
{
"name": "Shell",
"bytes": "846"
},
{
"name": "TeX",
"bytes": "5547"
}
],
"symlink_target": ""
} |
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.gen import LiteXModule
from litex_boards.platforms import xilinx_zcu106
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litedram.modules import MT40A256M16
from litedram.phy import usddrphy
from litepcie.phy.usppciephy import USPPCIEPHY
from litepcie.software import generate_litepcie_software
# CRG ----------------------------------------------------------------------------------------------
class _CRG(LiteXModule):
    """Clock/reset generation for the ZCU106.

    From the board's 125 MHz oscillator, produces: sys (sys_clk_freq),
    sys4x (4x sys, for the DDR4 PHY) and a 500 MHz IDELAY reference clock.
    """
    def __init__(self, platform, sys_clk_freq):
        self.rst        = Signal()
        self.cd_sys     = ClockDomain()
        self.cd_sys4x   = ClockDomain()
        self.cd_pll4x   = ClockDomain()
        self.cd_idelay  = ClockDomain()

        # # #

        clk125 = platform.request("clk125")
        rst    = platform.request("rst")

        self.pll = pll = USMMCM(speedgrade=-2)
        self.comb += pll.reset.eq(self.rst | rst)
        pll.register_clkin(clk125, 125e6)
        # sys and sys4x are both derived from the single pll4x output via the
        # BUFGCE/BUFGCE_DIV pair below, keeping them phase-aligned.
        pll.create_clkout(self.cd_pll4x, sys_clk_freq*4, buf=None, with_reset=False)
        pll.create_clkout(self.cd_idelay, 500e6)
        platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # Ignore sys_clk to pll.clkin path created by SoC's rst.

        self.specials += [
            Instance("BUFGCE_DIV",
                p_BUFGCE_DIVIDE=4,
                i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys.clk),
            Instance("BUFGCE",
                i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys4x.clk),
        ]

        # IDELAYCTRL calibration block, required by the DDR PHY's IO delays.
        self.idelayctrl = USIDELAYCTRL(cd_ref=self.cd_idelay, cd_sys=self.cd_sys)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
    """ZCU106 SoC: CPU core plus DDR4 main memory, with optional PCIe Gen3 x4
    endpoint and LED chaser."""
    def __init__(self, sys_clk_freq=125e6, with_led_chaser=True, with_pcie=False, **kwargs):
        platform = xilinx_zcu106.Platform()

        # SoCCore ----------------------------------------------------------------------------------
        SoCCore.__init__(self, platform, sys_clk_freq, ident="LiteX SoC on ZCU106", **kwargs)

        # CRG --------------------------------------------------------------------------------------
        self.crg = _CRG(platform, sys_clk_freq)

        # DDR4 SDRAM -------------------------------------------------------------------------------
        # Only instantiated when no integrated (block-RAM) main RAM is used.
        if not self.integrated_main_ram_size:
            self.ddrphy = usddrphy.USPDDRPHY(platform.request("ddram"),
                memtype          = "DDR4",
                sys_clk_freq     = sys_clk_freq,
                iodelay_clk_freq = 500e6)
            self.add_sdram("sdram",
                phy           = self.ddrphy,
                module        = MT40A256M16(sys_clk_freq, "1:4"),
                size          = 0x20000000,
                l2_cache_size = kwargs.get("l2_size", 8192)
            )

        # PCIe -------------------------------------------------------------------------------------
        if with_pcie:
            self.pcie_phy = USPPCIEPHY(platform, platform.request("pcie_x4"),
                speed      = "gen3",
                data_width = 128,
                bar0_size  = 0x20000)
            self.add_pcie(phy=self.pcie_phy, ndmas=1)

        # Leds -------------------------------------------------------------------------------------
        if with_led_chaser:
            self.leds = LedChaser(
                pads         = platform.request_all("user_led"),
                sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
    """Command-line entry point: parse arguments, then build and/or load."""
    from litex.build.parser import LiteXArgumentParser
    parser = LiteXArgumentParser(platform=xilinx_zcu106.Platform, description="LiteX SoC on ZCU106.")
    parser.add_target_argument("--sys-clk-freq", default=125e6, type=float, help="System clock frequency.")
    parser.add_target_argument("--with-pcie", action="store_true", help="Enable PCIe support")
    args = parser.parse_args()

    soc = BaseSoC(sys_clk_freq=args.sys_clk_freq, with_pcie=args.with_pcie, **parser.soc_argdict)
    builder = Builder(soc, **parser.builder_argdict)
    if args.build:
        builder.build(**parser.toolchain_argdict)

    if args.load:
        programmer = soc.platform.create_programmer()
        programmer.load_bitstream(builder.get_bitstream_filename(mode="sram"))
if __name__ == "__main__":
main()
| {
"content_hash": "c8b8110702dea279c6773e4eae370e31",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 128,
"avg_line_length": 40.39823008849557,
"alnum_prop": 0.5051478641840088,
"repo_name": "litex-hub/litex-boards",
"id": "48f0c41720a2f9052ec45c439289af7046347b29",
"size": "4737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "litex_boards/targets/xilinx_zcu106.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1813530"
}
],
"symlink_target": ""
} |
from silon import * | {
"content_hash": "91b61176290c8f7f1c03737f07093fc1",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 19,
"avg_line_length": 19,
"alnum_prop": 0.7894736842105263,
"repo_name": "marxus85/silon",
"id": "9653b7219f5242ce43ea72aba7e4752ac6b3e492",
"size": "19",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "silon/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "13229"
},
{
"name": "JavaScript",
"bytes": "17541"
},
{
"name": "Python",
"bytes": "12117"
}
],
"symlink_target": ""
} |
from operator import add, mul, sub, truediv
def arithmetic(a, b, operator):
    """Apply the named *operator* ('add', 'subtract', 'multiply', 'divide')
    to a and b.  Raises KeyError for any other name."""
    dispatch = {
        'add': lambda x, y: x + y,
        'subtract': lambda x, y: x - y,
        'multiply': lambda x, y: x * y,
        'divide': lambda x, y: x / y,
    }
    return dispatch[operator](a, b)
| {
"content_hash": "00219fec357a3b46690aec009a89f492",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 75,
"avg_line_length": 30.833333333333332,
"alnum_prop": 0.6486486486486487,
"repo_name": "the-zebulan/CodeWars",
"id": "5d9e5b4d5c2a3de9d822a4cfc1d57cc9af8e1d06",
"size": "185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/kyu_7/make_a_function_that_does_arithmetic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
} |
"""
Map Interface Module
Copyright 2013 Rob "N3X15" Nelson <nexis@7chan.org>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os, itertools, sys, numpy, logging, hashlib
from byond.map.format import GetMapFormat, Load as LoadMapFormats
from byond.DMI import DMI
from byond.directions import SOUTH, IMAGE_INDICES
from byond.basetypes import Atom, BYONDString, BYONDValue, BYONDFileRef, BYOND2RGBA
# from byond.objtree import ObjectTree
from PIL import Image, ImageChops
# Cache
_icons = {}
_dmis = {}
LoadMapFormats()
# From StackOverflow
def trim(im):
    """Crop *im* to its content, using the top-left pixel as the background.

    Returns the cropped image, or None when no bounding box remains after
    boosting the difference (i.e. the image is effectively uniform).
    """
    background = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    delta = ImageChops.difference(im, background)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    box = delta.getbbox()
    return im.crop(box) if box else None
# Bytes
def tint_image(image, tint_color):
    """Multiply *image* by a solid RGBA tint of the same size."""
    overlay = Image.new('RGBA', image.size, tint_color)
    return ImageChops.multiply(image, overlay)
class LocationIterator:
    """Iterate a map's tiles coordinate-by-coordinate, x fastest, then y,
    then z-level."""

    def __init__(self, _map):
        self.map = _map
        # x starts at -1 so the first advance lands on (0, 0, 0).
        self.x = -1
        self.y = 0
        self.z = 0
        self.max_z = len(self.map.zLevels)

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """Advance to the next coordinate and return the tile there."""
        self.x += 1
        level = self.map.zLevels[self.z]
        if self.x >= level.width:
            # Row exhausted: carry into y, and possibly into z.
            self.x = 0
            self.y += 1
            if self.y >= level.height:
                self.y = 0
                self.z += 1
                if self.z >= self.max_z:
                    raise StopIteration
        return self.map.GetTileAt(self.x, self.y, self.z)
class TileIterator:
    """Iterate over every entry in the map's tile table, in order."""

    def __init__(self, _map):
        self.map = _map
        self.pos = -1
        self.max = len(self.map.tiles)

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """Return the next tile, advancing the cursor."""
        self.pos += 1
        if self.pos >= self.max:
            raise StopIteration
        return self.map.tiles[self.pos]
class AtomIterator:
    """Iterate over every atom instance registered on the map, in order.

    Mirrors TileIterator: ``self.max`` caches len(map.instances) up front.
    """

    def __init__(self, _map):
        self.map = _map
        self.pos = -1
        self.max = len(self.map.instances)

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """Return the next atom, advancing the cursor.

        Bug fix: ``self.max`` is already a count, but the original compared
        against ``len(self.max)``, which raised TypeError on the first call.
        """
        self.pos += 1
        if self.pos >= self.max:
            raise StopIteration
        return self.map.instances[self.pos]
class Tile(object):
    def __init__(self, _map, master=False):
        """A single map tile: an ordered stack of atom instance IDs.

        :param _map: Owning map; used to resolve instance IDs and for
            hash/ID bookkeeping.
        :param master: True for the map's own copy of the tile, used for
            tracking.
        """
        #: Map's copy of the tile, used for tracking.
        self.master = master
        #: (x, y, z) position; maintained externally.
        self.coords = (0, 0, 0)
        #: Tile ID as read from the map file.
        self.origID = ''
        #: Numeric ID in the map's tile table; -1 = unassigned.
        self.ID = -1
        #: Atom instance IDs stacked on this tile.
        self.instances = []
        #: Map coordinates that reference this tile.
        self.locations = []
        # Rendering/selection state.
        self.frame = None
        self.unselected_frame = None
        self.areaSelected = True
        self.log = logging.getLogger(__name__ + '.Tile')
        self.map = _map
        #: Cached MD5 of str(self); None when stale.
        self._hash = None
        #: Previous hash value, kept when the cache is invalidated.
        self.orig_hash = None
def UpdateHash(self, no_map_update=False):
if self._hash is None:
# Why MD5? Because the shorter the string, the faster the comparison.
self._hash = hashlib.md5(str(self)).hexdigest()
if not no_map_update:
self.ID=self.map.UpdateTile(self)
if self.ID==-1:
raise Error('self.ID == -1')
def InvalidateHash(self):
if self._hash is not None:
self.orig_hash = self._hash
self._hash = None
    def GetHash(self):
        """Return the tile's hash, refreshing it first if stale."""
        self.UpdateHash()
        return self._hash
def RemoveAtom(self, atom, hash=True):
'''
:param Atom atom:
Atom to remove. Raises ValueError if not found.
'''
if atom is None: return
self.instances.remove(atom.ID)
self.InvalidateHash()
if hash: self.UpdateHash()
def AppendAtom(self, atom, hash=True):
'''
:param Atom atom:
Atom to add.
'''
if atom is None: return
atom.UpdateMap(self.map)
self.instances.append(atom.ID)
self.InvalidateHash()
if hash: self.UpdateHash()
    def CountAtom(self, atom):
        """Count occurrences of *atom* (by ID) on this tile.

        :param Atom atom:
            Atom to count.
        :return int: Count of atoms
        """
        return self.instances.count(atom.ID)
def copy(self, origID=False):
tile = self.map.CreateTile()
tile.ID = self.ID
tile.instances = [x for x in self.instances]
if origID:
tile.origID = self.origID
if not self._hash:
self.UpdateHash(no_map_update=True)
tile._hash = self._hash
return tile
def GetAtoms(self):
atoms = []
for id in self.instances:
if id is None:
continue
a = self.map.GetInstance(id)
if a is None:
self.log.debug('Unknown instance ID {}!'.format(id))
continue
atoms += [a]
return atoms
def SortAtoms(self):
return sorted(self.GetAtoms(), reverse=True)
def GetAtom(self, idx):
return self.map.GetInstance(self.instances[idx])
def GetInstances(self):
return self.instances
def rmLocation(self, coord, autoclean=True):
if coord in self.locations:
self.locations.remove(coord)
if autoclean and len(self.locations) == 0:
self.map.tiles[self.ID] = None # Mark ready for recovery
self.map._tile_idmap.pop(self.GetHash(), None)
def addLocation(self, coord):
if coord not in self.locations:
self.locations.append(coord)
def __str__(self):
return self._serialize()
def __ne__(self, tile):
return not self.__eq__(tile)
def __eq__(self, other):
return other and ((other._hash and self._hash and self._hash == other._hash) or (len(self.instances) == len(other.instances) and self.instances == other.instances))
# else:
# return all(self.instances[i] == other.instances[i] for i in xrange(len(self.instances)))
def _serialize(self):
return ','.join([str(i) for i in self.GetAtoms()])
def RenderToMapTile(self, passnum, basedir, renderflags, **kwargs):
img = Image.new('RGBA', (96, 96))
self.offset = (32, 32)
foundAPixelOffset = False
render_types = kwargs.get('render_types', ())
skip_alpha = kwargs.get('skip_alpha', False)
# for atom in sorted(self.GetAtoms(), reverse=True):
for atom in self.SortAtoms():
if len(render_types) > 0:
found = False
for path in render_types:
if atom.path.startswith(path):
found = True
if not found:
continue
aid = atom.ID
# Ignore /areas. They look like ass.
if atom.path.startswith('/area'):
if not (renderflags & MapRenderFlags.RENDER_AREAS):
continue
# We're going to turn space black for smaller images.
if atom.path == '/turf/space':
if not (renderflags & MapRenderFlags.RENDER_STARS):
continue
if 'icon' not in atom.properties:
logging.critical('UNKNOWN ICON IN {0} (atom #{1})'.format(self.origID, aid))
logging.info(atom.MapSerialize())
logging.info(atom.MapSerialize(Atom.FLAG_INHERITED_PROPERTIES))
continue
dmi_file = atom.properties['icon'].value
if 'icon_state' not in atom.properties:
# Grab default icon_state ('') if we can't find the one defined.
atom.properties['icon_state'] = BYONDString("")
state = atom.properties['icon_state'].value
direction = SOUTH
if 'dir' in atom.properties:
try:
direction = int(atom.properties['dir'].value)
except ValueError:
logging.critical('FAILED TO READ dir = ' + repr(atom.properties['dir'].value))
continue
icon_key = '{0}|{1}|{2}'.format(dmi_file, state, direction)
frame = None
pixel_x = 0
pixel_y = 0
if icon_key in _icons:
frame, pixel_x, pixel_y = _icons[icon_key]
else:
dmi_path = os.path.join(basedir, dmi_file)
dmi = None
if dmi_path in _dmis:
dmi = _dmis[dmi_path]
else:
try:
dmi = DMI(dmi_path)
dmi.loadAll()
_dmis[dmi_path] = dmi
except Exception as e:
print(str(e))
for prop in ['icon', 'icon_state', 'dir']:
print('\t{0}'.format(atom.dumpPropInfo(prop)))
pass
if dmi.img is None:
logging.warning('Unable to open {0}!'.format(dmi_path))
continue
if dmi.img.mode not in ('RGBA', 'P'):
logging.warn('{} is mode {}!'.format(dmi_file, dmi.img.mode))
if direction not in IMAGE_INDICES:
logging.warn('Unrecognized direction {} on atom {} in tile {}!'.format(direction, atom.MapSerialize(), self.origID))
direction = SOUTH # DreamMaker property editor shows dir = 2. WTF?
frame = dmi.getFrame(state, direction, 0)
if frame == None:
# Get the error/default state.
frame = dmi.getFrame("", direction, 0)
if frame == None:
continue
if frame.mode != 'RGBA':
frame = frame.convert("RGBA")
pixel_x = 0
if 'pixel_x' in atom.properties:
pixel_x = int(atom.properties['pixel_x'].value)
pixel_y = 0
if 'pixel_y' in atom.properties:
pixel_y = int(atom.properties['pixel_y'].value)
_icons[icon_key] = (frame, pixel_x, pixel_y)
# Handle BYOND alpha and coloring
c_frame = frame
alpha = int(atom.getProperty('alpha', 255))
if skip_alpha:
alpha = 255
color = atom.getProperty('color', '#FFFFFF')
if alpha != 255 or color != '#FFFFFF':
c_frame = tint_image(frame, BYOND2RGBA(color, alpha))
img.paste(c_frame, (32 + pixel_x, 32 - pixel_y), c_frame) # Add to the top of the stack.
if pixel_x != 0 or pixel_y != 0:
if passnum == 0: return # Wait for next pass
foundAPixelOffset = True
if passnum == 1 and not foundAPixelOffset:
return None
if not self.areaSelected:
# Fade out unselected tiles.
bands = list(img.split())
# Excluding alpha band
for i in range(3):
bands[i] = bands[i].point(lambda x: x * 0.4)
img = Image.merge(img.mode, bands)
return img
class MapLayer:
    """One z-level of a Map: a 2D numpy grid of tile IDs.

    Tiles themselves live in the owning Map's registries; this layer
    only stores their integer IDs.
    """
    def __init__(self, z, _map, height=255, width=255):
        self.initial_load=False  # True while a map file is being read; suppresses re-registration.
        self.map = _map
        self.min = (0, 0)
        self.max = (height - 1, width - 1)
        self.tiles = None  # numpy int array of tile IDs, allocated by Resize().
        self.Resize(height, width)
        self.z = z
    def GetTile(self, x, y):
        """Return a working copy of the tile at (x, y), with its coords
        stamped to this layer's position."""
        # return self.tiles[y][x]
        t = self.map.GetTileByID(self.tiles[x, y])
        t.coords = (x, y, self.z)
        return t
    def SetTile(self, x, y, tile):
        '''
        Place *tile* at (x, y), registering it with the map first unless
        we are in the middle of an initial load.

        :param x int:
        :param y int:
        :param tile Tile:
        '''
        '''
        if not self.initial_load:
            # Remove old tile.
            oldid = self.tiles[x, y]
            if oldid < len(self.map.instances):
                t = self.map.tiles[oldid]
                if t: t.rmLocation((x, y, self.z))
        '''
        # Set new tile.
        if not self.initial_load:
            tile.ID=self.map.UpdateTile(tile)
        self.tiles[x, y] = tile.ID
        #self.map.tiles[tile.ID].addLocation((x, y, self.z))
    def SetTileID(self, x, y, newID):
        '''
        Place an already-registered tile (by ID) at (x, y).

        :param x int:
        :param y int:
        :param newID int:
        :raises KeyError: if the ID has no live tile in the map registry.
        '''
        if newID is None:
            raise Exception('newID cannot be None')
        t = self.map.tiles[newID]
        if t is None:
            raise KeyError('Unknown tile #{}'.format(newID))
        #self.SetTile(x, y, t)
        '''
        if not self.initial_load:
            # Remove old tile.
            oldid = self.tiles[x, y]
            if oldid < len(self.map.instances):
                t = self.map.tiles[oldid]
                if t: t.rmLocation((x, y, self.z))
        '''
        self.tiles[x, y] = newID
        #self.map.tiles[newID].addLocation((x, y, self.z))
    def Resize(self, height, width):
        """(Re)allocate the tile grid.  On first allocation, every cell
        is filled with the map's base tile."""
        self.height = height
        self.width = width
        basetile = self.map.basetile;
        if self.tiles is None:
            self.tiles = numpy.empty((height, width), int)  # object)
            for y in xrange(height):
                for x in xrange(width):
                    self.SetTile(x, y, basetile)
        else:
            # Existing grid: resize in place, keeping current contents.
            self.tiles.resize(height, width)
        # self.tiles = [[Tile(self.map) for _ in xrange(width)] for _ in xrange(height)]
class MapRenderFlags:
    """Bitmask flags controlling optional layers during map rendering."""
    RENDER_STARS = 1  # Render /turf/space atoms (otherwise skipped/black).
    RENDER_AREAS = 2  # Render /area atoms (otherwise skipped).
class Map:
    """A BYOND map: deduplicated atom/tile registries plus z-level grids.

    Atoms and tiles are stored once each (deduplicated by MD5 hash) and
    referenced everywhere else by integer ID.
    """
    def __init__(self, tree=None, **kwargs):
        self.zLevels = []  # list of MapLayer, indexed by z.
        self._instance_idmap = {}  # md5 -> id
        self._tile_idmap = {}  # md5 -> id
        self.basetile = Tile(self)  # Default (empty) tile used to fill new layers.
        self.instances = []  # Atom
        self.tiles = []  # Tile
        self.DMIs = {}
        self.tree = tree  # Object tree for atom lookups (may be None).
        self.generatedTexAtlas = False
        self.selectedAreas = ()
        self.whitelistTypes = None
        self.forgiving_atom_lookups = kwargs.get('forgiving_atom_lookups', False)
        self.log = logging.getLogger(__name__ + '.Map')
        self.missing_atoms = set()  # Paths that failed lookup (forgiving mode).
        self.basetile.UpdateHash();
    def ResetTilestore(self):
        '''For loading maps. Resets tile data to a pristine state.'''
        self.instances = []  # Atom
        self.tiles = []  # Tile
        self.basetile = None
    def GetTileByID(self, tileID):
        """Return a non-master working copy of tile #tileID, or None if
        that slot has been recovered."""
        t = self.tiles[tileID]
        if t is None:
            return None
        t = t.copy()
        t.master = False
        return t
    def GetInstance(self, atomID):
        """Return a copy of atom instance #atomID, or None for empty slots.

        :raises IndexError: if the ID is out of range.
        """
        a=None
        try:
            a = self.instances[atomID]
        except IndexError as e:
            # NOTE(review): format placeholder is never filled in - this
            # logs the literal '{}' instead of the atom ID.
            self.log.critical('Unable to find instance {}!')
            raise e
        if a is None:
            # print('WARNING: #{0} not found'.format(atomID))
            return None
        a = a.copy()
        # a.master = False
        return a
    def UpdateTile(self, t):
        '''
        Update tile registry.
        :param t Tile:
            Tile to update.
        :return Tile ID:
        '''
        thash = t.GetHash()
        # if t.ID >= 0 and t.ID < len(self.tiles) and self.tiles[t.ID] is not None:
        #    self.tiles[t.ID].rmLocation(t.coords)
        tiles_action = "-"
        '''
        if t in self.tiles:
            t.ID = self.tiles.index(t)
        else:
        '''
        idmap_action = "-"
        if thash not in self._tile_idmap:
            # First time we've seen this tile content: register a copy.
            idmap_action = "Added"
            t.ID = len(self.tiles)
            self.tiles += [t.copy()]
            self._tile_idmap[thash] = t.ID
            tiles_action = "Added"
            #print('Assigned ID #{} to tile {}'.format(t.ID,thash))
        elif self._tile_idmap[thash] != t.ID:
            # Content already registered under a different ID: adopt it.
            t.ID = self._tile_idmap[thash]
            idmap_action = "Updated"
            #print('Updated tile {1} to ID #{0}'.format(t.ID,thash))
        #print('Updated #{} - Tiles: {}, idmap: {}'.format(t.ID, thash, tiles_action, idmap_action))
        self.tiles[t.ID].addLocation(t.coords)
        return t.ID
    def UpdateAtom(self, a):
        '''
        Update tile registry.
        :param a Atom: Tile to update.
        :return: The atom's registry ID.
        '''
        thash = a.GetHash()
        # NOTE(review): this comparison looks inverted - when
        # len(self.instances) < a.ID the very next index access would
        # raise IndexError, so this branch can never run successfully.
        # Presumably `a.ID < len(self.instances)` was intended; also
        # verify the rmLocation call signature before enabling it.
        if a.ID and len(self.instances) < a.ID and self.instances[a.ID] is not None:
            self.instances[a.ID].rmLocation(self, a.coords)
        if thash not in self._instance_idmap:
            # First occurrence of this atom content: register a copy.
            a.ID = len(self.instances)
            self.instances += [a.copy()]
            self._instance_idmap[thash] = a.ID
            #print('Assigned ID #{} to atom {}'.format(a.ID,thash))
        else:
            a.ID = self._instance_idmap[thash]
        if a.coords is not None:
            self.instances[a.ID].addLocation(a.coords)
        return a.ID
    def CreateZLevel(self, height, width, z= -1):
        """Create (or replace, when z >= 0) a z-level of the given size."""
        zLevel = MapLayer(z if z >= 0 else len(self.zLevels), self, height, width)
        if z >= 0:
            self.zLevels[z] = zLevel
        else:
            self.zLevels.append(zLevel)
        return zLevel
    def Atoms(self):
        '''Iterates over all instances in the map.
        '''
        return AtomIterator(self)
    def Tiles(self):
        '''Iterates over all tiles of the map.
        '''
        return TileIterator(self)
    def Locations(self):
        '''Iterates over all map coordinates.'''
        return LocationIterator(self)
    def Load(self, filename, **kwargs):
        """Load a map file; the format is picked from the extension
        unless overridden with format=."""
        _, ext = os.path.splitext(filename)
        fmt = kwargs.get('format', 'dmm2' if ext == 'dmm2' else 'dmm')
        reader = GetMapFormat(self, fmt)
        reader.Load(filename, **kwargs)
    def Save(self, filename, **kwargs):
        """Save the map; format selection mirrors Load()."""
        _, ext = os.path.splitext(filename)
        fmt = kwargs.get('format', 'dmm2' if ext == 'dmm2' else 'dmm')
        reader = GetMapFormat(self, fmt)
        reader.Save(filename, **kwargs)
    def writeMap2(self, filename, flags=0):
        """Write the experimental 'map2' text format.

        NOTE(review): references self.tileTypes and self.zLevels.keys(),
        neither of which exists with the current __init__ (zLevels is a
        list) - this looks like legacy code for an older Map layout and
        probably fails if called. Confirm before use.
        """
        self.filename = filename
        tileFlags = 0
        atomFlags = 0
        if flags & Map.WRITE_OLD_IDS:
            tileFlags |= Tile.FLAG_USE_OLD_ID
            atomFlags |= Atom.FLAG_USE_OLD_ID
        padding = len(self.tileTypes[-1].ID2String())
        with open(filename, 'w') as f:
            f.write('// Atom Instances\n')
            for atom in self.instances:
                f.write('{0} = {1}\n'.format(atom.ID, atom.MapSerialize(atomFlags)))
            f.write('// Tiles\n')
            for tile in self.tileTypes:
                f.write('{0}\n'.format(tile.MapSerialize2(tileFlags, padding)))
            f.write('// Layout\n')
            for z in self.zLevels.keys():
                f.write('\n(1,1,{0}) = {{"\n'.format(z))
                zlevel = self.zLevels[z]
                for y in xrange(zlevel.height):
                    for x in xrange(zlevel.width):
                        tile = zlevel.GetTileAt(x, y)
                        if flags & Map.WRITE_OLD_IDS:
                            f.write(tile.origID)
                        else:
                            f.write(tile.ID2String(padding))
                    f.write("\n")
                f.write('"}\n')
    def GetTileAt(self, x, y, z):
        '''
        Return the tile at (x, y, z), or None when z is out of range.

        :param int x:
        :param int y:
        :param int z:
        :rtype Tile:
        '''
        if z < len(self.zLevels):
            return self.zLevels[z].GetTile(x, y)
    def CopyTileAt(self, x, y, z):
        '''
        Return a copy of the tile at (x, y, z).

        :param int x:
        :param int y:
        :param int z:
        :rtype Tile:
        '''
        return self.GetTileAt(x, y, z).copy()
    def SetTileAt(self, x, y, z, tile):
        '''
        Place *tile* at (x, y, z); silently ignored when z is out of range.

        :param int x:
        :param int y:
        :param int z:
        '''
        if z < len(self.zLevels):
            self.zLevels[z].SetTile(x, y, tile)
    def CreateTile(self):
        '''
        Create a fresh, empty tile bound to this map.

        :rtype Tile:
        '''
        return Tile(self)
    def generateTexAtlas(self, basedir, renderflags=0):
        """Pre-render every tile type to a cached 96x96 frame.

        NOTE(review): references self.tileTypes, self.loadDMI and
        atom.id (lowercase), none of which are defined by this class or
        visible here - likely legacy code paralleling RenderToMapTile;
        confirm before calling.
        """
        if self.generatedTexAtlas:
            return
        print('--- Generating texture atlas...')
        self._icons = {}
        self._dmis = {}
        self.generatedTexAtlas = True
        for tid in xrange(len(self.tileTypes)):
            tile = self.tileTypes[tid]
            img = Image.new('RGBA', (96, 96))
            tile.offset = (32, 32)
            tile.areaSelected = True
            tile.render_deferred = False
            for atom in sorted(tile.GetAtoms(), reverse=True):
                aid = atom.id
                # Ignore /areas.  They look like ass.
                if atom.path.startswith('/area'):
                    if not (renderflags & MapRenderFlags.RENDER_AREAS):
                        continue
                # We're going to turn space black for smaller images.
                if atom.path == '/turf/space':
                    if not (renderflags & MapRenderFlags.RENDER_STARS):
                        continue
                if 'icon' not in atom.properties:
                    print('CRITICAL: UNKNOWN ICON IN {0} (atom #{1})'.format(tile.origID, aid))
                    print(atom.MapSerialize())
                    print(atom.MapSerialize(Atom.FLAG_INHERITED_PROPERTIES))
                    continue
                dmi_file = atom.properties['icon'].value
                if 'icon_state' not in atom.properties:
                    # Grab default icon_state ('') if we can't find the one defined.
                    atom.properties['icon_state'] = BYONDString("")
                state = atom.properties['icon_state'].value
                direction = SOUTH
                if 'dir' in atom.properties:
                    try:
                        direction = int(atom.properties['dir'].value)
                    except ValueError:
                        print('FAILED TO READ dir = ' + repr(atom.properties['dir'].value))
                        continue
                icon_key = '{0}:{1}[{2}]'.format(dmi_file, state, direction)
                frame = None
                pixel_x = 0
                pixel_y = 0
                if icon_key in self._icons:
                    frame, pixel_x, pixel_y = self._icons[icon_key]
                else:
                    dmi_path = os.path.join(basedir, dmi_file)
                    dmi = None
                    if dmi_path in self._dmis:
                        dmi = self._dmis[dmi_path]
                    else:
                        try:
                            dmi = self.loadDMI(dmi_path)
                            self._dmis[dmi_path] = dmi
                        except Exception as e:
                            print(str(e))
                            for prop in ['icon', 'icon_state', 'dir']:
                                print('\t{0}'.format(atom.dumpPropInfo(prop)))
                            pass
                    if dmi.img is None:
                        self.log.warn('Unable to open {0}!'.format(dmi_path))
                        continue
                    if dmi.img.mode not in ('RGBA', 'P'):
                        self.log.warn('{} is mode {}!'.format(dmi_file, dmi.img.mode))
                    if direction not in IMAGE_INDICES:
                        self.log.warn('Unrecognized direction {} on atom {} in tile {}!'.format(direction, atom.MapSerialize(), tile.origID))
                        direction = SOUTH  # DreamMaker property editor shows dir = 2. WTF?
                    frame = dmi.getFrame(state, direction, 0)
                    if frame == None:
                        # Get the error/default state.
                        frame = dmi.getFrame("", direction, 0)
                    if frame == None:
                        continue
                    if frame.mode != 'RGBA':
                        frame = frame.convert("RGBA")
                    pixel_x = 0
                    if 'pixel_x' in atom.properties:
                        pixel_x = int(atom.properties['pixel_x'].value)
                    pixel_y = 0
                    if 'pixel_y' in atom.properties:
                        pixel_y = int(atom.properties['pixel_y'].value)
                    self._icons[icon_key] = (frame, pixel_x, pixel_y)
                img.paste(frame, (32 + pixel_x, 32 - pixel_y), frame)  # Add to the top of the stack.
                if pixel_x != 0 or pixel_y != 0:
                    tile.render_deferred = True
            tile.frame = img
            # Fade out unselected tiles.
            bands = list(img.split())
            # Excluding alpha band
            for i in range(3):
                bands[i] = bands[i].point(lambda x: x * 0.4)
            tile.unselected_frame = Image.merge(img.mode, bands)
            self.tileTypes[tid] = tile
    def renderAtom(self, atom, basedir, skip_alpha=False):
        """Resolve and render a single atom's icon frame.

        :param atom: Atom whose icon/icon_state/dir properties pick the frame.
        :param basedir: Base directory for resolving .dmi paths.
        :param skip_alpha: Force full opacity.
        :return: RGBA PIL image, or None when the icon cannot be resolved.

        NOTE(review): relies on module-level ``_icons``/``_dmis`` caches,
        presumably defined elsewhere in this file.
        """
        if 'icon' not in atom.properties:
            logging.critical('UNKNOWN ICON IN ATOM #{0} ({1})'.format(atom.ID, atom.path))
            logging.info(atom.MapSerialize())
            logging.info(atom.MapSerialize(Atom.FLAG_INHERITED_PROPERTIES))
            return None
        # else:
        #    logging.info('Icon found for #{}.'.format(atom.ID))
        dmi_file = atom.properties['icon'].value
        if dmi_file is None:
            return None
        # Grab default icon_state ('') if we can't find the one defined.
        state = atom.getProperty('icon_state', '')
        direction = SOUTH
        if 'dir' in atom.properties:
            try:
                direction = int(atom.properties['dir'].value)
            except ValueError:
                logging.critical('FAILED TO READ dir = ' + repr(atom.properties['dir'].value))
                return None
        icon_key = '{0}|{1}|{2}'.format(dmi_file, state, direction)
        frame = None
        pixel_x = 0
        pixel_y = 0
        if icon_key in _icons:
            frame, pixel_x, pixel_y = _icons[icon_key]
        else:
            dmi_path = os.path.join(basedir, dmi_file)
            dmi = None
            if dmi_path in _dmis:
                dmi = _dmis[dmi_path]
            else:
                try:
                    dmi = DMI(dmi_path)
                    dmi.loadAll()
                    _dmis[dmi_path] = dmi
                except Exception as e:
                    print(str(e))
                    for prop in ['icon', 'icon_state', 'dir']:
                        print('\t{0}'.format(atom.dumpPropInfo(prop)))
                    pass
            # NOTE(review): if DMI() raised above, dmi is None here and
            # this access raises AttributeError.
            if dmi.img is None:
                logging.warning('Unable to open {0}!'.format(dmi_path))
                return None
            if dmi.img.mode not in ('RGBA', 'P'):
                logging.warn('{} is mode {}!'.format(dmi_file, dmi.img.mode))
            if direction not in IMAGE_INDICES:
                logging.warn('Unrecognized direction {} on atom {}!'.format(direction, str(atom)))
                direction = SOUTH  # DreamMaker property editor shows dir = 2. WTF?
            frame = dmi.getFrame(state, direction, 0)
            if frame == None:
                # Get the error/default state.
                frame = dmi.getFrame("", direction, 0)
            if frame == None:
                return None
            if frame.mode != 'RGBA':
                frame = frame.convert("RGBA")
            pixel_x = 0
            if 'pixel_x' in atom.properties:
                pixel_x = int(atom.properties['pixel_x'].value)
            pixel_y = 0
            if 'pixel_y' in atom.properties:
                pixel_y = int(atom.properties['pixel_y'].value)
            _icons[icon_key] = (frame, pixel_x, pixel_y)
        # Handle BYOND alpha and coloring
        c_frame = frame
        alpha = int(atom.getProperty('alpha', 255))
        if skip_alpha:
            alpha = 255
        color = atom.getProperty('color', '#FFFFFF')
        if alpha != 255 or color != '#FFFFFF':
            c_frame = tint_image(frame, BYOND2RGBA(color, alpha))
        return c_frame
    def generateImage(self, filename_tpl, basedir='.', renderflags=0, z=None, **kwargs):
        '''
        Instead of generating on a tile-by-tile basis, this creates a large canvas and places
        each atom on it after sorting layers.  This resolves the pixel_(x,y) problem.

        :param filename_tpl: Output path; '{z}' is replaced by the z-level.
        :param basedir: Base directory for resolving .dmi paths.
        :param renderflags: MapRenderFlags bitmask.
        :param z: Single z-level to render; None renders all of them.

        Keyword args: ``area`` (tuple of /area paths to restrict to),
        ``render_types`` (atom path prefixes), ``skip_alpha``.
        '''
        if z is None:
            # Recurse once per z-level.
            for z in range(len(self.zLevels)):
                self.generateImage(filename_tpl, basedir, renderflags, z, **kwargs)
            return
        self.selectedAreas = ()
        skip_alpha = False
        render_types = ()
        if 'area' in kwargs:
            self.selectedAreas = kwargs['area']
        if 'render_types' in kwargs:
            render_types = kwargs['render_types']
        if 'skip_alpha' in kwargs:
            skip_alpha = kwargs['skip_alpha']
        print('Checking z-level {0}...'.format(z))
        # First pass: collect, per atom instance ID, every (x, y) where
        # it must be pasted.
        instancePositions = {}
        for y in range(self.zLevels[z].height):
            for x in range(self.zLevels[z].width):
                t = self.zLevels[z].GetTile(x, y)
                # print('*** {},{}'.format(x,y))
                if t is None:
                    continue
                # Areas must be checked before the render, not during.
                if len(self.selectedAreas) > 0:
                    renderThis = True
                    for atom in t.GetAtoms():
                        if atom.path.startswith('/area'):
                            if atom.path not in self.selectedAreas:
                                renderThis = False
                    if not renderThis: continue
                for atom in t.GetAtoms():
                    if atom is None: continue
                    iid = atom.ID
                    if atom.path.startswith('/area'):
                        if atom.path not in self.selectedAreas:
                            continue
                    # Check for render restrictions
                    if len(render_types) > 0:
                        found = False
                        for path in render_types:
                            if atom.path.startswith(path):
                                found = True
                        if not found:
                            continue
                    # Ignore /areas.  They look like ass.
                    if atom.path.startswith('/area'):
                        if not (renderflags & MapRenderFlags.RENDER_AREAS):
                            continue
                    # We're going to turn space black for smaller images.
                    if atom.path == '/turf/space':
                        if not (renderflags & MapRenderFlags.RENDER_STARS):
                            continue
                    if iid not in instancePositions:
                        instancePositions[iid] = []
                    # pixel offsets
                    '''
                    pixel_x = int(atom.getProperty('pixel_x', 0))
                    pixel_y = int(atom.getProperty('pixel_y', 0))
                    t_o_x = int(round(pixel_x / 32))
                    t_o_y = int(round(pixel_y / 32))
                    pos = (x + t_o_x, y + t_o_y)
                    '''
                    pos = (x, y)
                    instancePositions[iid].append(pos)
                t=None
        if len(instancePositions) == 0:
            return
        print(' Rendering...')
        # Second pass: render each unique atom once, paste everywhere.
        levelAtoms = []
        for iid in instancePositions:
            levelAtoms += [self.GetInstance(iid)]
        # One tile (32px) of padding on every side of the canvas.
        pic = Image.new('RGBA', ((self.zLevels[z].width + 2) * 32, (self.zLevels[z].height + 2) * 32), "black")
        # Bounding box, used for cropping.
        bbox = [99999, 99999, 0, 0]
        # Replace {z} with current z-level.
        filename = filename_tpl.replace('{z}', str(z))
        pastes = 0
        for atom in sorted(levelAtoms, reverse=True):
            if atom.ID not in instancePositions:
                levelAtoms.remove(atom)
                continue
            icon = self.renderAtom(atom, basedir, skip_alpha)
            if icon is None:
                levelAtoms.remove(atom)
                continue
            for x, y in instancePositions[atom.ID]:
                new_bb = self.getBBoxForAtom(x, y, atom, icon)
                # print('{0},{1} = {2}'.format(x, y, new_bb))
                # Adjust cropping bounds
                if new_bb[0] < bbox[0]:
                    bbox[0] = new_bb[0]
                if new_bb[1] < bbox[1]:
                    bbox[1] = new_bb[1]
                if new_bb[2] > bbox[2]:
                    bbox[2] = new_bb[2]
                if new_bb[3] > bbox[3]:
                    bbox[3] = new_bb[3]
                pic.paste(icon, new_bb, icon)
                pastes += 1
            icon=None  # Cleanup
            levelAtoms.remove(atom)
        levelAtoms = None
        instancePositions = None
        if len(self.selectedAreas) == 0:
            # Autocrop (only works if NOT rendering stars or areas)
            #pic = trim(pic) # FIXME: MemoryError on /vg/.
            pic=pic  # Hack
        else:
            # if nSelAreas == 0:
            #    continue
            pic = pic.crop(bbox)
        if pic is not None:
            # Save
            filedir = os.path.dirname(os.path.abspath(filename))
            if not os.path.isdir(filedir):
                os.makedirs(filedir)
            print(' -> {} ({}x{}) - {} objects'.format(filename, pic.size[0], pic.size[1], pastes))
            pic.save(filename, 'PNG')
    def getBBoxForAtom(self, x, y, atom, icon):
        """Bounding box (PIL coords) for pasting *icon* for *atom* at
        tile position (x, y), honoring its pixel_x/pixel_y offsets."""
        icon_width, icon_height = icon.size
        pixel_x = int(atom.getProperty('pixel_x', 0))
        pixel_y = int(atom.getProperty('pixel_y', 0))
        return self.tilePosToBBox(x, y, pixel_x, pixel_y, icon_height, icon_width)
    def tilePosToBBox(self, tile_x, tile_y, pixel_x, pixel_y, icon_height, icon_width):
        """Convert tile + pixel-offset coordinates into a PIL paste box."""
        # Tile Pos
        X = tile_x * 32
        Y = tile_y * 32
        # pixel offsets
        X += pixel_x
        Y -= pixel_y
        # BYOND coordinates -> PIL coords.
        # BYOND uses LOWER left.
        # PIL uses UPPER left
        X += 0
        Y += 32 - icon_height
        return (
            X,
            Y,
            X + icon_width,
            Y + icon_height
        )
    # So we can read a map without parsing the tree.
    def GetAtom(self, path):
        """Look up *path* in the object tree, or fabricate a bare Atom
        when no tree is attached (or the lookup is forgiven)."""
        if self.tree is not None:
            atom = self.tree.GetAtom(path)
            if atom is None and self.forgiving_atom_lookups:
                self.missing_atoms.add(path)
                return Atom(path, '(map)', missing=True)
            return atom
        return Atom(path)
| {
"content_hash": "b0316160631c4266094bf946f3dcd860",
"timestamp": "",
"source": "github",
"line_count": 1059,
"max_line_length": 172,
"avg_line_length": 35.346553352219075,
"alnum_prop": 0.4773723017738833,
"repo_name": "Boggart/ByondTools",
"id": "19faf94f13d3effaa0eedd801a997f52aec41664",
"size": "37432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byond/map/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DM",
"bytes": "6575"
},
{
"name": "Python",
"bytes": "292003"
}
],
"symlink_target": ""
} |
class StagedObject:
    """
    Mixin supplying an on-stage/off-stage interface for objects.

    A DistributedObject may be present and active merely because it is
    visible, yet still need to be hidden or otherwise disabled; this
    mixin tracks that staging state and routes each transition through
    an overridable handler.
    """
    UNKNOWN = -1
    OFF = 0
    ON = 1
    def __init__(self, initState = UNKNOWN):
        """
        Record the starting stage state only; no "handle" function
        is invoked here.
        """
        self.__state = initState
    def goOnStage(self, *args, **kw):
        """
        Move the object on stage.  handleOnStage() runs only when a
        state change is actually required; otherwise this is a no-op.
        """
        # High-level entry point for clients; always go through here
        # rather than calling the handler directly.
        if self.isOnStage():
            return
        self.handleOnStage(*args, **kw)
    def handleOnStage(self):
        """
        Override to implement on-stage behavior.  Subclasses must call
        down to this base implementation.
        """
        self.__state = StagedObject.ON
    def goOffStage(self, *args, **kw):
        """
        Move the object off stage.  handleOffStage() runs only when a
        state change is actually required; otherwise this is a no-op.
        """
        # High-level entry point for clients; always go through here
        # rather than calling the handler directly.
        if self.isOffStage():
            return
        self.handleOffStage(*args, **kw)
    def handleOffStage(self):
        """
        Override to implement off-stage behavior.  Subclasses must call
        down to this base implementation.
        """
        self.__state = StagedObject.OFF
    def isOnStage(self):
        """Return True when the object is currently on stage."""
        return self.__state == StagedObject.ON
    def isOffStage(self):
        """Return True when the object is currently off stage."""
        return self.__state == StagedObject.OFF
| {
"content_hash": "43ac93dbbd5a6cc0959c6d375b17de11",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 83,
"avg_line_length": 30.365079365079364,
"alnum_prop": 0.6032409827496079,
"repo_name": "chandler14362/panda3d",
"id": "3127bd6e0651a7e8bc8ceca470600eca8782379d",
"size": "1914",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "direct/src/distributed/StagedObject.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4004"
},
{
"name": "C",
"bytes": "5288285"
},
{
"name": "C++",
"bytes": "27114399"
},
{
"name": "Emacs Lisp",
"bytes": "229264"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3113"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "NSIS",
"bytes": "61448"
},
{
"name": "Nemerle",
"bytes": "3001"
},
{
"name": "Objective-C",
"bytes": "27625"
},
{
"name": "Objective-C++",
"bytes": "258129"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl 6",
"bytes": "27055"
},
{
"name": "Puppet",
"bytes": "2627"
},
{
"name": "Python",
"bytes": "5568942"
},
{
"name": "R",
"bytes": "421"
},
{
"name": "Roff",
"bytes": "3432"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
} |
"""
gallerize
~~~~~~~~~
:Copyright: 2007-2021 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
VERSION = '0.5-dev'
| {
"content_hash": "b8142d69b85a308c01010f123867fdcc",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 42,
"avg_line_length": 14.777777777777779,
"alnum_prop": 0.6541353383458647,
"repo_name": "homeworkprod/gallerize",
"id": "9a638e22275cf69f2924d90b79be173abdab6e11",
"size": "133",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/gallerize/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2972"
},
{
"name": "HTML",
"bytes": "2827"
},
{
"name": "Python",
"bytes": "13059"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from difflib import unified_diff
import os
from mercurial import ui
from mercurial.localrepo import localrepository as hg_repo
from mercurial.util import matchdate, Abort
from pyvcs.commit import Commit
from pyvcs.exceptions import CommitDoesNotExist, FileDoesNotExist, FolderDoesNotExist
from pyvcs.repository import BaseRepository
from pyvcs.utils import generate_unified_diff
class Repository(BaseRepository):
    """pyvcs backend for Mercurial repositories, wrapping mercurial's
    localrepository API."""
    def __init__(self, path, **kwargs):
        """
        path is the filesystem path where the repo exists, **kwargs are
        anything extra for accessing the repo
        """
        self.repo = hg_repo(ui.ui(), path=path)
        self.path = path
        self.extra = kwargs
    def _ctx_to_commit(self, ctx):
        """Convert a mercurial changectx into a pyvcs Commit, diffing
        against its first parent."""
        diff = generate_unified_diff(self, ctx.files(), ctx.parents()[0].rev(), ctx.rev())
        # ctx.date()[0] is used as a unix timestamp below; the remaining
        # tuple elements (presumably a tz offset) are ignored.
        return Commit(ctx.rev(),
            ctx.user(),
            datetime.fromtimestamp(ctx.date()[0]),
            ctx.description(),
            ctx.files(),
            diff)
    def _latest_from_parents(self, parent_list):
        # Unimplemented stub.
        pass
    def get_commit_by_id(self, commit_id):
        """
        Returns a commit by it's id (nature of the ID is VCS dependent).
        """
        changeset = self.repo.changectx(commit_id)
        return self._ctx_to_commit(changeset)
    def get_recent_commits(self, since=None):
        """
        Returns all commits since since. If since is None returns all commits
        from the last 5 days of commits.
        """
        # Start from the tip and walk parents breadth-first.
        cur_ctx = self.repo.changectx(self.repo.changelog.rev(self.repo.changelog.tip()))
        if since is None:
            since = datetime.fromtimestamp(cur_ctx.date()[0]) - timedelta(5)
        changesets = []
        to_look_at = [cur_ctx]
        while to_look_at:
            head = to_look_at.pop(0)
            to_look_at.extend(head.parents())
            if datetime.fromtimestamp(head.date()[0]) >= since:
                changesets.append(head)
            else:
                # NOTE(review): breaking here abandons the whole queue, so
                # still-recent commits on other parent branches may be
                # skipped once one too-old changeset is seen.
                break
        return [self._ctx_to_commit(ctx) for ctx in changesets]
    def list_directory(self, path, revision=None):
        """
        Returns a list of files in a directory (list of strings) at a given
        revision, or HEAD if revision is None.
        """
        if revision is None:
            chgctx = self.repo.changectx('tip')
        else:
            chgctx = self.repo.changectx(revision)
        file_list = []
        folder_list = set()
        found_path = False
        # Walk the whole manifest, keeping entries under `path`.
        # (`file` shadows the builtin here; left as-is.)
        for file, node in chgctx.manifest().items():
            if not file.startswith(path):
                continue
            found_path = True
            # Strip the prefix; entries with a remaining separator are
            # inside subdirectories.
            file = file[len(path):]
            if file.count(os.path.sep) >= 1:
                folder_list.add(file[:file.find(os.path.sep)])
            else:
                file_list.append(file)
        if not found_path:
            # If we never found the path within the manifest, it does not exist.
            raise FolderDoesNotExist
        return file_list, sorted(list(folder_list))
    def file_contents(self, path, revision=None):
        """
        Returns the contents of a file as a string at a given revision, or
        HEAD if revision is None.
        """
        if revision is None:
            chgctx = self.repo.changectx('tip')
        else:
            chgctx = self.repo.changectx(revision)
        try:
            return chgctx.filectx(path).data()
        except KeyError:
            raise FileDoesNotExist
| {
"content_hash": "9457f8bc598eaa1ddcf2366d643ecc65",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 90,
"avg_line_length": 34.04761904761905,
"alnum_prop": 0.5804195804195804,
"repo_name": "alex/pyvcs",
"id": "4349f340c19eb03fd4f41c84f8356d13e3d05490",
"size": "3575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvcs/backends/hg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31040"
}
],
"symlink_target": ""
} |
# Monkeypatch numpy.distutils' Fortran compiler so that it delegates
# runtime library dir flags (rpath) to the C compiler, which knows the
# right option syntax.  Best-effort: silently skipped if numpy.distutils
# is unavailable or its layout has changed.
try:
    from numpy.distutils.fcompiler import FCompiler
    def runtime_library_dir_option(self, dir):
        # Delegate -rpath/-R style flags to the paired C compiler.
        return self.c_compiler.runtime_library_dir_option(dir)
    FCompiler.runtime_library_dir_option = \
        runtime_library_dir_option
except Exception:
    pass
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the Bratu2D extension.

    PETSc headers and libraries are located through the PETSC_DIR
    (required) and PETSC_ARCH (optional) environment variables; the
    petsc4py include directory is added as well.

    :param parent_package: Passed through to Configuration.
    :param top_path: Passed through to Configuration.
    :return: A populated numpy.distutils Configuration object.
    :raises KeyError: when PETSC_DIR is not set in the environment.
    """
    include_dirs = []
    library_dirs = []
    libraries = []

    # --- PETSc ---------------------------------------------------------
    import os
    petsc_dir = os.environ['PETSC_DIR']
    petsc_arch = os.environ.get('PETSC_ARCH', '')
    arch_dir = os.path.join(petsc_dir, petsc_arch)
    if petsc_arch and os.path.isdir(arch_dir):
        # Arch-specific build tree inside PETSC_DIR.
        include_dirs += [os.path.join(arch_dir, 'include'),
                         os.path.join(petsc_dir, 'include')]
        library_dirs += [os.path.join(arch_dir, 'lib')]
    else:
        if petsc_arch: pass  # XXX should warn ...
        # "Prefix" install layout: everything directly under PETSC_DIR.
        include_dirs += [os.path.join(petsc_dir, 'include')]
        library_dirs += [os.path.join(petsc_dir, 'lib')]
    libraries += [#'petscts', 'petscsnes', 'petscksp',
                  #'petscdm', 'petscmat', 'petscvec',
                  'petsc']

    # --- PETSc for Python ----------------------------------------------
    import petsc4py
    include_dirs += [petsc4py.get_include()]

    # --- Configuration -------------------------------------------------
    from numpy.distutils.misc_util import Configuration
    config = Configuration('', parent_package, top_path)
    config.add_extension('Bratu2D',
                         sources=['Bratu2D.pyf',
                                  'Bratu2D.F90'],
                         depends=['Bratu2Dmodule.h'],
                         f2py_options=['--quiet'],
                         define_macros=[('F2PY_REPORT_ON_ARRAY_COPY', 1)],
                         include_dirs=include_dirs + [os.curdir],
                         libraries=libraries,
                         library_dirs=library_dirs,
                         runtime_library_dirs=library_dirs)
    return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| {
"content_hash": "01bbc552ba62b2c45ef6e5f8d29c0bc6",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 73,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.5555555555555556,
"repo_name": "pcmagic/stokes_flow",
"id": "525b33866dabc90550ae410264f92e07cb331cb0",
"size": "2082",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pkgs/petsc4py-3.7.0/demo/wrap-f2py/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32833"
},
{
"name": "C++",
"bytes": "221"
},
{
"name": "CSS",
"bytes": "1645"
},
{
"name": "Fortran",
"bytes": "12772"
},
{
"name": "Gnuplot",
"bytes": "2957"
},
{
"name": "HTML",
"bytes": "22464"
},
{
"name": "JavaScript",
"bytes": "9553"
},
{
"name": "Jupyter Notebook",
"bytes": "326253745"
},
{
"name": "MATLAB",
"bytes": "82969"
},
{
"name": "Makefile",
"bytes": "6488"
},
{
"name": "Mathematica",
"bytes": "765914"
},
{
"name": "Objective-C",
"bytes": "793"
},
{
"name": "Python",
"bytes": "1404660"
}
],
"symlink_target": ""
} |
"""Extension action implementations"""
import itertools
from eclcli.common import command
from eclcli.common import utils
class ListExtension(command.Lister):
    """List API extensions"""
    def get_parser(self, prog_name):
        """Add per-service filter flags and --long to the base parser."""
        parser = super(ListExtension, self).get_parser(prog_name)
        parser.add_argument(
            '--compute',
            action='store_true',
            default=False,
            help='List extensions for the Compute API')
        parser.add_argument(
            '--identity',
            action='store_true',
            default=False,
            help='List extensions for the Identity API')
        parser.add_argument(
            '--network',
            action='store_true',
            default=False,
            help='List extensions for the Network API')
        parser.add_argument(
            '--volume',
            action='store_true',
            default=False,
            help='List extensions for the Block Storage API')
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help='List additional fields in output')
        return parser
    def take_action(self, parsed_args):
        """Collect extensions from each selected service and return
        (columns, row-tuples) for cliff to format.  Services whose
        extension listing is unsupported are logged and skipped."""
        if parsed_args.long:
            columns = ('Name', 'Namespace', 'Description',
                       'Alias', 'Updated', 'Links')
        else:
            columns = ('Name', 'Alias', 'Description')
        data = []
        # by default we want to show everything, unless the
        # user specifies one or more of the APIs to show
        # for now, only identity and compute are supported.
        show_all = (not parsed_args.identity and not parsed_args.compute
                    and not parsed_args.volume and not parsed_args.network)
        if parsed_args.identity or show_all:
            identity_client = self.app.client_manager.identity
            try:
                data += identity_client.extensions.list()
            except Exception:
                message = "Extensions list not supported by Identity API"
                self.log.warning(message)
        if parsed_args.compute or show_all:
            compute_client = self.app.client_manager.compute
            try:
                data += compute_client.list_extensions.show_all()
            except Exception:
                message = "Extensions list not supported by Compute API"
                self.log.warning(message)
        if parsed_args.volume or show_all:
            volume_client = self.app.client_manager.volume
            try:
                data += volume_client.list_extensions.show_all()
            except Exception:
                message = "Extensions list not supported by Block Storage API"
                self.log.warning(message)
        # Resource classes for the above
        # (The generator binds the current `data` list eagerly - the
        # outermost iterable of a genexp is evaluated at creation - so
        # rebinding `data` below for the network API is safe.)
        extension_tuples = (
            utils.get_item_properties(
                s,
                columns,
                formatters={},
            ) for s in data
        )
        # Dictionaries for the below
        if parsed_args.network or show_all:
            network_client = self.app.client_manager.network
            try:
                data = network_client.extensions()
                dict_tuples = (
                    utils.get_item_properties(
                        s,
                        columns,
                        formatters={},
                    ) for s in data
                )
                # Lazily append the network rows after the others.
                extension_tuples = itertools.chain(
                    extension_tuples,
                    dict_tuples
                )
            except Exception:
                message = "Extensions list not supported by Network API"
                self.log.warning(message)
        return (columns, extension_tuples)
| {
"content_hash": "28250f062bb22e3801474f7e55ec3307",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 34.5045871559633,
"alnum_prop": 0.5315075777718692,
"repo_name": "nttcom/eclcli",
"id": "9b66a55ea7fbcb1dbe716dac14da0a2c253177e7",
"size": "4374",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eclcli/common/extension.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2087533"
}
],
"symlink_target": ""
} |
# Fit a single-template linear model (template + constant offset) to a
# stacked ELG spectrum and plot the data, best fit, and residuals.
# NOTE(review): input/output paths are hard-coded to one user's machine.
from os import environ
from os.path import join
import numpy as np
from astropy.io import fits
from astropy.convolution import convolve, Box1DKernel
from scipy.linalg import solve
from matplotlib import pyplot as plt
import seaborn as sns
plt.interactive(True)
from redmonster.datamgr.io2 import read_ndArch
from redmonster.physics.misc import poly_array

# Load one galaxy template (index [10, 2]) from the redmonster template
# archive and build its log10-lambda wavelength grid from the header.
data,parlists,infodict = read_ndArch(join(environ['REDMONSTER_TEMPLATES_DIR'],
                                          'ndArch-ssp_galaxy_noemit-v000.fits'))
temp = data[10,2]
tempwave = 10**(infodict['coeff0'] + 0.0001 * np.arange(temp.shape[-1]))

# Read the stacked spectrum (HDU 0) and its inverse variance (HDU 1).
hdu = fits.open('/Users/timhutchinson/Desktop/test.fits')
spec = hdu[0].data[0]
ivar = hdu[1].data[0]
wave = 10**(hdu[0].header['COEFF0'] + 0.0001 * \
            np.arange(hdu[0].data.shape[-1]))

# Align the template onto the spectrum's wavelength range.
ind = np.abs(tempwave - wave[0]).argmin()
temp = temp[ind:ind+spec.shape[0]]
tempwave = tempwave[ind:ind+spec.shape[0]]

# Weighted least squares: design matrix is [template, constant], weights
# are the inverse-variance diagonal; solve the normal equations for the
# two coefficients, then evaluate the best-fit model.
pmat = np.zeros((temp.shape[0],2))
ninv = np.diag(ivar)
pmat[:,0] = temp
pmat[:,1] = np.ones(temp.shape[0])
a = solve(np.dot(np.dot(np.transpose(pmat), ninv), pmat),
          np.dot(np.dot(np.transpose(pmat), ninv), spec))
model = np.dot(pmat, a)

# Two-panel figure: boxcar-smoothed spectrum with the fit overplotted
# (top) and the smoothed residuals (bottom); saved as a PDF.
sns.set_palette(sns.color_palette("hls", 8))
sns.set_style('white')
f = plt.figure()
ax = f.add_subplot(211)
plt.plot(wave, convolve(spec, Box1DKernel(5)), color='black')
plt.plot(wave, model, color=sns.color_palette("hls", 8)[0])
plt.ylabel(r'Flux (arbitrary)', size=14)
plt.axis([wave[0], wave[-1], -0.5,4])
ax = f.add_subplot(212)
plt.plot(wave, convolve(spec-model, Box1DKernel(5)), color='black')
plt.ylabel(r'Flux (arbitrary)', size=14)
plt.xlabel(r'Rest-frame wavelength ($\AA$)', size=14)
plt.axis([wave[0], wave[-1], -1,3])
f.tight_layout()
f.savefig('/Users/timhutchinson/compute/repos/elg-templates/plots/stackfits.pdf')
#fit OII line at XXX A and plot from 3700 to 3760
| {
"content_hash": "26b89432db1b982906d6f1f12caf98d2",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 81,
"avg_line_length": 33.339285714285715,
"alnum_prop": 0.6845206213176218,
"repo_name": "timahutchinson/elg-templates",
"id": "9172e2edd45a81c26acffdda1b55b2f3e70c7b2e",
"size": "1867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/elg_templates/fit_stack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36710"
}
],
"symlink_target": ""
} |
"""
This file demonstrates a bokeh applet, which can either be viewed
directly on a bokeh-server, or embedded into a flask application.
See the README.md file in this directory for instructions on running.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
from os import listdir
from os.path import dirname, join, splitext
import numpy as np
import pandas as pd
from bokeh.objects import ColumnDataSource, Plot
from bokeh.plotting import circle, rect, curdoc
from bokeh.properties import String, Instance
from bokeh.server.app import bokeh_app
from bokeh.server.utils.plugins import object_page
from bokeh.widgets import HBox, VBox, VBoxForm, PreText, Select
data_dir = join(dirname(__file__), "daily")
tickers = listdir(data_dir)
tickers = [splitext(x)[0].split("table_")[-1] for x in tickers]
def get_ticker_data(ticker):
    """Load one ticker's daily close series from its CSV in ``data_dir``.

    Returns a date-indexed DataFrame with two columns: the close price
    (named after the ticker) and its day-over-day difference
    (``<ticker>_returns``).
    """
    fname = join(data_dir, "table_%s.csv" % ticker.lower())
    # NOTE(review): header=False relies on an old pandas API; modern
    # pandas expects header=None for headerless files — confirm version.
    data = pd.read_csv(
        fname,
        names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
        header=False,
        parse_dates=['date']
    )
    data = data.set_index('date')
    data = pd.DataFrame({ticker : data.c, ticker + "_returns" : data.c.diff()})
    return data
# Module-level cache of joined frames, keyed by the (ticker1, ticker2) pair.
pd_cache = {}


def get_data(ticker1, ticker2):
    """Return the aligned two-ticker DataFrame, memoized in ``pd_cache``.

    Loads both tickers, joins them column-wise, and drops rows where
    either series is missing so the two histories line up exactly.
    """
    key = (ticker1, ticker2)
    cached = pd_cache.get(key)
    if cached is not None:
        return cached
    joined = pd.concat(
        [get_ticker_data(ticker1), get_ticker_data(ticker2)],
        axis=1,
    ).dropna()
    pd_cache[key] = joined
    return joined
class StockApp(VBox):
    """Bokeh applet comparing two stock tickers.

    Shows a scatter of daily returns, two linked time-series plots, two
    histograms of the selected returns, and a summary-statistics text
    box.  Selecting points in any plot updates the histograms and stats.
    """
    extra_generated_classes = [["StockApp", "StockApp", "VBox"]]
    jsmodel = "VBox"

    # text statistics
    pretext = Instance(PreText)

    # plots
    plot = Instance(Plot)
    line_plot1 = Instance(Plot)
    line_plot2 = Instance(Plot)
    hist1 = Instance(Plot)
    hist2 = Instance(Plot)

    # data source shared by all plots (enables linked selection)
    source = Instance(ColumnDataSource)

    # layout boxes
    mainrow = Instance(HBox)
    histrow = Instance(HBox)
    statsbox = Instance(VBox)

    # inputs
    ticker1 = String(default="AAPL")
    ticker2 = String(default="GOOG")
    ticker1_select = Instance(Select)
    ticker2_select = Instance(Select)
    input_box = Instance(VBoxForm)

    def __init__(self, *args, **kwargs):
        super(StockApp, self).__init__(*args, **kwargs)
        self._dfs = {}

    @classmethod
    def create(cls):
        """
        This function is called once, and is responsible for
        creating all objects (plots, datasources, etc)
        """
        # create layout widgets
        obj = cls()
        obj.mainrow = HBox()
        obj.histrow = HBox()
        obj.statsbox = VBox()
        obj.input_box = VBoxForm()
        # create input widgets
        obj.make_inputs()
        # outputs
        obj.pretext = PreText(text="", width=500)
        obj.make_source()
        obj.make_plots()
        obj.make_stats()
        # layout
        obj.set_children()
        return obj

    def make_inputs(self):
        """Create the two ticker drop-downs."""
        self.ticker1_select = Select(
            name='ticker1',
            value='AAPL',
            options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
        )
        self.ticker2_select = Select(
            name='ticker2',
            value='GOOG',
            options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
        )

    @property
    def selected_df(self):
        """The full DataFrame, or only the user-selected rows if any.

        NOTE(review): assumes ``source.selected`` is a list of positional
        row indices suitable for ``iloc`` — confirm against the bokeh
        version in use.
        """
        pandas_df = self.df
        selected = self.source.selected
        if selected:
            pandas_df = pandas_df.iloc[selected, :]
        return pandas_df

    def make_source(self):
        """(Re)build the shared ColumnDataSource from the current pair."""
        self.source = ColumnDataSource(data=self.df)

    def line_plot(self, ticker, x_range=None):
        """Build one time-series circle plot; pass ``x_range`` to link axes."""
        plot = circle(
            'date', ticker,
            title=ticker,
            size=2,
            x_range=x_range,
            x_axis_type='datetime',
            source=self.source,
            title_text_font_size="10pt",
            plot_width=1000, plot_height=200,
            nonselection_alpha=0.02,
            tools="pan,wheel_zoom,select"
        )
        return plot

    def hist_plot(self, ticker):
        """Histogram of the *selected* returns, on the full data's axis range.

        NOTE(review): ``global_hist`` is computed but unused; only
        ``global_bins`` (axis limits) and the selection's ``hist`` are used.
        """
        global_hist, global_bins = np.histogram(self.df[ticker + "_returns"], bins=50)
        hist, bins = np.histogram(self.selected_df[ticker + "_returns"], bins=50)
        width = 0.7 * (bins[1] - bins[0])
        center = (bins[:-1] + bins[1:]) / 2
        start = global_bins.min()
        end = global_bins.max()
        top = hist.max()
        return rect(
            center, hist/2.0, width, hist,
            title="%s hist" % ticker,
            plot_width=500, plot_height=200,
            tools="",
            title_text_font_size="10pt",
            x_range=[start, end],
            y_range=[0, top],
        )

    def make_plots(self):
        """Build the returns scatter, both line plots, and both histograms."""
        ticker1 = self.ticker1
        ticker2 = self.ticker2
        self.plot = circle(
            ticker1 + "_returns", ticker2 + "_returns",
            size=2,
            title="%s vs %s" %(ticker1, ticker2),
            source=self.source,
            plot_width=400, plot_height=400,
            tools="pan,wheel_zoom,select",
            title_text_font_size="10pt",
            nonselection_alpha=0.02,
        )
        self.line_plot1 = self.line_plot(ticker1)
        # Share the first plot's x_range so panning/zooming stays in sync.
        self.line_plot2 = self.line_plot(ticker2, self.line_plot1.x_range)
        self.hist_plots()

    def hist_plots(self):
        """Rebuild both histograms (called on selection changes too)."""
        ticker1 = self.ticker1
        ticker2 = self.ticker2
        self.hist1 = self.hist_plot(ticker1)
        self.hist2 = self.hist_plot(ticker2)

    def set_children(self):
        """Wire the widget tree: rows, inputs, plots, and stats box."""
        self.children = [self.mainrow, self.histrow, self.line_plot1, self.line_plot2]
        self.mainrow.children = [self.input_box, self.plot, self.statsbox]
        self.input_box.children = [self.ticker1_select, self.ticker2_select]
        self.histrow.children = [self.hist1, self.hist2]
        self.statsbox.children = [self.pretext]

    def input_change(self, obj, attrname, old, new):
        """Handle a ticker drop-down change: rebuild source, plots, layout."""
        if obj == self.ticker2_select:
            self.ticker2 = new
        if obj == self.ticker1_select:
            self.ticker1 = new
        self.make_source()
        self.make_plots()
        self.set_children()
        curdoc().add(self)

    def setup_events(self):
        """Register callbacks for selection and drop-down changes."""
        super(StockApp, self).setup_events()
        if self.source:
            self.source.on_change('selected', self, 'selection_change')
        if self.ticker1_select:
            self.ticker1_select.on_change('value', self, 'input_change')
        if self.ticker2_select:
            self.ticker2_select.on_change('value', self, 'input_change')

    def make_stats(self):
        """Refresh the summary-statistics text from the current selection."""
        stats = self.selected_df.describe()
        self.pretext.text = str(stats)

    def selection_change(self, obj, attrname, old, new):
        """Handle a plot selection change: refresh stats and histograms."""
        self.make_stats()
        self.hist_plots()
        self.set_children()
        curdoc().add(self)

    @property
    def df(self):
        # Joined two-ticker frame; get_data memoizes in a module cache.
        return get_data(self.ticker1, self.ticker2)
# The following code adds a "/bokeh/stocks/" url to the bokeh-server. This URL
# will render this StockApp. If you don't want serve this applet from a Bokeh
# server (for instance if you are embedding in a separate Flask application),
# then just remove this block of code.
@bokeh_app.route("/bokeh/stocks/")
@object_page("stocks")
def make_object():
    """Entry point used by bokeh-server to instantiate the applet."""
    app = StockApp.create()
    return app
| {
"content_hash": "a95ec7f2251bf99a56a99f331888dc52",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 86,
"avg_line_length": 30.780590717299578,
"alnum_prop": 0.5916381082933516,
"repo_name": "jakevdp/bokeh",
"id": "38bbc7063df49be8c56f52f3e6a14289f73fc6de",
"size": "7295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/app/stock_applet/stock_app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated field alterations on Collection and CourseCopy.

    Narrows ``collection.iiif_source`` and ``coursecopy.state`` to fixed
    choice sets and gives ``coursecopy.data`` a ``"{}"`` default.
    """

    dependencies = [
        ("media_service", "0009_auto_20190204_1606"),
    ]

    operations = [
        migrations.AlterField(
            model_name="collection",
            name="iiif_source",
            field=models.CharField(
                choices=[("images", "Collection Images"), ("custom", "IIIF Manifest")],
                default="images",
                max_length=100,
            ),
        ),
        migrations.AlterField(
            model_name="coursecopy",
            name="data",
            field=models.TextField(blank=True, default="{}"),
        ),
        migrations.AlterField(
            model_name="coursecopy",
            name="state",
            field=models.CharField(
                choices=[
                    ("initiated", "Initiated"),
                    ("completed", "Completed"),
                    ("error", "Error"),
                ],
                default="initiated",
                max_length=100,
            ),
        ),
    ]
| {
"content_hash": "04f687fdfa77f1c857e20042ff143517",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 87,
"avg_line_length": 27.975,
"alnum_prop": 0.4727435210008937,
"repo_name": "Harvard-ATG/media_management_api",
"id": "f53769af1dc724f7ed4f76548c4a29bca7205b7c",
"size": "1193",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "media_management_api/media_service/migrations/0010_auto_20200625_1726.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "414"
},
{
"name": "Python",
"bytes": "219422"
},
{
"name": "Shell",
"bytes": "4079"
}
],
"symlink_target": ""
} |
from . import compiled
class Compiler:
    """Compile a utemplate text template into a Python generator function.

    Reads the template line by line and writes Python source to
    ``file_out``: literal text becomes ``yield \"\"\"...\"\"\"`` statements,
    ``{{ expr }}`` becomes ``yield str(expr)``, and ``{% stmt %}`` blocks
    (if/for/while, set, args, include, end*) become real Python control
    flow.  Included sub-templates are compiled into numbered
    ``renderN()`` functions in the same output file.
    """
    # Delimiters: statements are "{%" ... "%}", expressions "{{" ... "}}".
    START_CHAR = "{"
    STMNT = "%"
    STMNT_END = "%}"
    EXPR = "{"
    EXPR_END = "}}"

    def __init__(self, file_in, file_out, indent=0, seq=0, loader=None):
        self.file_in = file_in
        self.file_out = file_out
        self.loader = loader
        # seq numbers the render functions for nested includes (0 = top).
        self.seq = seq
        self._indent = indent
        # Stack of open blocks ("def", "if", "for", ...) — its depth
        # drives the indentation of emitted code.
        self.stack = []
        self.in_literal = False
        self.flushed_header = False
        # Default signature of the generated render function; overridden
        # by an {% args ... %} directive.
        self.args = "*a, **d"

    def indent(self, adjust=0):
        """Emit leading whitespace; lazily emit the ``def render...`` header first."""
        if not self.flushed_header:
            self.flushed_header = True
            self.indent()
            self.file_out.write("def render%s(%s):\n" % (str(self.seq) if self.seq else "", self.args))
            self.stack.append("def")
        self.file_out.write(" " * (len(self.stack) + self._indent + adjust))

    def literal(self, s):
        """Append literal template text, opening a yield-string if needed."""
        if not s:
            return
        if not self.in_literal:
            self.indent()
            self.file_out.write('yield """')
            self.in_literal = True
        # Escape quotes so the text survives inside a triple-quoted string.
        self.file_out.write(s.replace('"', '\\"'))

    def close_literal(self):
        """Terminate an open triple-quoted yield, if one is in progress."""
        if self.in_literal:
            self.file_out.write('"""\n')
            self.in_literal = False

    def render_expr(self, e):
        """Emit ``yield str(<expr>)`` for a ``{{ ... }}`` expression."""
        self.indent()
        self.file_out.write('yield str(' + e + ')\n')

    def parse_statement(self, stmt):
        """Translate one ``{% ... %}`` directive into emitted Python code.

        Handles: args, set, include (literal name or dynamic ``{{...}}``),
        block openers (if/for/...), elif/else, and the matching end* tags.
        Raises AssertionError on mismatched or unknown directives.
        """
        tokens = stmt.split(None, 1)
        if tokens[0] == "args":
            if len(tokens) > 1:
                self.args = tokens[1]
            else:
                self.args = ""
        elif tokens[0] == "set":
            self.indent()
            self.file_out.write(stmt[3:].strip() + "\n")
        elif tokens[0] == "include":
            if not self.flushed_header:
                # If there was no other output, we still need a header now
                self.indent()
            tokens = tokens[1].split(None, 1)
            args = ""
            if len(tokens) > 1:
                args = tokens[1]
            if tokens[0][0] == "{":
                # Dynamic include: template name is a runtime expression;
                # import the compiled module by name and delegate to it.
                self.indent()
                # "1" as fromlist param is uPy hack
                self.file_out.write('_ = __import__(%s.replace(".", "_"), None, None, 1)\n' % tokens[0][2:-2])
                self.indent()
                self.file_out.write("yield from _.render(%s)\n" % args)
                return
            # Static include: compile the sub-template inline as renderN
            # and emit a delegating "yield from".
            with self.loader.input_open(tokens[0][1:-1]) as inc:
                self.seq += 1
                c = Compiler(inc, self.file_out, len(self.stack) + self._indent, self.seq)
                inc_id = self.seq
                self.seq = c.compile()
            self.indent()
            self.file_out.write("yield from render%d(%s)\n" % (inc_id, args))
        elif len(tokens) > 1:
            if tokens[0] == "elif":
                assert self.stack[-1] == "if"
                # elif sits one level shallower than the if body.
                self.indent(-1)
                self.file_out.write(stmt + ":\n")
            else:
                self.indent()
                self.file_out.write(stmt + ":\n")
                self.stack.append(tokens[0])
        else:
            if stmt.startswith("end"):
                # e.g. "endif" must close an open "if".
                assert self.stack[-1] == stmt[3:]
                self.stack.pop(-1)
            elif stmt == "else":
                assert self.stack[-1] == "if"
                self.indent(-1)
                self.file_out.write("else:\n")
            else:
                assert False

    def parse_line(self, l):
        """Scan one template line, dispatching literals, {%..%} and {{..}}."""
        while l:
            start = l.find(self.START_CHAR)
            if start == -1:
                self.literal(l)
                return
            self.literal(l[:start])
            self.close_literal()
            sel = l[start + 1]
            #print("*%s=%s=" % (sel, EXPR))
            if sel == self.STMNT:
                end = l.find(self.STMNT_END)
                assert end > 0
                stmt = l[start + len(self.START_CHAR + self.STMNT):end].strip()
                self.parse_statement(stmt)
                end += len(self.STMNT_END)
                l = l[end:]
                # Swallow the trailing newline of a statement-only line so
                # directives don't leave blank lines in the output.
                if not self.in_literal and l == "\n":
                    break
            elif sel == self.EXPR:
                # print("EXPR")
                end = l.find(self.EXPR_END)
                assert end > 0
                expr = l[start + len(self.START_CHAR + self.EXPR):end].strip()
                self.render_expr(expr)
                end += len(self.EXPR_END)
                l = l[end:]
            else:
                # Lone "{" not starting a tag: emit it and keep scanning.
                self.literal(l[start])
                l = l[start + 1:]

    def header(self):
        """Write the banner comment at the top of the generated module."""
        self.file_out.write("# Autogenerated file\n")

    def compile(self):
        """Compile the whole input; return the highest render seq used."""
        self.header()
        for l in self.file_in:
            self.parse_line(l)
        self.close_literal()
        return self.seq
class Loader(compiled.Loader):
    """Template loader that compiles templates on demand.

    Extends ``compiled.Loader``: if the compiled module for a template is
    missing (or fails to import), compile the source template into a
    ``.py`` module next to it and retry the import.
    """

    def __init__(self, pkg, dir):
        super().__init__(pkg, dir)
        self.dir = dir
        if pkg == "__main__":
            # if pkg isn't really a package, don't bother to use it
            # it means we're running from "filesystem directory", not
            # from a package.
            pkg = None
        self.pkg_path = ""
        if pkg:
            p = __import__(pkg)
            if isinstance(p.__path__, str):
                # uPy
                self.pkg_path = p.__path__
            else:
                # CPy
                self.pkg_path = p.__path__[0]
            self.pkg_path += "/"

    def input_open(self, template):
        """Open the template source file for reading."""
        path = self.pkg_path + self.dir + "/" + template
        return open(path)

    def compiled_path(self, template):
        """Module path for the compiled form ("tpl.html" -> "tpl_html.py")."""
        return self.dir + "/" + template.replace(".", "_") + ".py"

    def load(self, name):
        """Import the compiled template, compiling it first if necessary."""
        try:
            return super().load(name)
        except (OSError, ImportError):
            # Not compiled yet (or stale import failure): fall through.
            pass
        compiled_path = self.pkg_path + self.compiled_path(name)
        f_in = self.input_open(name)
        f_out = open(compiled_path, "w")
        c = Compiler(f_in, f_out, loader=self)
        c.compile()
        f_in.close()
        f_out.close()
        return super().load(name)
| {
"content_hash": "38260a8b6a0cc432ebd752c7e50c4691",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 110,
"avg_line_length": 32.294117647058826,
"alnum_prop": 0.4583540321245239,
"repo_name": "peterhinch/micropython-samples",
"id": "a9948a16849c4a526ecd1583731146aa590f8cdb",
"size": "6083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PicoWeb/utemplate/source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "169363"
},
{
"name": "Shell",
"bytes": "2653"
},
{
"name": "Smarty",
"bytes": "183"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis
import uuid
class HEEnum(Himesis):

    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule EEnum.
        """
        # Flag this instance as compiled now
        self.is_compiled = True

        super(HEEnum, self).__init__(name='HEEnum', num_nodes=0, edges=[])

        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """EEnum"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'EEnum')

        # Vertex table, in creation order:
        #   0: the single match model          1: the apply model
        #   2: paired_with relation (0 -> 1)
        #   3: match-side EEnum class          4: its match_contains node
        #   5: apply-side EEnum class          6: its apply_contains node
        node_attrs = [
            {"mm__": """MatchModel"""},
            {"mm__": """ApplyModel"""},
            {"mm__": """paired_with"""},
            {"mm__": """EEnum""", "attr1": """+"""},
            {"mm__": """match_contains"""},
            {"mm__": """EEnum""", "attr1": """1"""},
            {"mm__": """apply_contains"""},
        ]
        for index, attrs in enumerate(node_attrs):
            self.add_node()
            for attr_name, attr_value in attrs.items():
                self.vs[index][attr_name] = attr_value

        # Add the edges
        self.add_edges([
            (0, 4),  # matchmodel -> match_contains
            (4, 3),  # match_contains -> match_class EEnum()
            (1, 6),  # applymodel -> apply_contains
            (6, 5),  # apply_contains -> apply_class EEnum()
            (0, 2),  # matchmodel -> pairedwith
            (2, 1)   # pairedwith -> applyModel
        ])

        # Add the attribute equations
        self["equations"] = [((5,'name'),(3,'name')), ((5,'instanceClassName'),(3,'instanceClassName')), ((5,'serializable'),(3,'serializable')), ((5,'ApplyAttribute'),('constant','solveRef')), ]
| {
"content_hash": "83192f638299a9fba559054ef972e687",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 195,
"avg_line_length": 28.474358974358974,
"alnum_prop": 0.4673570463755065,
"repo_name": "levilucio/SyVOLT",
"id": "eea0f90085e6f7e9a5f7179bf68ae7156155d6c9",
"size": "2221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ECore_Copier_MM/transformation/HEEnum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import sys
import threading
import random
import time
import voodoo.log as log
import voodoo.gen.registry.server_registry as ServerRegistry
import voodoo.gen.exceptions.registry.RegistryErrors as RegistryErrors
import voodoo.gen.exceptions.locator.LocatorErrors as LocatorErrors
import voodoo.gen.exceptions.coordinator.CoordinatorServerErrors as CoordinatorServerErrors
import voodoo.gen.exceptions.protocols.ProtocolErrors as ProtocolErrors
# Sentinel meaning "return every matching server" in
# _retrieve_server_instances_from_networks() / get_all_servers().
ALL = "all"
# How long a (server_type, restrictions) cache entry stays valid.
MAX_CACHE_TIME = 5 # seconds
class ServerLocator(object):
# TODO: cache object should be refactored out of this class but first all the "not tested / not unittested must be removed"
def __init__(self, coordinator_server_address, server_type_handler):
object.__init__(self)
self._registry = ServerRegistry.get_instance()
self._server_type_handler = server_type_handler
# Cache structure:
# self._cache = {
# ServerType.Login : {
# restrictions : server
# }
# }
self._cache = {}
self._cache_lock = threading.RLock()
self._coordinator = self._retrieve_coordinator(coordinator_server_address,server_type_handler)
def _time(self):
return time.time()
def _retrieve_coordinator(self,coordinator_server_address,server_type_handler):
server_type = server_type_handler.module.Coordinator
methods = self._server_type_handler.retrieve_methods(
server_type
)
return coordinator_server_address.create_client(methods)
def retrieve_methods(self, server_type):
return self._server_type_handler.retrieve_methods(server_type)
def inform_server_not_working(self,server_not_working,server_type,restrictions_of_server):
self._cache_lock.acquire()
try:
servers = self._cache.get(server_type)
if servers == None:
#TODO: not tested
return
client, creation_time = servers.get(restrictions_of_server)
if client == None:
#TODO: not tested
return
if client == server_not_working:
servers.pop(restrictions_of_server)
finally:
self._cache_lock.release()
def get_server(self,original_server_address, server_type,restrictions):
if not self._server_type_handler.isMember(server_type):
raise LocatorErrors.NotAServerTypeError('%s not a member of %s' %
(
server_type,
self._server_type_handler.module
)
)
server = self._get_server_from_cache(server_type,restrictions)
if server is not None:
return server # :-)
session_id = self._retrieve_session_id_from_coordinator(
original_server_address,
server_type,
restrictions
)
try:
there_are_more_servers = True
while there_are_more_servers:
try:
address = self._get_server_from_coordinator(session_id)
except CoordinatorServerErrors.NoServerFoundError:
there_are_more_servers = False
continue
server = self._get_server_from_registry(address)
if server is not None:
# The server was in the registry but not in the cache;
# First check if the server is up and running
if not self._test_server(server,address):
# There was some error
continue
# Server up and running :-)
# Now add it to the cache and return it
try:
self._save_server_in_cache(server, server_type, restrictions)
except LocatorErrors.ServerFoundInCacheError as server_found_in_cache:
# While we were calling the coordinator, some
# other thread retrieved the server. Use the
# server that was already in the cache
return server_found_in_cache.get_server()
else:
return server
# Server was not in the ServerRegistry neither in the cache
methods = self._server_type_handler.retrieve_methods(
server_type
)
try:
server = address.create_client(methods)
except ProtocolErrors.ClientProtocolError as ccce:
# TODO: not tested
# There was some error creating the client
log.log(
ServerLocator,
log.level.Warning,
"Generating client for server with %s raised exception %s. Trying another server..." % (
address.address,
ccce
)
)
log.log_exc(
ServerLocator,
log.level.Info
)
continue
# Check if the server is up and running
if not self._test_server(server,address):
# There was some error
continue
# Server up and running :-)
try:
self._save_server_in_registry_and_cache(
server,
server_type,
restrictions,
address
)
except LocatorErrors.ServerFoundInCacheError as e:
return e.get_server()
else:
return server
else:
raise LocatorErrors.NoServerFoundError(
"No server found of type %s and restrictions %s" %
(server_type, restrictions)
)
finally:
self._logout_from_coordinator( session_id )
def get_all_servers(self,original_server_address, server_type,restrictions =()):
if not self._server_type_handler.isMember(server_type):
#TODO: not tested
raise LocatorErrors.NotAServerTypeError('%s not a member of %s' %
(
server_type,
self._server_type_handler
)
)
all_servers = self._retrieve_all_servers_from_coordinator(original_server_address,server_type,restrictions)
ret_value = []
for server, networks in all_servers:
server_instances = self._retrieve_server_instances_from_networks(
networks,
server_type
)
ret_value.append((server,server_instances))
return ret_value
def get_server_from_coord_address(self, original_server_address, server_coord_address, server_type, how_many = 1):
networks = self._retrieve_networks_from_coordinator(
original_server_address,
server_coord_address
)
if len(networks) == 0:
raise LocatorErrors.NoNetworkAvailableError(
"Couldn't find a network for communicating original_server_address '%s' and server_coord_address '%s'" % (
original_server_address,
server_coord_address
)
)
return self._retrieve_server_instances_from_networks(networks, server_type, how_many)
def _retrieve_server_instances_from_networks(self, networks, server_type, how_many = ALL):
server_instances = []
for network in networks:
address = network.address
cur_server = self._get_server_from_registry(address)
if cur_server is not None:
# TODO: not unittested
# First check if the server is up and running
if not self._test_server(cur_server,address):
# There was some error
continue
server_instances.append(cur_server)
continue
# Server was not in the ServerRegistry
methods = self._server_type_handler.retrieve_methods(
server_type
)
try:
cur_server = address.create_client(methods)
except ProtocolErrors.ClientClassCreationError as ccce:
# TODO: not unittested
# There was some error creating the client
log.log(
ServerLocator,
log.level.Warning,
"Generating client for server with %s raised exception %s. Trying another server..." % (
address.address,
ccce
)
)
log.log_exc(
ServerLocator,
log.level.Info
)
continue
self._save_server_in_registry(address,cur_server)
# Check if the server is up and running
if not self._test_server(cur_server,address):
# TODO: not unittested
# There was some error
continue
# Server up and running :-)
server_instances.append(cur_server)
if how_many != ALL and len(server_instances) == how_many:
break
return server_instances
def _test_server(self,server,address):
""" _test_server(self,server,address) -> bool
It returns True (if we could perform a call to "test_me"), or False (if we couldn't)
"""
# Check if the server is up and running
try:
random_msg = str(random.random())
result_msg = server.test_me(random_msg)
if random_msg != result_msg:
# This was not a valid server, try another
log.log(
ServerLocator,
log.level.Warning,
"Test message received from server %s different from the message sent (%s vs %s). Trying another server" %(
address.address,
random_msg,
result_msg
)
)
return False
except Exception as e:
#There was a exception: this is not a valid server, try another
log.log(
ServerLocator,
log.level.Warning,
"Testing server %s raised exception %s. Trying another server" % (
address.address,
e
)
)
log.log_exc(ServerLocator, log.level.Info)
return False
else:
return True
# Server up and running :-)
def _retrieve_session_id_from_coordinator(self,original_server_address,server_type,restrictions):
try:
return self._coordinator.new_query(original_server_address,server_type,restrictions)
except ProtocolErrors.ProtocolError as pe:
log.log( ServerLocator, log.level.Error, "Problem while asking for new session id to the coordinator server. %s" % pe )
log.log_exc( ServerLocator, log.level.Warning )
raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
"Couldn't retrieve new session id from coordinator server: " + str(pe),
pe
)
except Exception as e:
log.log( ServerLocator, log.level.Error, "Unexpected exception while asking for new session id to the coordinator server. %s" % e )
log.log_exc( ServerLocator, log.level.Warning )
raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
"Unexpected exception while asking new session id from coordinator server: " + str(e),
e
)
def _logout_from_coordinator(self, session_id):
try:
self._coordinator.logout(session_id)
except Exception as e:
log.log( ServerLocator, log.level.Warning, "Unexpected exception while logging out from Coordinator Server. %s " % e)
log.log_exc( ServerLocator, log.level.Info )
def _retrieve_all_servers_from_coordinator(self,original_server_address,server_type,restrictions):
try:
return self._coordinator.get_all_servers(original_server_address,server_type,restrictions)
except ProtocolErrors.ProtocolError as pe:
# TODO: not unittested
log.log(
ServerLocator,
log.level.Error,
"Problem while asking for all servers to the coordinator server. %s" % pe
)
log.log_exc(
ServerLocator,
log.level.Warning
)
raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
"Couldn't retrieve all servers from coordinator server: " + str(pe),
pe
)
except Exception as e:
# TODO: not unittested
log.log(
ServerLocator,
log.level.Error,
"Unexpected exception while asking for all servers to the coordinator server. %s" % e
)
log.log_exc(
ServerLocator,
log.level.Warning
)
raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
"Unexpected exception while asking all servers from coordinator server: " + str(e),
e
)
def _retrieve_networks_from_coordinator(self,original_server_address,server_coord_address):
try:
return self._coordinator.get_networks(original_server_address,server_coord_address)
except ProtocolErrors.ProtocolError as pe:
# TODO: not unittested
log.log(
ServerLocator,
log.level.Error,
"Problem while asking for networks to the coordinator server. %s" % pe
)
log.log_exc(
ServerLocator,
log.level.Warning
)
raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
"Couldn't retrieve networks from coordinator server: " + str(pe),
pe
)
except Exception as e:
# TODO: not unittested
log.log(
ServerLocator,
log.level.Error,
"Unexpected exception while asking for networks to the coordinator server. %s" % e
)
log.log_exc(
ServerLocator,
log.level.Warning
)
import traceback
traceback.print_exc()
raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
"Unexpected exception while asking for networks from coordinator server: " + str(e),
e
)
def _get_server_from_coordinator(self, session_id):
try:
return self._coordinator.get_server(session_id)
except CoordinatorServerErrors.NoServerFoundError as nsfe:
raise nsfe
except ProtocolErrors.ProtocolError as pe:
log.log(
ServerLocator,
log.level.Error,
"Problem while asking for other server to the coordinator server. %s" % pe
)
log.log_exc(
ServerLocator,
log.level.Warning
)
raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
"Couldn't ask for other server to coordinator server: " + str(pe),
pe
)
except Exception as e:
log.log(
ServerLocator,
log.level.Error,
"Unexpected exception while asking for other server to the coordinator server. %s" % e
)
log.log_exc(
ServerLocator,
log.level.Warning
)
raise LocatorErrors.ProblemCommunicatingWithCoordinatorError(
"Unexpected exception while asking for other server to the coordinator server: " + str(e),
e
)
def _get_server_from_cache(self,server_type,restrictions):
"""
Returns the server if it's found in the cache, or
None if it's not found.
"""
self._cache_lock.acquire()
try:
if not self._cache.has_key(server_type):
return None
server_type_cache = self._cache[server_type]
if not server_type_cache.has_key(restrictions):
return None
server, creation_time = server_type_cache[restrictions]
if self._time() - creation_time < MAX_CACHE_TIME:
return server
else:
server_type_cache.pop(restrictions)
return
finally:
self._cache_lock.release()
    def _save_server_in_registry(self, address, server):
        """Register *server* under *address* in the registry.

        If the address is already registered, the error is logged and the
        entry is overwritten via reregister_server as a best-effort fallback.
        """
        self._cache_lock.acquire()
        try:
            try:
                self._registry.register_server(address.address,server)
            except RegistryErrors.RegistryError as e:
                # TODO: not unittested
                log.log( ServerLocator, log.level.Info,
                        "RegistryError found registring server %s with address %s in registry: %s" % (server,address.address,e))
                log.log_exc( ServerLocator, log.level.Debug )
                print >> sys.stderr, "RegistryError found registring server %s with address %s in registry: %s" % (server,address.address,e)
                import traceback
                traceback.print_stack()
                print >> sys.stderr, "Reregistering..."
                print >> sys.stderr, ""
                print >> sys.stderr, ""
                # Fall back to overwriting the existing registration.
                self._registry.reregister_server(address.address,server)
        finally:
            self._cache_lock.release()
def _save_server_in_cache(self, server, server_type, restrictions):
self._cache_lock.acquire()
try:
if not self._cache.has_key(server_type):
self._cache[server_type] = {}
server_type_cache = self._cache[server_type]
if not server_type_cache.has_key(restrictions):
server_type_cache[restrictions] = (server, self._time())
else:
raise LocatorErrors.ServerFoundInCacheError(
server_type_cache[restrictions][0],
"There is already a server for server type %s and restrictions %s" % (server_type,restrictions)
)
finally:
self._cache_lock.release()
    def _save_server_in_registry_and_cache(self, server, server_type, restrictions, address):
        """Store *server* in both the cache and the registry in one step.

        NOTE(review): both helpers acquire self._cache_lock again while it is
        already held here, so the lock presumably is re-entrant -- confirm.
        """
        self._cache_lock.acquire()
        try:
            self._save_server_in_cache(server, server_type,restrictions)
            self._save_server_in_registry(address, server)
        finally:
            self._cache_lock.release()
    def _get_server_from_registry(self,address):
        """Return the server registered under *address*, or None if absent."""
        try:
            return self._registry.get_server(address.address)
        except RegistryErrors.RegistryError:
            # It was not found in the registry
            return None
| {
"content_hash": "b67056ba0ce40e67d7e54293cce03943",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 143,
"avg_line_length": 40.38709677419355,
"alnum_prop": 0.533047124600639,
"repo_name": "ganeshgore/myremolab",
"id": "2699010d825d012335aa55b463c8bb8538c5199f",
"size": "20411",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/src/build/lib.linux-i686-2.7/voodoo/gen/locator/ServerLocator.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "C#",
"bytes": "265761"
},
{
"name": "CSS",
"bytes": "39653"
},
{
"name": "Java",
"bytes": "689284"
},
{
"name": "JavaScript",
"bytes": "74198"
},
{
"name": "PHP",
"bytes": "97324"
},
{
"name": "Python",
"bytes": "5335681"
},
{
"name": "Shell",
"bytes": "794"
},
{
"name": "VHDL",
"bytes": "1372"
}
],
"symlink_target": ""
} |
"""QuadOperator stores a sum of products of canonical quadrature operators."""
from openfermion.ops._symbolic_operator import SymbolicOperator
class QuadOperator(SymbolicOperator):
    """QuadOperator stores a sum of products of canonical quadrature operators.

    They are defined in terms of the bosonic ladder operators:
    q = sqrt{hbar/2}(b+b^)
    p = -isqrt{hbar/2}(b-b^)
    where hbar is a constant appearing in the commutator of q and p:
    [q, p] = i hbar

    In OpenFermion, we describe the canonical quadrature operators acting
    on quantum modes 'i' and 'j' using the shorthand:
    'qi' = q_i
    'pj' = p_j
    where ['qi', 'pj'] = i hbar delta_ij is the commutator.

    The QuadOperator class is designed (in general) to store sums of these
    terms. For instance, an instance of QuadOperator might represent

    .. code-block:: python

        H = 0.5 * QuadOperator('q0 p5') + 0.3 * QuadOperator('q0')

    Note for a QuadOperator to be a Hamiltonian which is a hermitian
    operator, the coefficients of all terms must be real.

    QuadOperator is a subclass of SymbolicOperator. Importantly, it has
    attributes set as follows::

        actions = ('q', 'p')
        action_strings = ('q', 'p')
        action_before_index = True
        different_indices_commute = True

    See the documentation of SymbolicOperator for more details.

    Example:
        .. code-block:: python

            H = (QuadOperator('p0 q3', 0.5)
                 + 0.6 * QuadOperator('p3 q0'))
            # Equivalently
            H2 = QuadOperator('p0 q3', 0.5)
            H2 += QuadOperator('p3 q0', 0.6)

    Note:
        Adding QuadOperator is faster using += (as this
        is done by in-place addition). Specifying the coefficient
        during initialization is faster than multiplying a QuadOperator
        with a scalar.
    """

    @property
    def actions(self):
        """The allowed actions."""
        return ('q', 'p')

    @property
    def action_strings(self):
        """The string representations of the allowed actions."""
        return ('q', 'p')

    @property
    def action_before_index(self):
        """Whether action comes before index in string representations."""
        return True

    @property
    def different_indices_commute(self):
        """Whether factors acting on different indices commute."""
        return True

    def is_normal_ordered(self):
        """Return whether or not term is in normal order.

        In our convention, q operators come first.
        Note that unlike the Fermion operator, due to the commutation
        of quadrature operators with different indices, the QuadOperator
        sorts quadrature operators by index.
        """
        for term in self.terms:
            # A term is out of normal order exactly when some adjacent pair
            # acts on the same mode with 'p' on the left and 'q' on the
            # right.  A single pass over adjacent pairs checks the same
            # condition the previous nested prefix re-scan did, in O(n)
            # per term instead of O(n^2).
            for left_operator, right_operator in zip(term, term[1:]):
                if (left_operator[0] == right_operator[0] and
                        left_operator[1] == 'p' and
                        right_operator[1] == 'q'):
                    return False
        return True

    def is_gaussian(self):
        """Query whether the term is quadratic or lower in the
        quadrature operators.
        """
        # Gaussian <=> every product contains at most two quadrature factors.
        for term in self.terms:
            if len(term) > 2:
                return False
        return True
| {
"content_hash": "ac6b8aed8ddfbef40f2e8940c5281f87",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 32.88461538461539,
"alnum_prop": 0.6,
"repo_name": "jarrodmcc/OpenFermion",
"id": "29aa6bfebe535e5df99799a688851712be96f6d9",
"size": "3983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/openfermion/ops/_quad_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1370322"
},
{
"name": "Shell",
"bytes": "10029"
}
],
"symlink_target": ""
} |
from common.serializers.serialization import domain_state_serializer
from plenum.common.constants import ROLE, TXN_TYPE, NYM, TARGET_NYM, TXN_AUTHOR_AGREEMENT_TEXT, \
TXN_AUTHOR_AGREEMENT_VERSION, TXN_AUTHOR_AGREEMENT_RETIREMENT_TS, TXN_AUTHOR_AGREEMENT_DIGEST, \
TXN_AUTHOR_AGREEMENT_RATIFICATION_TS
from plenum.common.request import Request
from plenum.common.txn_util import reqToTxn, get_payload_data, append_txn_metadata
from plenum.common.util import get_utc_epoch
from plenum.server.request_handlers.static_taa_helper import StaticTAAHelper
from plenum.server.request_handlers.utils import nym_to_state_key
def create_nym_txn(identifier, role, nym="TARGET_NYM"):
    """Build a NYM txn (via reqToTxn) for the given identifier/role/target."""
    operation = {ROLE: role,
                 TXN_TYPE: NYM,
                 TARGET_NYM: nym}
    request = Request(identifier=identifier, operation=operation)
    return reqToTxn(request)
def update_nym(state, identifier, role):
    """Serialize a fresh NYM payload for *identifier* and write it to *state*."""
    txn = create_nym_txn(identifier, role)
    payload = txn['txn']['data']
    serialized = domain_state_serializer.serialize(payload)
    state.set(nym_to_state_key(identifier), serialized)
def check_taa_in_state(handler, digest, version, state_data):
    """Assert the TAA digest and version entries in state match expectations."""
    digest_path = StaticTAAHelper.state_path_taa_digest(digest)
    assert handler.get_from_state(digest_path) == state_data

    version_path = StaticTAAHelper.state_path_taa_version(version)
    stored_digest = handler.state.get(version_path, isCommitted=False)
    assert stored_digest == digest.encode()
def create_taa_txn(taa_request, taa_pp_time):
    """Turn *taa_request* into a TAA txn plus its expected state entry.

    Returns a 3-tuple: the txn (with metadata appended), the TAA digest,
    and (state_value, seq_no, txn_time) as the entry stored in state.
    """
    seq_no = 1
    txn_time = taa_pp_time
    txn_id = "id"

    taa_txn = reqToTxn(taa_request)
    payload = get_payload_data(taa_txn)

    text = payload[TXN_AUTHOR_AGREEMENT_TEXT]
    version = payload[TXN_AUTHOR_AGREEMENT_VERSION]
    ratified = payload[TXN_AUTHOR_AGREEMENT_RATIFICATION_TS]
    retired = payload.get(TXN_AUTHOR_AGREEMENT_RETIREMENT_TS)
    digest = StaticTAAHelper.taa_digest(text, version)

    append_txn_metadata(taa_txn, seq_no, txn_time, txn_id)

    state_value = {
        TXN_AUTHOR_AGREEMENT_TEXT: text,
        TXN_AUTHOR_AGREEMENT_VERSION: version,
        TXN_AUTHOR_AGREEMENT_RATIFICATION_TS: ratified,
        TXN_AUTHOR_AGREEMENT_DIGEST: digest,
    }
    # The retirement timestamp is optional; only present when truthy.
    if retired:
        state_value[TXN_AUTHOR_AGREEMENT_RETIREMENT_TS] = retired
    return taa_txn, digest, (state_value, seq_no, txn_time)
| {
"content_hash": "b8a9a03aa266433bd8d352b2eb5c07fd",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 100,
"avg_line_length": 45.294117647058826,
"alnum_prop": 0.683982683982684,
"repo_name": "evernym/zeno",
"id": "46442edb45964f8cd0de7afa7fca100050a6be0e",
"size": "2310",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/test/req_handler/helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "531061"
}
],
"symlink_target": ""
} |
def find_rule(major_info):
    """Find appropriate rule set for major_info

    Args:
        major_info: list containing {'type': x, 'field': y}
    Returns:
        tuple of (major_info, rule set filename excluding '.yml')
    """
    # TODO: choose the rule set based on major_info instead of hard-coding.
    rule_set = 'sample_cse_2016'
    return major_info, rule_set
| {
"content_hash": "4ac9d40df429de4bf46a3cd1fb4265ec",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 59,
"avg_line_length": 24.181818181818183,
"alnum_prop": 0.5939849624060151,
"repo_name": "dnsdhrj/graduate-adventure",
"id": "235132518482bed92acc1d7250bc36fa8f8914bf",
"size": "266",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "backend/core/rule/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2418"
},
{
"name": "Python",
"bytes": "66446"
},
{
"name": "Vue",
"bytes": "11140"
}
],
"symlink_target": ""
} |
import pytest
from f5.bigip.tm.asm.policy_templates import Policy_Template
from f5.sdk_exception import UnsupportedOperation
from requests.exceptions import HTTPError
@pytest.fixture(scope='class')
def return_template(mgmt_root):
    """Return (first policy template, its id) from the BIG-IP collection."""
    # NOTE(review): some tests also call this function directly rather than
    # via fixture injection; recent pytest versions forbid calling fixtures
    # directly -- confirm the pinned pytest version supports it.
    rc = mgmt_root.tm.asm.policy_templates_s.get_collection()
    return rc[0], rc[0].id
class TestPolicyTemplates(object):
    """Functional checks for the read-only ASM policy-templates endpoint."""

    def test_create_raises(self, mgmt_root):
        templates = mgmt_root.tm.asm.policy_templates_s
        with pytest.raises(UnsupportedOperation):
            templates.policy_template.create()

    def test_delete_raises(self, mgmt_root):
        templates = mgmt_root.tm.asm.policy_templates_s
        with pytest.raises(UnsupportedOperation):
            templates.policy_template.delete()

    def test_modify_raises(self, mgmt_root):
        templates = mgmt_root.tm.asm.policy_templates_s
        with pytest.raises(UnsupportedOperation):
            templates.policy_template.modify()

    def test_refresh(self, mgmt_root):
        first, hashid = return_template(mgmt_root)
        loaded = mgmt_root.tm.asm.policy_templates_s.policy_template.load(
            id=hashid)
        # Attributes must agree both before and after a refresh.
        for attr in ('selfLink', 'title', 'id', 'userDefined'):
            assert getattr(first, attr) == getattr(loaded, attr)
        first.refresh()
        for attr in ('selfLink', 'title', 'id', 'userDefined'):
            assert getattr(first, attr) == getattr(loaded, attr)

    def test_load_no_object(self, mgmt_root):
        templates = mgmt_root.tm.asm.policy_templates_s
        with pytest.raises(HTTPError) as err:
            templates.policy_template.load(id='Lx3553-321')
        assert err.value.response.status_code == 404

    def test_load(self, mgmt_root):
        _, hashid = return_template(mgmt_root)
        loaded = mgmt_root.tm.asm.policy_templates_s.policy_template.load(
            id=hashid)
        link = 'https://localhost/mgmt/tm/asm/policy-templates/'
        assert loaded.selfLink.startswith(link + hashid)
        assert loaded.id == hashid
        assert loaded.userDefined is False

    def test_collection(self, mgmt_root):
        collection = mgmt_root.tm.asm.policy_templates_s.get_collection()
        assert isinstance(collection, list)
        assert len(collection)
        assert isinstance(collection[0], Policy_Template)
| {
"content_hash": "00485601bd16c967db9ec8d0f82a04c9",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 65,
"avg_line_length": 36.63492063492063,
"alnum_prop": 0.6590121317157712,
"repo_name": "F5Networks/f5-common-python",
"id": "e5d70768bde92a21d46533e28a106dcc9c753d53",
"size": "2890",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "f5/bigip/tm/asm/test/functional/test_policy_templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "713"
},
{
"name": "Groovy",
"bytes": "4321"
},
{
"name": "Python",
"bytes": "2705690"
},
{
"name": "Shell",
"bytes": "6398"
}
],
"symlink_target": ""
} |
__revision__ = "src/engine/SCons/Options/BoolOption.py 2009/09/04 16:33:07 david"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False
def BoolOption(*args, **kw):
    """Deprecated alias for SCons.Variables.BoolVariable().

    Emits a DeprecatedOptionsWarning the first time it is called, then
    forwards all arguments to BoolVariable().
    """
    global warned
    if not warned:
        msg = "The BoolOption() function is deprecated; use the BoolVariable() function instead."
        SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
        warned = True
    # apply() is itself deprecated (and removed in Python 3); the extended
    # call syntax is the equivalent and works on every supported Python.
    return SCons.Variables.BoolVariable(*args, **kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "cef2b99698bdd3d794a640b503432aea",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 97,
"avg_line_length": 31.14814814814815,
"alnum_prop": 0.727705112960761,
"repo_name": "cournape/numscons",
"id": "bcecffcff97fd36a294d94801952bb484a01f5b8",
"size": "1995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numscons/scons-local/scons-local-1.2.0/SCons/Options/BoolOption.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1275"
},
{
"name": "FORTRAN",
"bytes": "146"
},
{
"name": "Python",
"bytes": "2033297"
},
{
"name": "Shell",
"bytes": "421"
}
],
"symlink_target": ""
} |
import copy
import io
import os
import shutil
import sys
import tempfile
import textwrap
import pytest
import salt.config
import salt.loader
import salt.utils.files
import salt.utils.versions
from salt.state import HighState
from salt.utils.pydsl import PyDslError
from tests.support.helpers import with_tempdir
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
REQUISITES = ["require", "require_in", "use", "use_in", "watch", "watch_in"]
class CommonTestCaseBoilerplate(TestCase):
    """Shared setup: a throwaway minion config and HighState per test."""

    def setUp(self):
        # Isolated root/state-tree/cache dirs under the test TMP dir,
        # removed on cleanup.
        self.root_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        self.addCleanup(shutil.rmtree, self.root_dir, ignore_errors=True)
        self.state_tree_dir = os.path.join(self.root_dir, "state_tree")
        self.cache_dir = os.path.join(self.root_dir, "cachedir")
        if not os.path.isdir(self.root_dir):
            os.makedirs(self.root_dir)
        if not os.path.isdir(self.state_tree_dir):
            os.makedirs(self.state_tree_dir)
        if not os.path.isdir(self.cache_dir):
            os.makedirs(self.cache_dir)
        self.config = salt.config.minion_config(None)
        self.config["root_dir"] = self.root_dir
        self.config["state_events"] = False
        self.config["id"] = "match"
        self.config["file_client"] = "local"
        self.config["file_roots"] = dict(base=[self.state_tree_dir])
        self.config["cachedir"] = self.cache_dir
        self.config["test"] = False
        self.config["grains"] = salt.loader.grains(self.config)
        self.HIGHSTATE = HighState(self.config)
        self.HIGHSTATE.push_active()

    def tearDown(self):
        try:
            self.HIGHSTATE.pop_active()
        except IndexError:
            # The active stack may already be balanced by the test body.
            pass
        del self.config
        del self.HIGHSTATE

    def state_highstate(self, state, dirpath):
        """Render and apply *state* using *dirpath* as the base file root."""
        opts = copy.copy(self.config)
        opts["file_roots"] = dict(base=[dirpath])
        HIGHSTATE = HighState(opts)
        HIGHSTATE.push_active()
        try:
            high, errors = HIGHSTATE.render_highstate(state)
            if errors:
                import pprint

                pprint.pprint("\n".join(errors))
                pprint.pprint(high)
            out = HIGHSTATE.state.call_high(high)
            # pprint.pprint(out)
        finally:
            HIGHSTATE.pop_active()
class PyDSLRendererTestCase(CommonTestCaseBoilerplate):
    """
    WARNING: If tests in here are flaky, they may need
    to be moved to their own class. Sharing HighState, especially
    through setUp/tearDown can create dangerous race conditions!
    """

    def render_sls(self, content, sls="", saltenv="base", **kws):
        # Render *content* through the pydsl renderer of the shared HighState.
        if "env" in kws:
            # "env" is not supported; Use "saltenv".
            kws.pop("env")
        return self.HIGHSTATE.state.rend["pydsl"](
            io.StringIO(content), saltenv=saltenv, sls=sls, **kws
        )

    @pytest.mark.slow_test
    def test_state_declarations(self):
        result = self.render_sls(
            textwrap.dedent(
                """
                state('A').cmd.run('ls -la', cwd='/var/tmp')
                state().file.managed('myfile.txt', source='salt://path/to/file')
                state('X').cmd('run', 'echo hello world', cwd='/')
                a_cmd = state('A').cmd
                a_cmd.run(shell='/bin/bash')
                state('A').service.running(name='apache')
                """
            )
        )
        self.assertTrue("A" in result and "X" in result)
        A_cmd = result["A"]["cmd"]
        self.assertEqual(A_cmd[0], "run")
        self.assertEqual(A_cmd[1]["name"], "ls -la")
        self.assertEqual(A_cmd[2]["cwd"], "/var/tmp")
        self.assertEqual(A_cmd[3]["shell"], "/bin/bash")
        A_service = result["A"]["service"]
        self.assertEqual(A_service[0], "running")
        self.assertEqual(A_service[1]["name"], "apache")
        X_cmd = result["X"]["cmd"]
        self.assertEqual(X_cmd[0], "run")
        self.assertEqual(X_cmd[1]["name"], "echo hello world")
        self.assertEqual(X_cmd[2]["cwd"], "/")
        del result["A"]
        del result["X"]
        self.assertEqual(len(result), 2)
        # 2 rather than 1 because pydsl adds an extra no-op state
        # declaration.
        s_iter = iter(result.values())
        try:
            s = next(s_iter)["file"]
        except KeyError:
            s = next(s_iter)["file"]
        self.assertEqual(s[0], "managed")
        self.assertEqual(s[1]["name"], "myfile.txt")
        self.assertEqual(s[2]["source"], "salt://path/to/file")

    @pytest.mark.slow_test
    def test_requisite_declarations(self):
        result = self.render_sls(
            textwrap.dedent(
                """
                state('X').cmd.run('echo hello')
                state('A').cmd.run('mkdir tmp', cwd='/var')
                state('B').cmd.run('ls -la', cwd='/var/tmp') \
                .require(state('X').cmd) \
                .require(cmd='A') \
                .watch(service='G')
                state('G').service.running(name='collectd')
                state('G').service.watch_in(state('A').cmd)
                state('H').cmd.require_in(cmd='echo hello')
                state('H').cmd.run('echo world')
                """
            )
        )
        self.assertEqual(len(result), 6)
        self.assertTrue(set("X A B G H".split()).issubset(set(result.keys())))
        b = result["B"]["cmd"]
        self.assertEqual(b[0], "run")
        self.assertEqual(b[1]["name"], "ls -la")
        self.assertEqual(b[2]["cwd"], "/var/tmp")
        self.assertEqual(b[3]["require"][0]["cmd"], "X")
        self.assertEqual(b[4]["require"][0]["cmd"], "A")
        self.assertEqual(b[5]["watch"][0]["service"], "G")
        self.assertEqual(result["G"]["service"][2]["watch_in"][0]["cmd"], "A")
        self.assertEqual(result["H"]["cmd"][1]["require_in"][0]["cmd"], "echo hello")

    @pytest.mark.slow_test
    def test_include_extend(self):
        result = self.render_sls(
            textwrap.dedent(
                """
                include(
                    'some.sls.file',
                    'another.sls.file',
                    'more.sls.file',
                    delayed=True
                )
                A = state('A').cmd.run('echo hoho', cwd='/')
                state('B').cmd.run('echo hehe', cwd='/')
                extend(
                    A,
                    state('X').cmd.run(cwd='/a/b/c'),
                    state('Y').file('managed', name='a_file.txt'),
                    state('Z').service.watch(file='A')
                )
                """
            )
        )
        self.assertEqual(len(result), 4)
        self.assertEqual(
            result["include"],
            [
                {"base": sls}
                for sls in ("some.sls.file", "another.sls.file", "more.sls.file")
            ],
        )
        extend = result["extend"]
        self.assertEqual(extend["X"]["cmd"][0], "run")
        self.assertEqual(extend["X"]["cmd"][1]["cwd"], "/a/b/c")
        self.assertEqual(extend["Y"]["file"][0], "managed")
        self.assertEqual(extend["Y"]["file"][1]["name"], "a_file.txt")
        self.assertEqual(len(extend["Z"]["service"]), 1)
        self.assertEqual(extend["Z"]["service"][0]["watch"][0]["file"], "A")
        self.assertEqual(result["B"]["cmd"][0], "run")
        self.assertTrue("A" not in result)
        self.assertEqual(extend["A"]["cmd"][0], "run")

    @pytest.mark.slow_test
    def test_cmd_call(self):
        result = self.HIGHSTATE.state.call_template_str(
            textwrap.dedent(
                """\
            #!pydsl
            state('A').cmd.run('echo this is state A', cwd='/')
            some_var = 12345
            def do_something(a, b, *args, **kws):
                return dict(result=True, changes={'a': a, 'b': b, 'args': args, 'kws': kws, 'some_var': some_var})
            state('C').cmd.call(do_something, 1, 2, 3, x=1, y=2) \
                .require(state('A').cmd)
            state('G').cmd.wait('echo this is state G', cwd='/') \
                .watch(state('C').cmd)
            """
            )
        )
        ret = next(result[k] for k in result.keys() if "do_something" in k)
        changes = ret["changes"]
        self.assertEqual(
            changes, dict(a=1, b=2, args=(3,), kws=dict(x=1, y=2), some_var=12345)
        )
        ret = next(result[k] for k in result.keys() if "-G_" in k)
        self.assertEqual(ret["changes"]["stdout"], "this is state G")

    @pytest.mark.slow_test
    def test_multiple_state_func_in_state_mod(self):
        with self.assertRaisesRegex(PyDslError, "Multiple state functions"):
            self.render_sls(
                textwrap.dedent(
                    """
                    state('A').cmd.run('echo hoho')
                    state('A').cmd.wait('echo hehe')
                    """
                )
            )

    @pytest.mark.slow_test
    def test_no_state_func_in_state_mod(self):
        with self.assertRaisesRegex(PyDslError, "No state function specified"):
            self.render_sls(
                textwrap.dedent(
                    """
                    state('B').cmd.require(cmd='hoho')
                    """
                )
            )

    @pytest.mark.slow_test
    def test_load_highstate(self):
        result = self.render_sls(
            textwrap.dedent(
                '''
                import salt.utils.yaml
                __pydsl__.load_highstate(salt.utils.yaml.safe_load("""
                A:
                  cmd.run:
                    - name: echo hello
                    - cwd: /
                B:
                  pkg:
                    - installed
                  service:
                    - running
                    - require:
                      - pkg: B
                    - watch:
                      - cmd: A
                """))
                state('A').cmd.run(name='echo hello world')
                '''
            )
        )
        self.assertEqual(len(result), 3)
        self.assertEqual(result["A"]["cmd"][0], "run")
        self.assertIn({"name": "echo hello"}, result["A"]["cmd"])
        self.assertIn({"cwd": "/"}, result["A"]["cmd"])
        self.assertIn({"name": "echo hello world"}, result["A"]["cmd"])
        self.assertEqual(len(result["A"]["cmd"]), 4)
        self.assertEqual(len(result["B"]["pkg"]), 1)
        self.assertEqual(result["B"]["pkg"][0], "installed")
        self.assertEqual(result["B"]["service"][0], "running")
        self.assertIn({"require": [{"pkg": "B"}]}, result["B"]["service"])
        self.assertIn({"watch": [{"cmd": "A"}]}, result["B"]["service"])
        self.assertEqual(len(result["B"]["service"]), 3)

    @pytest.mark.slow_test
    def test_ordered_states(self):
        result = self.render_sls(
            textwrap.dedent(
                """
                __pydsl__.set(ordered=True)
                A = state('A')
                state('B').cmd.run('echo bbbb')
                A.cmd.run('echo aaa')
                state('B').cmd.run(cwd='/')
                state('C').cmd.run('echo ccc')
                state('B').file.managed(source='/a/b/c')
                """
            )
        )
        self.assertEqual(len(result["B"]["cmd"]), 3)
        self.assertEqual(result["A"]["cmd"][1]["require"][0]["cmd"], "B")
        self.assertEqual(result["C"]["cmd"][1]["require"][0]["cmd"], "A")
        self.assertEqual(result["B"]["file"][1]["require"][0]["cmd"], "C")

    @with_tempdir()
    @pytest.mark.slow_test
    def test_pipe_through_stateconf(self, dirpath):
        output = os.path.join(dirpath, "output")
        write_to(
            os.path.join(dirpath, "xxx.sls"),
            textwrap.dedent(
                """#!stateconf -os yaml . jinja
            .X:
              cmd.run:
                - name: echo X >> {0}
                - cwd: /
            .Y:
              cmd.run:
                - name: echo Y >> {0}
                - cwd: /
            .Z:
              cmd.run:
                - name: echo Z >> {0}
                - cwd: /
            """.format(
                    output.replace("\\", "/")
                )
            ),
        )
        write_to(
            os.path.join(dirpath, "yyy.sls"),
            textwrap.dedent(
                """\
            #!pydsl|stateconf -ps
            __pydsl__.set(ordered=True)
            state('.D').cmd.run('echo D >> {0}', cwd='/')
            state('.E').cmd.run('echo E >> {0}', cwd='/')
            state('.F').cmd.run('echo F >> {0}', cwd='/')
            """.format(
                    output.replace("\\", "/")
                )
            ),
        )
        write_to(
            os.path.join(dirpath, "aaa.sls"),
            textwrap.dedent(
                """\
            #!pydsl|stateconf -ps
            include('xxx', 'yyy')
            # make all states in xxx run BEFORE states in this sls.
            extend(state('.start').stateconf.require(stateconf='xxx::goal'))
            # make all states in yyy run AFTER this sls.
            extend(state('.goal').stateconf.require_in(stateconf='yyy::start'))
            __pydsl__.set(ordered=True)
            state('.A').cmd.run('echo A >> {0}', cwd='/')
            state('.B').cmd.run('echo B >> {0}', cwd='/')
            state('.C').cmd.run('echo C >> {0}', cwd='/')
            """.format(
                    output.replace("\\", "/")
                )
            ),
        )
        self.state_highstate({"base": ["aaa"]}, dirpath)
        with salt.utils.files.fopen(output, "r") as f:
            self.assertEqual("".join(f.read().split()), "XYZABCDEF")

    @with_tempdir()
    @pytest.mark.slow_test
    def test_compile_time_state_execution(self, dirpath):
        if not sys.stdin.isatty():
            self.skipTest("Not attached to a TTY")
        # The Windows shell will include any spaces before the redirect
        # in the text that is redirected.
        # For example: echo hello > test.txt will contain "hello "
        write_to(
            os.path.join(dirpath, "aaa.sls"),
            textwrap.dedent(
                """\
            #!pydsl
            __pydsl__.set(ordered=True)
            A = state('A')
            A.cmd.run('echo hehe>{0}/zzz.txt', cwd='/')
            A.file.managed('{0}/yyy.txt', source='salt://zzz.txt')
            A()
            A()
            state().cmd.run('echo hoho>>{0}/yyy.txt', cwd='/')
            A.file.managed('{0}/xxx.txt', source='salt://zzz.txt')
            A()
            """.format(
                    dirpath.replace("\\", "/")
                )
            ),
        )
        self.state_highstate({"base": ["aaa"]}, dirpath)
        with salt.utils.files.fopen(os.path.join(dirpath, "yyy.txt"), "rt") as f:
            self.assertEqual(f.read(), "hehe" + os.linesep + "hoho" + os.linesep)
        with salt.utils.files.fopen(os.path.join(dirpath, "xxx.txt"), "rt") as f:
            self.assertEqual(f.read(), "hehe" + os.linesep)

    @with_tempdir()
    @pytest.mark.slow_test
    def test_nested_high_state_execution(self, dirpath):
        # (unused `output` local removed)
        write_to(
            os.path.join(dirpath, "aaa.sls"),
            textwrap.dedent(
                """\
                #!pydsl
                __salt__['state.sls']('bbb')
                state().cmd.run('echo bbbbbb', cwd='/')
                """
            ),
        )
        write_to(
            os.path.join(dirpath, "bbb.sls"),
            textwrap.dedent(
                """
                # {{ salt['state.sls']('ccc') }}
                test:
                  cmd.run:
                    - name: echo bbbbbbb
                    - cwd: /
                """
            ),
        )
        write_to(
            os.path.join(dirpath, "ccc.sls"),
            textwrap.dedent(
                """
                #!pydsl
                state().cmd.run('echo ccccc', cwd='/')
                """
            ),
        )
        self.state_highstate({"base": ["aaa"]}, dirpath)

    @with_tempdir()
    @pytest.mark.slow_test
    def test_repeat_includes(self, dirpath):
        # (unused `output` local removed)
        write_to(
            os.path.join(dirpath, "b.sls"),
            textwrap.dedent(
                """\
                #!pydsl
                include('c')
                include('d')
                """
            ),
        )
        write_to(
            os.path.join(dirpath, "c.sls"),
            textwrap.dedent(
                """\
                #!pydsl
                modtest = include('e')
                modtest.success
                """
            ),
        )
        write_to(
            os.path.join(dirpath, "d.sls"),
            textwrap.dedent(
                """\
                #!pydsl
                modtest = include('e')
                modtest.success
                """
            ),
        )
        write_to(
            os.path.join(dirpath, "e.sls"),
            textwrap.dedent(
                """\
                #!pydsl
                success = True
                """
            ),
        )
        self.state_highstate({"base": ["b"]}, dirpath)
        self.state_highstate({"base": ["c", "d"]}, dirpath)
def write_to(fpath, content):
    """Write *content* to *fpath*, truncating any existing file."""
    with salt.utils.files.fopen(fpath, "w") as handle:
        handle.write(content)
| {
"content_hash": "ee10cae4687814ba34ccb36d4a74f71a",
"timestamp": "",
"source": "github",
"line_count": 517,
"max_line_length": 114,
"avg_line_length": 33.10058027079304,
"alnum_prop": 0.47840822766318003,
"repo_name": "saltstack/salt",
"id": "33d131bca8b5728a45f643a0edc5f98ed0bcb95d",
"size": "17113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/utils/test_pydsl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
"""
Exploring correlations between two variables
"""
__author__ = 'Diego'
import PyQt4.QtGui as QtGui
import PyQt4.QtCore as QtCore
import matplotlib
from vcorr.qt_models import VarListModel
matplotlib.use("Qt4Agg")
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
from vcorr.gui.correlations_gui import Ui_correlation_app
import numpy as np
import seaborn as sns
import scipy.stats
import pandas as pd
class CorrelationMatrixFigure(FigureCanvas):
    """Qt canvas showing a seaborn correlation matrix of selected columns.

    Emits ``SquareSelected`` with a two-column DataFrame when a cell
    above the diagonal is clicked.
    """

    SquareSelected = QtCore.pyqtSignal(pd.DataFrame)

    def __init__(self,large_df):
        # large_df: full DataFrame; subsets of its columns get plotted.
        self.f, self.ax = plt.subplots(figsize=(9, 9))
        plt.tight_layout()
        super(CorrelationMatrixFigure, self).__init__(self.f)
        palette = self.palette()
        # Blend the matplotlib figure into the Qt widget background.
        self.f.set_facecolor(palette.background().color().getRgbF()[0:3])
        self.df = None
        self.corr = None
        self.cmap = sns.blend_palette(["#00008B", "#6A5ACD", "#F0F8FF",
                                       "#FFE6F8", "#C71585", "#8B0000"], as_cmap=True)
        self.mpl_connect("motion_notify_event", self.get_tooltip_message)
        self.mpl_connect("button_press_event",self.square_clicked)
        self.large_df = large_df
        self.on_draw()

    def on_draw(self):
        """Redraw the matrix, or a placeholder message when <2 variables."""
        plt.sca(self.ax)
        plt.clf()
        self.ax = plt.axes()
        if self.df is None:
            message = "Select two or more variables from list"
            self.ax.text(0.5, 0.5, message, horizontalalignment='center',
                         verticalalignment='center', fontsize=16)
        else:
            plt.sca(self.ax)
            sns.corrplot(self.df, annot=False, sig_stars=True, cmap_range="full",
                         diag_names=False, sig_corr=False, cmap=self.cmap, ax=self.ax, cbar=True)
        plt.tight_layout()
        self.draw()

    def set_variables(self, vars_list):
        """Set the plotted columns; fewer than two clears the plot."""
        #print vars_list
        if len(vars_list) < 2:
            self.df = None
            self.corr = None
        else:
            self.df = self.large_df[vars_list].copy()
            self.corr = self.df.corr()
        self.on_draw()

    def get_tooltip_message(self, event):
        """Show an 'x v.s. y: r = ...' tooltip for the hovered cell."""
        QtGui.QToolTip.hideText()
        if event.inaxes == self.ax and self.df is not None:
            x_int, y_int = int(round(event.xdata)), int(round(event.ydata))
            if y_int <= x_int:
                # Only cells above the diagonal carry a correlation.
                return
            x_name, y_name = self.df.columns[x_int], self.df.columns[y_int]
            r = self.corr.loc[x_name, y_name]
            message = "%s v.s. %s: r = %.2f" % (x_name, y_name, r)
            _, height = self.get_width_height()
            # matplotlib's y axis grows upward; Qt's grows downward.
            point = QtCore.QPoint(event.x, height - event.y)
            g_point = self.mapToGlobal(point)
            QtGui.QToolTip.showText(g_point, message)

    def square_clicked(self,event):
        """Emit SquareSelected with the two columns of the clicked cell."""
        if event.inaxes == self.ax and self.df is not None:
            x_int , y_int = int(round(event.xdata)) , int(round(event.ydata))
            if y_int<=x_int:
                return
            x_name,y_name = self.df.columns[x_int],self.df.columns[y_int]
            df2 = self.df[[x_name,y_name]]
            self.SquareSelected.emit(df2)
class RegFigure(FigureCanvas):
def __init__(self):
self.f, self.ax = plt.subplots(figsize=(9, 9))
super(RegFigure, self).__init__(self.f)
palette = self.palette()
self.f.set_facecolor(palette.background().color().getRgbF()[0:3])
self.draw_initial_message()
self.mpl_connect("motion_notify_event",self.motion_to_pick)
self.mpl_connect("pick_event",self.draw_tooltip)
self.hidden_subjs = set()
self.df = None
self.df2 = None
self.dfh = None
self.scatter_h_artist=None
self.limits = None
def draw_initial_message(self):
self.ax.clear()
message = "Click in the correlation matrix"
self.ax.text(0.5, 0.5, message, horizontalalignment='center',
verticalalignment='center', fontsize=16)
plt.sca(self.ax)
plt.tight_layout()
self.draw()
def draw_reg(self,df):
assert df.shape[1] == 2
#print df
self.ax.clear()
plt.sca(self.ax)
plt.sca(self.ax)
df = df.dropna()
self.df = df.copy()
plt.tight_layout()
self.limits = None
self.ax.set_xlim(auto=True)
self.ax.set_ylim(auto=True)
self.re_draw_reg()
def re_draw_reg(self):
self.ax.clear()
i2 = [i for i in self.df.index if i not in self.hidden_subjs]
df2 = self.df.loc[i2]
self.df2 = df2
y_name,x_name = df2.columns
x_vals=df2[x_name].get_values()
y_vals=df2[y_name].get_values()
sns.regplot(x_name,y_name,df2,ax=self.ax,scatter_kws={"picker":5,})
mat = np.column_stack((x_vals,y_vals))
mat = mat[np.all(np.isfinite(mat),1),]
m,b,r,p,e = scipy.stats.linregress(mat)
plot_title = "r=%.2f p=%.5g"%(r,p)
self.ax.set_title(plot_title)
#print e
self.ax.set_title(plot_title)
if self.limits is not None:
xl,yl = self.limits
self.ax.set_xlim(xl[0],xl[1],auto=False)
self.ax.set_ylim(yl[0],yl[1],auto=False)
else:
self.limits = (self.ax.get_xlim(),self.ax.get_ylim())
ih = [i for i in self.df.index if i in self.hidden_subjs]
dfh = self.df.loc[ih]
self.dfh = dfh
current_color = matplotlib.rcParams["axes.color_cycle"][0]
self.scatter_h_artist=self.ax.scatter(dfh[x_name].get_values(),dfh[y_name].get_values(),
edgecolors=current_color,facecolors="None",urls=ih,picker=2)
self.draw()
def motion_to_pick(self,event):
self.ax.pick(event)
def draw_tooltip(self,event):
QtGui.QToolTip.hideText()
mouse_event = event.mouseevent
if isinstance(event.artist,matplotlib.collections.PathCollection):
index = event.ind
message_pieces=[]
#if the pick involves different subjects
if event.artist == self.scatter_h_artist:
dfp = self.dfh
else:
dfp = self.df2
for i in index:
datum = dfp.iloc[[i]]
message = "Subject %s\n%s : %g\n%s : %g"%\
(datum.index[0],
datum.columns[0],datum.iloc[0,0],
datum.columns[1],datum.iloc[0,1],)
message_pieces.append(message)
big_message="\n\n".join(message_pieces)
_,height = self.get_width_height()
point = QtCore.QPoint(event.mouseevent.x,height-event.mouseevent.y)
g_point = self.mapToGlobal(point)
QtGui.QToolTip.showText(g_point,big_message)
if mouse_event.button == 1:
if len(index) == 1:
name = datum.index[0]
if event.artist == self.scatter_h_artist:
print "recovering %s"%name
self.hidden_subjs.remove(name)
else:
print "hidding %s"%name
self.hidden_subjs.add(name)
self.re_draw_reg()
def selection_changed(self,selection):
if self.df is None:
return
sel_set = set(selection)
current_vars = set(self.df.columns)
if not current_vars <= sel_set:
#current vars are not contained in current selection
self.df = None
self.draw_initial_message()
    def handle_clicks(self, event):
        """Intentionally a no-op; click handling currently happens in draw_tooltip."""
        pass
class CorrelationsApp(QtGui.QMainWindow):
    """Main window wiring the variable list, correlation matrix and scatter plot.

    Checking variables updates both figures; clicking a matrix cell draws
    the corresponding regression; the File menu saves either figure.
    """

    def __init__(self, data_frame):
        super(CorrelationsApp, self).__init__()
        self.ui = None
        self.cor_mat = CorrelationMatrixFigure(data_frame)
        self.reg_plot = RegFigure()
        self.vars_model = VarListModel(checkeable=True)
        self.vars_model.set_variables(data_frame.columns)
        self.setup_ui()

    def setup_ui(self):
        """Build widgets and connect model/figure signals."""
        self.ui = Ui_correlation_app()
        self.ui.setupUi(self)
        self.ui.variables_list.setModel(self.vars_model)
        self.ui.cor_layout = QtGui.QHBoxLayout()
        self.ui.cor_mat_frame.setLayout(self.ui.cor_layout)
        self.ui.cor_layout.addWidget(self.cor_mat)
        self.vars_model.CheckedChanged.connect(self.cor_mat.set_variables)
        self.vars_model.CheckedChanged.connect(self.reg_plot.selection_changed)
        self.ui.reg_layout = QtGui.QHBoxLayout()
        self.ui.reg_frame.setLayout(self.ui.reg_layout)
        self.ui.reg_layout.addWidget(self.reg_plot)
        self.cor_mat.SquareSelected.connect(self.reg_plot.draw_reg)
        self.ui.actionSave_Matrix.triggered.connect(self.save_matrix)
        self.ui.actionSave_Scatter.triggered.connect(self.save_reg)

    def save_matrix(self):
        """Save the correlation-matrix figure; do nothing if the dialog is cancelled."""
        filename = unicode(QtGui.QFileDialog.getSaveFileName(self,
            "Save Matrix", ".", "PDF (*.pdf);;PNG (*.png);;svg (*.svg)"))
        if not filename:
            # dialog cancelled: previously this called savefig("") and raised
            return
        self.cor_mat.f.savefig(filename)

    def save_reg(self):
        """Save the scatter/regression figure; do nothing if the dialog is cancelled."""
        filename = unicode(QtGui.QFileDialog.getSaveFileName(self,
            "Save Scatter", ".", "PDF (*.pdf);;PNG (*.png);;svg (*.svg)"))
        if not filename:
            # dialog cancelled
            return
        self.reg_plot.f.savefig(filename)
def remove_non_ascii(s):
    """Return *s* with every non-ASCII character removed, as a plain str."""
    kept = [ch for ch in s if ord(ch) < 128]
    return str("".join(kept))
if __name__ == "__main__":
    # Ask the user for a CSV, load it, then launch the correlation viewer.
    app = QtGui.QApplication([])
    qt_name = QtGui.QFileDialog.getOpenFileName(None, "Select Data", ".", "csv (*.csv)")
    file_name = str(qt_name.toAscii())
    try:
        with open(file_name) as in_file:
            df = pd.read_csv(in_file, na_values="#NULL!", index_col=0)
    except Exception:
        # fall through to the alternate-locale attempt below
        df = None
    if df is None or len(df.columns) == 0:
        #try again with "french" excel defaults
        df = pd.read_csv(file_name, index_col=0, sep=";", decimal=",")
    # column labels must be ASCII for downstream widgets
    df.columns = map(remove_non_ascii, df.columns)
    main_window = CorrelationsApp(df)
    main_window.show()
    app.exec_()
| {
"content_hash": "72ef744f245c73b08b505766afa97bb8",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 121,
"avg_line_length": 37.356617647058826,
"alnum_prop": 0.5677590788308238,
"repo_name": "diego0020/correlation_viewer",
"id": "41e596a69c78cf7d992b22f61cfc3644bdb51c07",
"size": "10161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "view_correlations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19459"
}
],
"symlink_target": ""
} |
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# No extra Sphinx extensions are enabled for this project.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'yfc-social'
copyright = u"2015, Nikko Comidoy"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# Ignore the build output directory when collecting sources.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# Static assets copied verbatim into the built HTML (after builtin files).
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
# Output file base name for the HTML help builder.
htmlhelp_basename = 'yfc-socialdoc'
# -- Options for LaTeX output --------------------------------------------------
# LaTeX builder knobs; all defaults are accepted here.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index',
     'yfc-social.tex',
     u'yfc-social Documentation',
     u"Nikko Comidoy", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'yfc-social', u'yfc-social Documentation',
     [u"Nikko Comidoy"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# One entry per Texinfo document:
# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    ('index', 'yfc-social', u'yfc-social Documentation',
     u"Nikko Comidoy", 'yfc-social',
     'This is a YFC Social Site', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| {
"content_hash": "11c4d55071f8ae9729d686491f55fc82",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 80,
"avg_line_length": 31.982758620689655,
"alnum_prop": 0.692722371967655,
"repo_name": "nikkomidoy/yfc-social-site",
"id": "5ec0ccf2d5871383a4d504750ada8c7d1b2d73ec",
"size": "7813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1212"
},
{
"name": "HTML",
"bytes": "20238"
},
{
"name": "JavaScript",
"bytes": "3507"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "39989"
},
{
"name": "Shell",
"bytes": "4523"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
import datetime
import decimal
from io import BytesIO
import pytest
from openpyxl.xml.functions import fromstring, tostring, xmlfile
from openpyxl.reader.excel import load_workbook
from openpyxl import Workbook
from .. worksheet import write_worksheet
from .. relations import write_rels
from openpyxl.tests.helper import compare_xml
from openpyxl.worksheet.properties import PageSetupProperties
from openpyxl.xml.constants import SHEET_MAIN_NS, REL_NS
@pytest.fixture
def worksheet():
    """Return the active sheet of a fresh in-memory workbook."""
    # Workbook is already imported at module level; the local re-import was redundant.
    wb = Workbook()
    return wb.active
@pytest.fixture
def DummyWorksheet():
    """Minimal worksheet stand-in: styles, column dimensions, parent workbook."""
    class DummyWorksheet:
        def __init__(self):
            self._styles = {}
            self.column_dimensions = {}
            self.parent = Workbook()
    sheet = DummyWorksheet()
    return sheet
@pytest.fixture
def write_cols():
    """Expose the column writer under test."""
    from .. worksheet import write_cols as writer
    return writer
@pytest.fixture
def ColumnDimension():
    """Expose the ColumnDimension class for the column tests."""
    from openpyxl.worksheet.dimensions import ColumnDimension as cls
    return cls
def test_no_cols(write_cols, DummyWorksheet):
    """A sheet with no column dimensions serializes to no <cols> element."""
    assert write_cols(DummyWorksheet) is None
def test_col_widths(write_cols, ColumnDimension, DummyWorksheet):
    """An explicit column width yields a customWidth <col> entry."""
    sheet = DummyWorksheet
    sheet.column_dimensions['A'] = ColumnDimension(worksheet=sheet, width=4)
    expected = """<cols><col width="4" min="1" max="1" customWidth="1"></col></cols>"""
    diff = compare_xml(tostring(write_cols(sheet)), expected)
    assert diff is None, diff
def test_col_style(write_cols, ColumnDimension, DummyWorksheet):
    """A column carrying a font is written with a style index."""
    from openpyxl.styles import Font
    sheet = DummyWorksheet
    dim = ColumnDimension(worksheet=sheet)
    sheet.column_dimensions['A'] = dim
    dim.font = Font(color="FF0000")
    expected = """<cols><col max="1" min="1" style="1"></col></cols>"""
    diff = compare_xml(tostring(write_cols(sheet)), expected)
    assert diff is None, diff
def test_lots_cols(write_cols, ColumnDimension, DummyWorksheet):
    """Fourteen styled columns each get their own sequential style index."""
    from openpyxl.styles import Font
    ws = DummyWorksheet
    from openpyxl.cell import get_column_letter
    for i in range(1, 15):
        label = get_column_letter(i)
        cd = ColumnDimension(worksheet=ws)
        cd.font = Font(name=label)
        dict(cd)  # create style_id in order for test
        ws.column_dimensions[label] = cd
    cols = write_cols(ws)
    xml = tostring(cols)
    expected = """<cols>
    <col max="1" min="1" style="1"></col>
    <col max="2" min="2" style="2"></col>
    <col max="3" min="3" style="3"></col>
    <col max="4" min="4" style="4"></col>
    <col max="5" min="5" style="5"></col>
    <col max="6" min="6" style="6"></col>
    <col max="7" min="7" style="7"></col>
    <col max="8" min="8" style="8"></col>
    <col max="9" min="9" style="9"></col>
    <col max="10" min="10" style="10"></col>
    <col max="11" min="11" style="11"></col>
    <col max="12" min="12" style="12"></col>
    <col max="13" min="13" style="13"></col>
    <col max="14" min="14" style="14"></col>
    </cols>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
@pytest.fixture
def write_format():
    """Expose the sheet-format writer under test."""
    from .. worksheet import write_format as writer
    return writer
def test_sheet_format(write_format, ColumnDimension, DummyWorksheet):
    """Default sheet format carries only row height and base column width."""
    expected = """<sheetFormatPr defaultRowHeight="15" baseColWidth="10"/>"""
    rendered = tostring(write_format(DummyWorksheet))
    diff = compare_xml(expected, rendered)
    assert diff is None, diff
def test_outline_format(write_format, ColumnDimension, DummyWorksheet):
    """An outlined column raises outlineLevelCol in the sheet format."""
    sheet = DummyWorksheet
    sheet.column_dimensions['A'] = ColumnDimension(worksheet=sheet,
                                                   outline_level=1)
    expected = """<sheetFormatPr defaultRowHeight="15" baseColWidth="10" outlineLevelCol="1" />"""
    rendered = tostring(write_format(sheet))
    diff = compare_xml(expected, rendered)
    assert diff is None, diff
def test_outline_cols(write_cols, ColumnDimension, DummyWorksheet):
    """An outlined column is written with an outlineLevel attribute."""
    sheet = DummyWorksheet
    sheet.column_dimensions['A'] = ColumnDimension(worksheet=sheet,
                                                   outline_level=1)
    expected = """<cols><col max="1" min="1" outlineLevel="1"/></cols>"""
    rendered = tostring(write_cols(sheet))
    diff = compare_xml(expected, rendered)
    assert diff is None, diff
@pytest.fixture
def write_rows():
    """Expose the streaming row writer under test."""
    from .. etree_worksheet import write_rows as writer
    return writer
@pytest.mark.parametrize("value, expected",
    [
        (9781231231230, """<c t="n" r="A1"><v>9781231231230</v></c>"""),
        (decimal.Decimal('3.14'), """<c t="n" r="A1"><v>3.14</v></c>"""),
        (1234567890, """<c t="n" r="A1"><v>1234567890</v></c>"""),
        ("=sum(1+1)", """<c r="A1"><f>sum(1+1)</f><v></v></c>"""),
        (True, """<c t="b" r="A1"><v>1</v></c>"""),
        ("Hello", """<c t="s" r="A1"><v>0</v></c>"""),
        ("", """<c r="A1" t="s"></c>"""),
        (None, """<c r="A1" t="n"></c>"""),
        (datetime.date(2011, 12, 25), """<c r="A1" t="n" s="1"><v>40902</v></c>"""),
    ])
def test_write_cell(worksheet, value, expected):
    """Each Python value type serializes to the matching <c> cell element."""
    # removed unused local import of openpyxl.cell.Cell
    from .. etree_worksheet import write_cell
    ws = worksheet
    ws['A1'] = value
    el = write_cell(ws, ws['A1'])
    xml = tostring(el)
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_write_formula(worksheet, write_rows):
    """Plain and shared formulas are streamed with the right <f> attributes."""
    ws = worksheet
    ws.cell('F1').value = 10
    ws.cell('F2').value = 32
    ws.cell('F3').value = '=F1+F2'
    ws.cell('A4').value = '=A1+A2+A3'
    # A4 is the master of a shared-formula group spanning A4:C4
    ws.formula_attributes['A4'] = {'t': 'shared', 'ref': 'A4:C4', 'si': '0'}
    ws.cell('B4').value = '=1'
    ws.formula_attributes['B4'] = {'t': 'shared', 'si': '0'}
    ws.cell('C4').value = '=1'
    ws.formula_attributes['C4'] = {'t': 'shared', 'si': '0'}
    out = BytesIO()
    with xmlfile(out) as xf:
        write_rows(xf, ws)
    xml = out.getvalue()
    expected = """
    <sheetData>
    <row r="1" spans="1:6">
    <c r="F1" t="n">
    <v>10</v>
    </c>
    </row>
    <row r="2" spans="1:6">
    <c r="F2" t="n">
    <v>32</v>
    </c>
    </row>
    <row r="3" spans="1:6">
    <c r="F3">
    <f>F1+F2</f>
    <v></v>
    </c>
    </row>
    <row r="4" spans="1:6">
    <c r="A4">
    <f ref="A4:C4" si="0" t="shared">A1+A2+A3</f>
    <v></v>
    </c>
    <c r="B4">
    <f si="0" t="shared"></f>
    <v></v>
    </c>
    <c r="C4">
    <f si="0" t="shared"></f>
    <v></v>
    </c>
    </row>
    </sheetData>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_write_height(worksheet, write_rows):
    """Custom row heights are written even for rows that hold no cells."""
    ws = worksheet
    ws.cell('F1').value = 10
    ws.row_dimensions[ws.cell('F1').row].height = 30
    ws.row_dimensions[ws.cell('F2').row].height = 30
    ws._garbage_collect()
    out = BytesIO()
    with xmlfile(out) as xf:
        write_rows(xf, ws)
    xml = out.getvalue()
    expected = """
    <sheetData>
    <row customHeight="1" ht="30" r="1" spans="1:6">
    <c r="F1" t="n">
    <v>10</v>
    </c>
    </row>
    <row customHeight="1" ht="30" r="2" spans="1:6"></row>
    </sheetData>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_get_rows_to_write(worksheet):
    """Rows with only a height (no cells) still appear, with zero cells."""
    from .. etree_worksheet import get_rows_to_write
    sheet = worksheet
    sheet.cell('A10').value = "test"
    sheet.row_dimensions[sheet.cell('A10').row].height = 30
    sheet.row_dimensions[sheet.cell('C2').row].height = 30
    sheet._garbage_collect()
    rows = get_rows_to_write(sheet)
    assert len(rows) == 2
    assert len(rows[10]) == 1
    assert len(rows[2]) == 0
@pytest.fixture
def write_autofilter():
    """Expose the autofilter writer under test."""
    from .. lxml_worksheet import write_autofilter as writer
    return writer
def test_auto_filter(worksheet, write_autofilter):
    """A bare filter range produces a minimal <autoFilter> element."""
    sheet = worksheet
    sheet.auto_filter.ref = 'A1:F1'
    expected = """<autoFilter ref="A1:F1"></autoFilter>"""
    rendered = tostring(write_autofilter(sheet))
    diff = compare_xml(rendered, expected)
    assert diff is None, diff
def test_auto_filter_filter_column(worksheet, write_autofilter):
    """A filter column with values and blanks nests inside the autoFilter."""
    ws = worksheet
    ws.auto_filter.ref = 'A1:F1'
    ws.auto_filter.add_filter_column(0, ["0"], blank=True)
    af = write_autofilter(ws)
    xml = tostring(af)
    expected = """
    <autoFilter ref="A1:F1">
    <filterColumn colId="0">
    <filters blank="1">
    <filter val="0"></filter>
    </filters>
    </filterColumn>
    </autoFilter>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_auto_filter_sort_condition(worksheet, write_autofilter):
    """A descending sort condition is written inside a sortState element."""
    ws = worksheet
    ws.cell('A1').value = 'header'
    ws.cell('A2').value = 1
    ws.cell('A3').value = 0
    ws.auto_filter.ref = 'A2:A3'
    ws.auto_filter.add_sort_condition('A2:A3', descending=True)
    af = write_autofilter(ws)
    xml = tostring(af)
    # NOTE(review): "sortCondtion" below looks misspelled but presumably matches
    # the writer's actual output — confirm against the library before changing.
    expected = """
    <autoFilter ref="A2:A3">
    <sortState ref="A2:A3">
    <sortCondtion descending="1" ref="A2:A3"></sortCondtion>
    </sortState>
    </autoFilter>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_auto_filter_worksheet(worksheet, write_worksheet):
    """A filter range appears as <autoFilter> in the full worksheet document."""
    worksheet.auto_filter.ref = 'A1:F1'
    xml = write_worksheet(worksheet, None)
    expected = """
    <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <sheetPr>
    <outlinePr summaryBelow="1" summaryRight="1"/>
    <pageSetUpPr/>
    </sheetPr>
    <dimension ref="A1:A1"/>
    <sheetViews>
    <sheetView workbookViewId="0">
    <selection activeCell="A1" sqref="A1"/>
    </sheetView>
    </sheetViews>
    <sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
    <sheetData/>
    <autoFilter ref="A1:F1"/>
    <pageMargins bottom="1" footer="0.5" header="0.5" left="0.75" right="0.75" top="1"/>
    </worksheet>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_merge(worksheet):
    """Merging two cells emits a single mergeCell entry with count=1."""
    from .. worksheet import write_mergecells
    sheet = worksheet
    sheet.cell('A1').value = 'Cell A1'
    sheet.cell('B1').value = 'Cell B1'
    sheet.merge_cells('A1:B1')
    expected = """
    <mergeCells count="1">
    <mergeCell ref="A1:B1"/>
    </mergeCells>
    """
    rendered = tostring(write_mergecells(sheet))
    diff = compare_xml(rendered, expected)
    assert diff is None, diff
def test_no_merge(worksheet):
    """No merged ranges means no <mergeCells> element at all."""
    from .. worksheet import write_mergecells
    assert write_mergecells(worksheet) is None
def test_header_footer(worksheet):
    """All six header/footer slots serialize into the &L/&C/&R control string."""
    ws = worksheet
    ws.header_footer.left_header.text = "Left Header Text"
    ws.header_footer.center_header.text = "Center Header Text"
    ws.header_footer.center_header.font_name = "Arial,Regular"
    ws.header_footer.center_header.font_size = 6
    ws.header_footer.center_header.font_color = "445566"
    ws.header_footer.right_header.text = "Right Header Text"
    ws.header_footer.right_header.font_name = "Arial,Bold"
    ws.header_footer.right_header.font_size = 8
    ws.header_footer.right_header.font_color = "112233"
    ws.header_footer.left_footer.text = "Left Footer Text\nAnd &[Date] and &[Time]"
    ws.header_footer.left_footer.font_name = "Times New Roman,Regular"
    ws.header_footer.left_footer.font_size = 10
    ws.header_footer.left_footer.font_color = "445566"
    ws.header_footer.center_footer.text = "Center Footer Text &[Path]&[File] on &[Tab]"
    ws.header_footer.center_footer.font_name = "Times New Roman,Bold"
    ws.header_footer.center_footer.font_size = 12
    ws.header_footer.center_footer.font_color = "778899"
    ws.header_footer.right_footer.text = "Right Footer Text &[Page] of &[Pages]"
    ws.header_footer.right_footer.font_name = "Times New Roman,Italic"
    ws.header_footer.right_footer.font_size = 14
    ws.header_footer.right_footer.font_color = "AABBCC"
    from .. lxml_worksheet import write_header_footer
    hf = write_header_footer(ws)
    xml = tostring(hf)
    expected = """
    <headerFooter>
    <oddHeader>&amp;L&amp;"Calibri,Regular"&amp;K000000Left Header Text&amp;C&amp;"Arial,Regular"&amp;6&amp;K445566Center Header Text&amp;R&amp;"Arial,Bold"&amp;8&amp;K112233Right Header Text</oddHeader>
    <oddFooter>&amp;L&amp;"Times New Roman,Regular"&amp;10&amp;K445566Left Footer Text_x000D_And &amp;D and &amp;T&amp;C&amp;"Times New Roman,Bold"&amp;12&amp;K778899Center Footer Text &amp;Z&amp;F on &amp;A&amp;R&amp;"Times New Roman,Italic"&amp;14&amp;KAABBCCRight Footer Text &amp;P of &amp;N</oddFooter>
    </headerFooter>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_no_header(worksheet):
    """An untouched sheet produces no headerFooter element."""
    from .. lxml_worksheet import write_header_footer
    assert write_header_footer(worksheet) is None
def test_hyperlink(worksheet):
    """A cell hyperlink serializes with a relationship id and display text."""
    from .. lxml_worksheet import write_hyperlinks
    ws = worksheet
    ws.cell('A1').value = "test"
    ws.cell('A1').hyperlink = "http://test.com"
    hyper = write_hyperlinks(ws)
    xml = tostring(hyper)
    expected = """
    <hyperlinks xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
    <hyperlink display="http://test.com" r:id="rId1" ref="A1"/>
    </hyperlinks>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_no_hyperlink(worksheet):
    """A sheet without hyperlinks produces no <hyperlinks> element."""
    from .. lxml_worksheet import write_hyperlinks
    # renamed the local from the ambiguous single letter `l` (PEP 8 / E741)
    links = write_hyperlinks(worksheet)
    assert links is None
@pytest.mark.xfail
@pytest.mark.pil_required
def test_write_hyperlink_image_rels(Workbook, Image, datadir):
    """Known failure: hyperlink + image on one cell yields duplicate rel ids."""
    datadir.chdir()
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('A1').value = "test"
    ws.cell('A1').hyperlink = "http://test.com/"
    i = Image("plain.png")
    ws.add_image(i)
    # deliberate raise keeps this xfail until the duplicate-id bug is fixed
    raise ValueError("Resulting file is invalid")
    # TODO write integration test with duplicate relation ids then fix
def test_page_breaks(worksheet):
    """A manual row break is written as a <brk> inside rowBreaks."""
    from ..worksheet import write_pagebreaks
    ws = worksheet
    ws.page_breaks = [1]
    xml = tostring(write_pagebreaks(ws))
    expected = """
    <rowBreaks count="1" manualBreakCount="1">
    <brk id="1" man="true" max="16383" min="0"></brk>
    </rowBreaks>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_no_pagebreaks(worksheet):
    """No manual breaks means no rowBreaks element."""
    from .. worksheet import write_pagebreaks
    assert write_pagebreaks(worksheet) is None
@pytest.fixture
def worksheet_with_cf(worksheet):
    """Worksheet fixture with a fresh ConditionalFormatting attached."""
    from openpyxl.formatting import ConditionalFormatting
    # fixed attribute typo: was "conditional_formating", which silently
    # assigned a dead attribute instead of replacing the real one
    worksheet.conditional_formatting = ConditionalFormatting()
    return worksheet
@pytest.fixture
def write_conditional_formatting():
    """Expose the conditional-formatting writer under test."""
    from .. lxml_worksheet import write_conditional_formatting as writer
    return writer
def test_conditional_formatting_customRule(worksheet_with_cf, write_conditional_formatting):
    """An expression rule serializes with its formula and stopIfTrue flag."""
    # removed the redundant local import that shadowed the injected
    # write_conditional_formatting fixture parameter
    ws = worksheet_with_cf
    ws.conditional_formatting.add('C1:C10', {'type': 'expression', 'formula': ['ISBLANK(C1)'],
                                             'stopIfTrue': '1', 'dxf': {}})
    cfs = write_conditional_formatting(ws)
    xml = b""
    for cf in cfs:
        xml += tostring(cf)
    diff = compare_xml(xml, """
    <conditionalFormatting sqref="C1:C10">
    <cfRule type="expression" stopIfTrue="1" priority="1">
    <formula>ISBLANK(C1)</formula>
    </cfRule>
    </conditionalFormatting>
    """)
    assert diff is None, diff
def test_conditional_font(worksheet_with_cf, write_conditional_formatting):
    """Test to verify font style written correctly."""
    # Create cf rule
    from openpyxl.styles import PatternFill, Font, Color
    from openpyxl.formatting import CellIsRule
    redFill = PatternFill(start_color=Color('FFEE1111'),
                          end_color=Color('FFEE1111'),
                          patternType='solid')
    whiteFont = Font(color=Color("FFFFFFFF"))
    ws = worksheet_with_cf
    ws.conditional_formatting.add('A1:A3',
                                  CellIsRule(operator='equal',
                                             formula=['"Fail"'],
                                             stopIfTrue=False,
                                             font=whiteFont,
                                             fill=redFill))
    cfs = write_conditional_formatting(ws)
    xml = b""
    for cf in cfs:
        xml += tostring(cf)
    # NOTE(review): the expected XML carries no dxf/style reference —
    # presumably style ids are resolved elsewhere; confirm before extending.
    diff = compare_xml(xml, """
    <conditionalFormatting sqref="A1:A3">
    <cfRule operator="equal" priority="1" type="cellIs">
    <formula>"Fail"</formula>
    </cfRule>
    </conditionalFormatting>
    """)
    assert diff is None, diff
def test_formula_rule(worksheet_with_cf, write_conditional_formatting):
    """The FormulaRule helper produces the same XML as a raw expression dict."""
    from openpyxl.formatting import FormulaRule
    ws = worksheet_with_cf
    ws.conditional_formatting.add('C1:C10',
                                  FormulaRule(
                                      formula=['ISBLANK(C1)'],
                                      stopIfTrue=True)
                                  )
    cfs = write_conditional_formatting(ws)
    xml = b""
    for cf in cfs:
        xml += tostring(cf)
    diff = compare_xml(xml, """
    <conditionalFormatting sqref="C1:C10">
    <cfRule type="expression" stopIfTrue="1" priority="1">
    <formula>ISBLANK(C1)</formula>
    </cfRule>
    </conditionalFormatting>
    """)
    assert diff is None, diff
@pytest.fixture
def write_worksheet():
    """Expose the full worksheet writer under test."""
    from .. worksheet import write_worksheet as writer
    return writer
def test_write_empty(worksheet, write_worksheet):
    """A fresh sheet serializes to the minimal default worksheet document."""
    ws = worksheet
    xml = write_worksheet(ws, None)
    expected = """
    <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
    <sheetPr>
    <outlinePr summaryRight="1" summaryBelow="1"/>
    <pageSetUpPr/>
    </sheetPr>
    <dimension ref="A1:A1"/>
    <sheetViews>
    <sheetView workbookViewId="0">
    <selection sqref="A1" activeCell="A1"/>
    </sheetView>
    </sheetViews>
    <sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
    <sheetData/>
    <pageMargins left="0.75" right="0.75" top="1" bottom="1" header="0.5" footer="0.5"/>
    </worksheet>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_page_margins(worksheet, write_worksheet):
    """Custom page margins appear on the pageMargins element."""
    ws = worksheet
    ws.page_margins.left = 2.0
    ws.page_margins.right = 2.0
    ws.page_margins.top = 2.0
    ws.page_margins.bottom = 2.0
    ws.page_margins.header = 1.5
    ws.page_margins.footer = 1.5
    xml = write_worksheet(ws, None)
    expected = """
    <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
    <sheetPr>
    <outlinePr summaryRight="1" summaryBelow="1"/>
    <pageSetUpPr/>
    </sheetPr>
    <dimension ref="A1:A1"/>
    <sheetViews>
    <sheetView workbookViewId="0">
    <selection sqref="A1" activeCell="A1"/>
    </sheetView>
    </sheetViews>
    <sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
    <sheetData/>
    <pageMargins left="2" right="2" top="2" bottom="2" header="1.5" footer="1.5"/>
    </worksheet>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_printer_settings(worksheet, write_worksheet):
    """Page setup, print options and fitToPage all reach the output document."""
    ws = worksheet
    ws.page_setup.orientation = ws.ORIENTATION_LANDSCAPE
    ws.page_setup.paperSize = ws.PAPERSIZE_TABLOID
    ws.page_setup.fitToHeight = 0
    ws.page_setup.fitToWidth = 1
    ws.print_options.horizontalCentered = True
    ws.print_options.verticalCentered = True
    page_setup_prop = PageSetupProperties(fitToPage=True)
    ws.sheet_properties.pageSetUpPr = page_setup_prop
    xml = write_worksheet(ws, None)
    expected = """
    <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
    <sheetPr>
    <outlinePr summaryRight="1" summaryBelow="1"/>
    <pageSetUpPr fitToPage="1"/>
    </sheetPr>
    <dimension ref="A1:A1"/>
    <sheetViews>
    <sheetView workbookViewId="0">
    <selection sqref="A1" activeCell="A1"/>
    </sheetView>
    </sheetViews>
    <sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
    <sheetData/>
    <printOptions horizontalCentered="1" verticalCentered="1"/>
    <pageMargins left="0.75" right="0.75" top="1" bottom="1" header="0.5" footer="0.5"/>
    <pageSetup orientation="landscape" paperSize="3" fitToHeight="0" fitToWidth="1"/>
    </worksheet>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_data_validation(worksheet):
    """A list-type validation on one cell serializes with its formula."""
    from .. worksheet import write_datavalidation
    from openpyxl.worksheet.datavalidation import DataValidation, ValidationType
    ws = worksheet
    dv = DataValidation(ValidationType.LIST, formula1='"Dog,Cat,Fish"')
    dv.add_cell(ws['A1'])
    ws.add_data_validation(dv)
    xml = write_datavalidation(worksheet)
    xml = tostring(xml)
    expected = """
    <dataValidations xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" count="1">
    <dataValidation allowBlank="0" showErrorMessage="1" showInputMessage="1" sqref="A1" type="list">
    <formula1>"Dog,Cat,Fish"</formula1>
    </dataValidation>
    </dataValidations>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_vba(worksheet, write_worksheet):
    """VBA code name and controls id surface as codeName and legacyDrawing."""
    ws = worksheet
    ws.vba_code = {"codeName": "Sheet1"}
    ws.vba_controls = "rId2"
    xml = write_worksheet(ws, None)
    expected = """
    <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
    xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
    <sheetPr codeName="Sheet1">
    <outlinePr summaryBelow="1" summaryRight="1"/>
    <pageSetUpPr/>
    </sheetPr>
    <dimension ref="A1:A1"/>
    <sheetViews>
    <sheetView workbookViewId="0">
    <selection activeCell="A1" sqref="A1"/>
    </sheetView>
    </sheetViews>
    <sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
    <sheetData/>
    <pageMargins bottom="1" footer="0.5" header="0.5" left="0.75" right="0.75" top="1"/>
    <legacyDrawing r:id="rId2"/>
    </worksheet>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_vba_comments(datadir, write_worksheet):
    """A workbook kept with VBA writes exactly one legacyDrawing element."""
    datadir.chdir()
    fname = 'vba+comments.xlsm'
    wb = load_workbook(fname, keep_vba=True)
    ws = wb['Form Controls']
    sheet = fromstring(write_worksheet(ws, None))
    els = sheet.findall('{%s}legacyDrawing' % SHEET_MAIN_NS)
    assert len(els) == 1, "Wrong number of legacyDrawing elements %d" % len(els)
    assert els[0].get('{%s}id' % REL_NS) == 'vbaControlId'
def test_vba_rels(datadir, write_worksheet):
    """Sheet relationships include the vmlDrawing and comments targets."""
    datadir.chdir()
    fname = 'vba+comments.xlsm'
    wb = load_workbook(fname, keep_vba=True)
    ws = wb['Form Controls']
    xml = tostring(write_rels(ws, 1, 1, 1))
    expected = """
    <ns0:Relationships xmlns:ns0="http://schemas.openxmlformats.org/package/2006/relationships">
    <ns0:Relationship Id="vbaControlId" Target="../drawings/vmlDrawing1.vml" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/vmlDrawing"/>
    <ns0:Relationship Id="comments" Target="../comments1.xml" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/comments"/>
    </ns0:Relationships>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_protection(worksheet, write_worksheet):
    """Enabling protection writes a fully-populated sheetProtection element."""
    ws = worksheet
    ws.protection.enable()
    xml = write_worksheet(ws, None)
    expected = """
    <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <sheetPr>
    <outlinePr summaryBelow="1" summaryRight="1"/>
    <pageSetUpPr/>
    </sheetPr>
    <dimension ref="A1:A1"/>
    <sheetViews>
    <sheetView workbookViewId="0">
    <selection activeCell="A1" sqref="A1"/>
    </sheetView>
    </sheetViews>
    <sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
    <sheetData/>
    <sheetProtection sheet="1" objects="0" selectLockedCells="0" selectUnlockedCells="0" scenarios="0" formatCells="1" formatColumns="1" formatRows="1" insertColumns="1" insertRows="1" insertHyperlinks="1" deleteColumns="1" deleteRows="1" sort="1" autoFilter="1" pivotTables="1"/>
    <pageMargins bottom="1" footer="0.5" header="0.5" left="0.75" right="0.75" top="1"/>
    </worksheet>
    """
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_write_comments(worksheet, write_worksheet):
    """A non-zero comment count adds a <legacyDrawing> element pointing at
    the comments vml part."""
    ws = worksheet
    worksheet._comment_count = 1
    xml = write_worksheet(ws, None)
    expected = """
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<sheetPr>
<outlinePr summaryBelow="1" summaryRight="1"/>
<pageSetUpPr/>
</sheetPr>
<dimension ref="A1:A1"/>
<sheetViews>
<sheetView workbookViewId="0">
<selection activeCell="A1" sqref="A1"/>
</sheetView>
</sheetViews>
<sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
<sheetData/>
<pageMargins bottom="1" footer="0.5" header="0.5" left="0.75" right="0.75" top="1"/>
<legacyDrawing r:id="commentsvml"></legacyDrawing>
</worksheet>
"""
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_write_with_tab_color(worksheet, write_worksheet):
    """Setting sheet_properties.tabColor emits a <tabColor> element; the
    RGB value is prefixed with an alpha byte ("00")."""
    ws = worksheet
    ws.sheet_properties.tabColor = "F0F0F0"
    xml = write_worksheet(ws, None)
    expected = """
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<sheetPr>
<tabColor rgb="00F0F0F0"/>
<outlinePr summaryRight="1" summaryBelow="1"/>
<pageSetUpPr/>
</sheetPr>
<dimension ref="A1:A1"/>
<sheetViews>
<sheetView workbookViewId="0">
<selection sqref="A1" activeCell="A1"/>
</sheetView>
</sheetViews>
<sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
<sheetData/>
<pageMargins left="0.75" right="0.75" top="1" bottom="1" header="0.5" footer="0.5"/>
</worksheet>
"""
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_write_with_fit_to_page(worksheet, write_worksheet):
    """fitToPage/autoPageBreaks page-setup flags are serialised onto the
    <pageSetUpPr> element."""
    ws = worksheet
    ws.page_setup.fitToPage = True
    ws.page_setup.autoPageBreaks = False
    xml = write_worksheet(ws, None)
    expected = """
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<sheetPr>
<outlinePr summaryRight="1" summaryBelow="1"/>
<pageSetUpPr fitToPage="1" autoPageBreaks="0"/>
</sheetPr>
<dimension ref="A1:A1"/>
<sheetViews>
<sheetView workbookViewId="0">
<selection sqref="A1" activeCell="A1"/>
</sheetView>
</sheetViews>
<sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
<sheetData/>
<pageMargins left="0.75" right="0.75" top="1" bottom="1" header="0.5" footer="0.5"/>
</worksheet>
"""
    diff = compare_xml(xml, expected)
    assert diff is None, diff
| {
"content_hash": "2a59bb23eeac0fc84041c9cf41d3be62",
"timestamp": "",
"source": "github",
"line_count": 855,
"max_line_length": 309,
"avg_line_length": 32.94385964912281,
"alnum_prop": 0.6174601484006107,
"repo_name": "Darthkpo/xtt",
"id": "6d17c1c30671e7bc552dfbd1bf1a842c3832edc1",
"size": "28167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openpyxl/writer/tests/test_worksheet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "794012"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext as _
from horizon import exceptions
from horizon import forms
from akanda.horizon.api import neutron_extensions_client
from akanda.horizon.tabs import firewall_tab_redirect
from akanda.horizon.firewall.forms import (
CreateFirewallRuleForm, EditFirewallRuleForm)
class CreateFirewallRuleView(forms.ModalFormView):
    """Modal form view for creating a firewall rule."""

    form_class = CreateFirewallRuleForm
    template_name = 'akanda/firewall/create.html'
    success_url = reverse_lazy('horizon:project:networking:index')

    def get_success_url(self):
        # Land the user back on the firewall tab of the networking panel.
        base = super(CreateFirewallRuleView, self).get_success_url()
        return "%s?tab=%s" % (base, firewall_tab_redirect())
class EditFirewallRuleView(forms.ModalFormView):
    """Modal form view for editing an existing firewall rule.

    The rule id arrives in ``kwargs['firewall_rule_id']``; the rule is
    fetched once from the Neutron extensions API and memoized on the view
    instance as ``self._object``.
    """

    form_class = EditFirewallRuleForm
    template_name = 'akanda/firewall/edit.html'
    success_url = reverse_lazy('horizon:project:networking:index')

    def get_success_url(self):
        # Land the user back on the firewall tab of the networking panel.
        url = super(EditFirewallRuleView, self).get_success_url()
        return "%s?tab=%s" % (url, firewall_tab_redirect())

    def _get_object(self):
        """Fetch and cache the firewall rule being edited."""
        if not hasattr(self, "_object"):
            try:
                self._object = neutron_extensions_client.filterrule_get(
                    self.request, self.kwargs['firewall_rule_id'])
            # Fix: was a bare ``except:``, which would also swallow
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            except Exception:
                msg = _('Unable to retrieve firewall rule.')
                redirect = self.get_success_url()
                exceptions.handle(self.request, msg, redirect=redirect)
        return self._object

    def get_context_data(self, **kwargs):
        context = super(EditFirewallRuleView, self).get_context_data(**kwargs)
        context['firewall_rule'] = self._get_object()
        return context

    def get_initial(self):
        """Build the initial form data from the fetched rule.

        ``source``/``destination`` may be absent or lack an ``id``, so
        fall back to an empty string instead of raising KeyError.
        """
        rule = self._get_object()
        source_id = rule.get('source', {}).get('id', '')
        destination_id = rule.get('destination', {}).get('id', '')
        return {
            'id': self.kwargs['firewall_rule_id'],
            'source_id': source_id,
            'source_public_port': rule['source_port'],
            'source_protocol': rule['protocol'],
            'destination_id': destination_id,
            'destination_public_port': rule['destination_port'],
            'destination_protocol': rule['protocol'],
            'policy': rule['action'],
        }
| {
"content_hash": "3219363e7f0d2a7da54f09b408868741",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 38.56923076923077,
"alnum_prop": 0.6310331072995612,
"repo_name": "dreamhost/akanda-horizon",
"id": "4ffc2709c629b273ad294258f0cfcf2ae2a5f5c4",
"size": "3115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "akanda/horizon/firewall/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "151"
},
{
"name": "JavaScript",
"bytes": "122"
},
{
"name": "Python",
"bytes": "141454"
}
],
"symlink_target": ""
} |
def can_build(env, platform):
    """Whether the raycast module can be built for this configuration.

    Requires editor tools and an Embree-supported architecture; 32-bit,
    RISC-V, PowerPC, javascript and server targets are excluded, and
    Android is limited to arm64v8 / x86_64.
    """
    if not env["tools"]:
        return False
    # Depends on Embree library, which only supports x86_64 and aarch64.
    if env["arch"].startswith(("rv", "ppc")):
        return False
    if platform == "android":
        return env["android_arch"] in ("arm64v8", "x86_64")
    if platform in ("javascript", "server"):
        return False
    return env["bits"] != "32"
def configure(env):
    # The raycast module needs no extra build flags or environment tweaks.
    pass
| {
"content_hash": "4201de3e30bb15167c3176624fb85171",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 22.59090909090909,
"alnum_prop": 0.5935613682092555,
"repo_name": "ex/godot",
"id": "3bad13f479a14bdab1783bbed9e1fcb5b7d79c60",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/3.5",
"path": "modules/raycast/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AIDL",
"bytes": "1633"
},
{
"name": "Batchfile",
"bytes": "26"
},
{
"name": "C",
"bytes": "1045182"
},
{
"name": "C#",
"bytes": "1061492"
},
{
"name": "C++",
"bytes": "39315087"
},
{
"name": "CMake",
"bytes": "606"
},
{
"name": "GAP",
"bytes": "62"
},
{
"name": "GDScript",
"bytes": "323212"
},
{
"name": "GLSL",
"bytes": "836846"
},
{
"name": "Java",
"bytes": "595274"
},
{
"name": "JavaScript",
"bytes": "194742"
},
{
"name": "Kotlin",
"bytes": "84098"
},
{
"name": "Makefile",
"bytes": "1421"
},
{
"name": "Objective-C",
"bytes": "20550"
},
{
"name": "Objective-C++",
"bytes": "365306"
},
{
"name": "PowerShell",
"bytes": "2713"
},
{
"name": "Python",
"bytes": "475722"
},
{
"name": "Shell",
"bytes": "30899"
}
],
"symlink_target": ""
} |
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
import inspect  # NOTE(review): appears unused in this module -- confirm before removing
from . import defaults  # NOTE(review): also unused below -- possibly kept for re-export; confirm
import os

# MongoDB
DB_PORT = 27017

# Development/debug toggles; SECURE_MODE follows DEBUG_MODE.
DEV_MODE = True
DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc.
SECURE_MODE = not DEBUG_MODE # Disable osf secure cookie

# URLs derived from the security mode.
PROTOCOL = 'https://' if SECURE_MODE else 'http://'
DOMAIN = PROTOCOL + 'localhost:5000/'
API_DOMAIN = PROTOCOL + 'localhost:8000/'

SEARCH_ENGINE = 'elastic'

# Feature switches, all disabled for local testing.
USE_EMAIL = False
USE_CELERY = False
USE_GNUPG = False

# Email
MAIL_SERVER = 'localhost:1025' # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'

# Session
COOKIE_NAME = 'osf'
SECRET_KEY = "CHANGEME"
SESSION_COOKIE_SECURE = SECURE_MODE

# Presumably TLS key/cert paths for the OSF server; none in this config.
OSF_SERVER_KEY = None
OSF_SERVER_CERT = None

##### Celery #####
## Default RabbitMQ broker
BROKER_URL = 'amqp://'
# In-memory result backend
CELERY_RESULT_BACKEND = 'cache'
CELERY_CACHE_BACKEND = 'memory'

USE_CDN_FOR_CLIENT_LIBS = False

SENTRY_DSN = None

# Tests and the app share the same database name here.
TEST_DB_NAME = DB_NAME = 'osf_test'

VARNISH_SERVERS = ['http://localhost:8080']
# if ENABLE_VARNISH isn't set in python read it from the env var and set it
locals().setdefault('ENABLE_VARNISH', os.environ.get('ENABLE_VARNISH') == 'True')
| {
"content_hash": "3862d9022d534e8c1426cdd1baa89901",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 81,
"avg_line_length": 23.142857142857142,
"alnum_prop": 0.7145061728395061,
"repo_name": "abought/osf.io",
"id": "356ece0af90ba810b47d3bda960bd543f7ad1f99",
"size": "1320",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "website/settings/local-travis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "157412"
},
{
"name": "HTML",
"bytes": "110361"
},
{
"name": "JavaScript",
"bytes": "1634802"
},
{
"name": "Mako",
"bytes": "666400"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5569606"
}
],
"symlink_target": ""
} |
import time
import logging
import asyncio
import mocores.core.runtime.actor_base
import mocores.core.util
import mocores.core.actor_pool
import mocores.core.worker_thread
from mocores.core.util.message_queue import MessageQueue
from mocores.core.runtime.membership_table import MembershipTable, MembershipTableEntry
class RuntimeWorker(object):
    """A single runtime node: owns a membership table, a pool of worker
    threads and an asyncio TCP server that currently just echoes data."""

    def __init__(self, cluster_id, service_id, ip, port, single_node_mode=False):
        print("new worker")
        self.cluster_id = cluster_id
        self.service_id = service_id
        self.single_node_mode = single_node_mode
        self.ip = ip
        self.port = port
        # Formatted wall-clock start time (UTC); stored in the membership entry.
        self.start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.gmtime(time.time()))
        self.messages = MessageQueue()
        self.worker_threads = []
        self.membership_table = MembershipTable()

    async def handle_packet(self, reader, writer):
        """Handle one TCP connection: read, echo back, close.

        NOTE(review): reads at most 100 bytes, so longer payloads are
        truncated/split across reads -- confirm this is intentional.
        """
        data = await reader.read(100)
        message = data.decode()
        addr = writer.get_extra_info('peername')
        print("Received %r from %r" % (message, addr))
        print("Send: %r" % message)
        # Echo the raw payload back to the sender, then close the socket.
        writer.write(data)
        await writer.drain()
        print("Close the client socket")
        writer.close()

    def run(self):
        """Start the node: register membership, spawn worker threads and
        serve TCP connections until interrupted (Ctrl+C)."""
        logging.info("start server")
        # add self to membership table
        if self.single_node_mode:
            self.membership_table.add_entry(
                MembershipTableEntry(
                    self.cluster_id,
                    self.ip,
                    self.port,
                    self.start_time,
                    True))
        else:
            # Multi-node membership exchange is not implemented here yet.
            pass
        logging.info("start worker threads")
        for i in range(4):  # NOTE(review): worker-thread count is hard-coded
            self.worker_threads.append(mocores.core.worker_thread.WorkerThread())
            self.worker_threads[i].start()
        logging.debug("wait for connections")
        loop = asyncio.get_event_loop()
        coro = asyncio.start_server(lambda reader, writer: self.handle_packet(reader, writer), self.ip, self.port, loop=loop)
        server = loop.run_until_complete(coro)
        # Serve requests until Ctrl+C is pressed
        print('Serving on {}'.format(server.sockets[0].getsockname()))
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        # Close the server
        server.close()
        loop.run_until_complete(server.wait_closed())
        loop.close()

    def get_actor(self, actor_type, actor_id):
        """Build an actor reference for *actor_type*/*actor_id*, keyed by
        the fully-qualified class name of the actor type."""
        actor_ref_type = mocores.core.runtime.actor_base.actor_ref(actor_type)
        actor_class = actor_type.__module__ + "." + actor_type.__name__
        return actor_ref_type(actor_class, actor_id)
"content_hash": "71cf23e9aa1f0eef9a99015774468018",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 125,
"avg_line_length": 34.30769230769231,
"alnum_prop": 0.6068759342301944,
"repo_name": "toyteam/Mocores",
"id": "02a681d61ca0d88c2cc3f1689bf4e08b3f727cfc",
"size": "2676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mocores/core/runtime/runtime_worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4337"
},
{
"name": "C++",
"bytes": "18953"
},
{
"name": "CMake",
"bytes": "11293"
},
{
"name": "Python",
"bytes": "36587"
}
],
"symlink_target": ""
} |
"""
bace dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
from deepchem.molnet.load_function.bace_features import bace_user_specified_features
BACE_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/bace.csv"
BACE_REGRESSION_TASKS = ["pIC50"]
BACE_CLASSIFICATION_TASKS = ["Class"]
class _BaceLoader(_MolnetLoader):
    """MoleculeNet loader for the BACE csv, shared by both task variants."""

    def create_dataset(self) -> Dataset:
        # Download the raw csv once, then featurize the "mol" SMILES column.
        csv_path = os.path.join(self.data_dir, "bace.csv")
        if not os.path.exists(csv_path):
            dc.utils.data_utils.download_url(url=BACE_URL, dest_dir=self.data_dir)
        loader = dc.data.CSVLoader(tasks=self.tasks, feature_field="mol",
                                   featurizer=self.featurizer)
        return loader.create_dataset(csv_path, shard_size=8192)
def load_bace_regression(
    featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
    splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
    transformers: List[Union[TransformerGenerator, str]] = ['normalization'],
    reload: bool = True,
    data_dir: Optional[str] = None,
    save_dir: Optional[str] = None,
    **kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
    """Load the BACE dataset with regression labels (pIC50).

    BACE collects experimentally reported binding results for 1522
    inhibitors of human beta-secretase 1 (BACE-1); this variant exposes
    the quantitative pIC50 column. Scaffold splitting is recommended.

    Columns of the raw csv:

    - "mol": SMILES representation of the molecular structure
    - "pIC50": negative log of the IC50 binding affinity
    - "class": binary inhibitor label (unused by this loader)

    Parameters
    ----------
    featurizer: Featurizer or str
        featurizer (or a dc.molnet.featurizers shortcut name) applied to
        the molecules.
    splitter: Splitter, str or None
        splitter (or a dc.molnet.splitters shortcut name) producing
        train/valid/test; None returns all data as a single dataset.
    transformers: list of TransformerGenerator or str
        transformers (or dc.molnet.transformers shortcut names) applied
        to the data.
    reload: bool
        if True, cache the featurized/split datasets to disk on first use
        and reload the cache on subsequent calls.
    data_dir: str, optional
        directory to save the raw data in.
    save_dir: str, optional
        directory to save the processed dataset in.

    References
    ----------
    .. [1] Subramanian, Govindan, et al. "Computational modeling of
       β-secretase 1 (BACE-1) inhibitors using ligand based approaches."
       Journal of chemical information and modeling 56.10 (2016): 1936-1949.
    """
    return _BaceLoader(
        featurizer, splitter, transformers, BACE_REGRESSION_TASKS,
        data_dir, save_dir, **kwargs).load_dataset('bace_r', reload)
def load_bace_classification(
    featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
    splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
    transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
    reload: bool = True,
    data_dir: Optional[str] = None,
    save_dir: Optional[str] = None,
    **kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
    """Load the BACE dataset with binary classification labels ("class").

    Parameters
    ----------
    featurizer: Featurizer or str
        featurizer (or a dc.molnet.featurizers shortcut name) applied to
        the molecules.
    splitter: Splitter, str or None
        splitter (or a dc.molnet.splitters shortcut name) producing
        train/valid/test; None returns all data as a single dataset.
    transformers: list of TransformerGenerator or str
        transformers (or dc.molnet.transformers shortcut names) applied
        to the data.
    reload: bool
        if True, cache the featurized/split datasets to disk on first use
        and reload the cache on subsequent calls.
    data_dir: str, optional
        directory to save the raw data in.
    save_dir: str, optional
        directory to save the processed dataset in.
    """
    return _BaceLoader(
        featurizer, splitter, transformers, BACE_CLASSIFICATION_TASKS,
        data_dir, save_dir, **kwargs).load_dataset('bace_c', reload)
| {
"content_hash": "5f63d151e209c66f562c08fbe457f3e2",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 91,
"avg_line_length": 42.577235772357724,
"alnum_prop": 0.7055566163834256,
"repo_name": "lilleswing/deepchem",
"id": "e1afacce4fe66be99b537712bda4d445b1add5ad",
"size": "5238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepchem/molnet/load_function/bace_datasets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16453"
},
{
"name": "Dockerfile",
"bytes": "794"
},
{
"name": "HTML",
"bytes": "20618"
},
{
"name": "Jupyter Notebook",
"bytes": "59756"
},
{
"name": "Python",
"bytes": "2597968"
},
{
"name": "Shell",
"bytes": "11491"
}
],
"symlink_target": ""
} |
"""Auto-generated file, do not edit by hand. ST metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Phone metadata for region "ST" (country calling code 239): number
# patterns, example numbers and the single national formatting rule.
# The module header says "do not edit by hand" -- regenerate rather than
# tweaking the patterns manually.
PHONE_METADATA_ST = PhoneMetadata(id='ST', country_code=239, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='(?:22|9\\d)\\d{5}', possible_length=(7,)),
    fixed_line=PhoneNumberDesc(national_number_pattern='22\\d{5}', example_number='2221234', possible_length=(7,)),
    mobile=PhoneNumberDesc(national_number_pattern='900[5-9]\\d{3}|9(?:0[1-9]|[89]\\d)\\d{4}', example_number='9812345', possible_length=(7,)),
    number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[29]'])])
| {
"content_hash": "1e85671fb54ab2c03368854228fd36f5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 143,
"avg_line_length": 87,
"alnum_prop": 0.6925287356321839,
"repo_name": "daviddrysdale/python-phonenumbers",
"id": "17279069203955ffe02fd286db973ce1dc299867",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "python/phonenumbers/data/region_ST.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3898"
},
{
"name": "Makefile",
"bytes": "9034"
},
{
"name": "Python",
"bytes": "22052087"
},
{
"name": "Ruby",
"bytes": "237"
}
],
"symlink_target": ""
} |
from zoof.lib.yoton3 import Connection
# Smoke test: create two connections and pair them over the same named
# local endpoint (one binds, the other connects).
c1 = Connection()
c2 = Connection()
c1.bind('localhost:yoton3test')
c2.connect('localhost:yoton3test')
"content_hash": "f9ed01ccfa1dd8e28af86cb57001087c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 38,
"avg_line_length": 18,
"alnum_prop": 0.7638888888888888,
"repo_name": "zoofIO/zoof",
"id": "7c09bb3c023143982a6995f03c9fedb2ac72c29d",
"size": "144",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zoof/lib/tests/yoton3_test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "69144"
}
],
"symlink_target": ""
} |
# NOTE(review): this file is design pseudocode, not executable Python.
# It mixes Python-like statements with prose placeholders ("for each",
# "do nothing", "move -> along genome") and bare identifiers as dict keys.
# Kept verbatim below (re-indented to show the intended nesting) with
# review notes added as comments.

# Sketch of the data shapes the algorithm works over:
clusters = [cluster, cluster, cluster]
cluster = [{gene_id:1, phage_id:1, translation:"proteinString", start:0, end:227, locus_tag:"name",blastp:[hits],clustulo:[diffHits]}, {gene}, {gene}]
hits = [{query_id:"same as above gene_id", subject_id:222, query_start:1, subject_start:4, e_value:float, percent_identity(ident):float}, {hit}, {hit}]
# NOTE(review): canonical alternative start codons are ATG/GTG/TTG; "TGT"
# below looks like a transposition typo of "TTG" -- confirm.
startCodons = ["ATG","GTG","TGT"]
for cluster in clusters:
    # Build a dictionary of golden genes
    {1:[gene_ids], 2:[gene_ids]}
    for gene in cluster:
        if gene != golden:
            # find a one to one match within the golden phage dictionary starting at 1
            # one to one is the same start codon
            listOfTuples = [tuple{postAdjustmentDistance, goldenRank, %ident, ...more?}]
            for each goldenHit in gene[blastp]:
                relative_gene_start = hit["query_start"]
                relative_golden_start = hit["subject_start"]
                idealMoveDistance = abs(relative_gene_start - relative_golden_start)
                phageGenome = getPhageGenome(gene['phage_id'])
                if gene_start != 1 and golden_start != 1:
                    do nothing
                elif:
                    bestGeneStart = gene['start']
                    # NOTE(review): "going forward" is written as start > end and
                    # "going backward" as end > start -- that looks inverted
                    # relative to the usual convention (forward genes have
                    # start < end); confirm before implementing.
                    if gene going forward (absolute_gene_start > absolute_gene_end):
                        if tooLong(relative_golden_start == 1):
                            move -> along genome (increasing the start)
                            for i in range(1,ideal_move_distance)
                                currentStart = gene['start'] + (3 * i) # increase our start
                                codon = phageGenome[currentStart:currentStart+3] # codon is going forward
                                if codon in startCodons:
                                    bestGeneStart = currentStart
                        elif tooShort(relative_gene_start == 1):
                            move <- along genome (decreasing the start)
                            for i in range(1,ideal_move_distance)
                                currentStart = gene['start'] - (3 * i) # decrease our start
                                codon = phageGenome[currentStart:currentStart+3] # codon is going forward
                                if codon in startCodons:
                                    bestGeneStart = currentStart
                    elif gene going backward (absolute_gene_end > absolute_gene_start):
                        if tooLong(relative_golden_start == 1):
                            move <- along genome (decreasing the start)
                            for i in range(1,ideal_move_distance)
                                currentStart = gene['start'] - (3 * i) # decrease our start
                                codon = phageGenome[currentStart-3:currentStart] # codon is going backward
                                codon = codon[::-1] # reverses codon because we're going backwards
                                if codon in startCodons:
                                    bestGeneStart = currentStart
                        elif tooShort(relative_gene_start == 1):
                            move -> along genome (increasing the start)
                            for i in range(1,ideal_move_distance)
                                currentStart = gene['start'] + (3 * i) #increase our start
                                codon = phageGenome[currentStart-3:currentStart] # codon is going backward
                                codon = codon[::-1] # reverses codon because we're going backwards
                                if codon in startCodons:
                                    bestGeneStart = currentStart
#by the time we get here bestGeneStart should have the best index of the genome for our new start.
| {
"content_hash": "c2a9bf01ad1946e985c640401b4f042b",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 151,
"avg_line_length": 43,
"alnum_prop": 0.5784515749190462,
"repo_name": "pjtatlow/geneysis",
"id": "341f4d478b74893a81e5c0968a2acff01ff44d23",
"size": "3397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pseudoCode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2886"
},
{
"name": "HTML",
"bytes": "6965"
},
{
"name": "JavaScript",
"bytes": "15314"
},
{
"name": "Python",
"bytes": "61961"
},
{
"name": "Shell",
"bytes": "1064"
}
],
"symlink_target": ""
} |
from .version import __version__
| {
"content_hash": "97e8ab60232d4cb924b59f1cd0e2a9fe",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 33,
"avg_line_length": 34,
"alnum_prop": 0.7058823529411765,
"repo_name": "arokem/MRS-old",
"id": "7f15ca08581a5ae8147af744e7f841260342dd38",
"size": "34",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MRS/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "160669"
}
],
"symlink_target": ""
} |
import datetime
import logging
try:
import threading
except ImportError:
threading = None
from django.template.loader import render_to_string
from debug_toolbar.panels import DebugPanel
class ThreadTrackingHandler(logging.Handler):
    """Logging handler that buckets emitted records per thread."""

    def __init__(self):
        if threading is None:
            raise NotImplementedError(
                "threading module is not available, "
                "the logging panel cannot be used without it")
        logging.Handler.__init__(self)
        # Maps a thread object to the list of records that thread emitted.
        self.records = {}

    def emit(self, record):
        # Append to the current thread's bucket.
        self.get_records().append(record)

    def get_records(self, thread=None):
        """Return (creating it if needed) the record list for *thread*,
        defaulting to the current thread."""
        key = thread if thread is not None else threading.current_thread()
        return self.records.setdefault(key, [])

    def clear_records(self, thread=None):
        """Drop all buffered records for *thread* (current thread by
        default); a no-op if the thread has no bucket."""
        key = thread if thread is not None else threading.current_thread()
        self.records.pop(key, None)
# Module-level singleton: capture everything (level NOTSET) on the root
# logger through one shared ThreadTrackingHandler.
handler = ThreadTrackingHandler()
logging.root.setLevel(logging.NOTSET)
logging.root.addHandler(handler)
class LoggingPanel(DebugPanel):
    """Debug toolbar panel listing the log records captured during the
    current request by the module-level ``handler``."""

    name = 'Logging'
    has_content = True

    def process_request(self, request):
        # Start each request with a clean per-thread record buffer.
        handler.clear_records()

    def get_and_delete(self):
        """Return the current thread's records and clear its buffer."""
        records = handler.get_records()
        handler.clear_records()
        return records

    def title(self):
        count = len(handler.get_records())
        # Bug fix: the old ``(count == 1) and '' or 's'`` always yielded
        # 's' because '' is falsy in the and/or trick, so one record read
        # "1 messages". A real conditional expression fixes that.
        return "Logging (%s message%s)" % (count, '' if count == 1 else 's')

    def url(self):
        return ''

    def content(self):
        """Render the captured records into the panel template."""
        records = [
            {
                'message': record.getMessage(),
                'time': datetime.datetime.fromtimestamp(record.created),
                'level': record.levelname,
                'file': record.pathname,
                'line': record.lineno,
            }
            for record in self.get_and_delete()
        ]
        return render_to_string('debug_toolbar/panels/logger.html', {'records': records})
| {
"content_hash": "6017db3a8119b6d8892dc124308894c9",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 119,
"avg_line_length": 31.685714285714287,
"alnum_prop": 0.6167718665464382,
"repo_name": "LongMan/django-debug-toolbar",
"id": "cb881481c050b000970915be4c06dc5d023b3169",
"size": "2218",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "debug_toolbar/panels/logger.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "8948"
},
{
"name": "Python",
"bytes": "29976"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import json
import time
import inspect
import solvebio
from .version import VERSION
from .errors import SolveError
from .utils.validators import validate_api_host_url
import platform
import requests
import textwrap
import logging
from requests import Session
from requests import codes
from requests.auth import AuthBase
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from six.moves.urllib.parse import urljoin
# Try using pyopenssl if available.
# Requires: pip install pyopenssl ndg-httpsclient pyasn1
# See http://urllib3.readthedocs.org/en/latest/contrib.html#module-urllib3.contrib.pyopenssl # noqa
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
# Module-level logger shared by the client helpers below.
logger = logging.getLogger('solvebio')
def _handle_api_error(response):
    """Raise a SolveError for a failed API response, logging only status
    codes that are not ordinary client errors (400/401/403/404)."""
    expected_client_errors = (400, 401, 403, 404)
    if response.status_code not in expected_client_errors:
        logger.info('API Error: %d' % response.status_code)
    raise SolveError(response=response)
def _handle_request_error(e):
    """Convert a transport-level failure into a SolveError with a wrapped,
    human-readable message; always raises."""
    exc_name = type(e).__name__
    if isinstance(e, requests.exceptions.RequestException):
        message = SolveError.default_message
        detail = "%s: %s" % (exc_name, str(e))
    else:
        message = ("Unexpected error communicating with SolveBio.\n"
                   "It looks like there's probably a configuration "
                   "issue locally.\nIf this problem persists, let us "
                   "know at support@solvebio.com.")
        detail = "A %s was raised" % (exc_name,)
        if str(e):
            detail += " with error message %s" % (str(e),)
        else:
            detail += " with no error message"
    full_message = textwrap.fill(message) + "\n\n(Network error: %s)" % (detail,)
    raise SolveError(message=full_message)
class SolveTokenAuth(AuthBase):
    """Custom auth handler for SolveBio API token authentication"""

    def __init__(self, token=None, token_type='Token'):
        self.token = token
        self.token_type = token_type
        if not self.token:
            # Prefer the OAuth2 access token over the API key.
            if solvebio.access_token:
                self.token, self.token_type = solvebio.access_token, 'Bearer'
            elif solvebio.api_key:
                self.token, self.token_type = solvebio.api_key, 'Token'

    def __call__(self, r):
        # Only attach an Authorization header when we actually have a token.
        if self.token:
            r.headers['Authorization'] = '{0} {1}'.format(self.token_type, self.token)
        return r

    def __repr__(self):
        return self.token_type if self.token else 'Anonymous'
class SolveClient(object):
"""A requests-based HTTP client for SolveBio API resources"""
def __init__(self, host=None, token=None, token_type='Token',
include_resources=True):
self.set_host(host)
self.set_token(token, token_type)
self._headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Accept-Encoding': 'gzip,deflate'
}
self.set_user_agent()
# Use a session with a retry policy to handle
# intermittent connection errors.
retries = Retry(
total=5,
backoff_factor=0.1,
status_forcelist=[
codes.bad_gateway,
codes.service_unavailable,
codes.gateway_timeout
])
adapter = HTTPAdapter(max_retries=retries)
self._session = Session()
self._session.mount(self._host, adapter)
# Import all resources into the client
if include_resources:
skip = ('SolveError', 'SolveClient',)
for name, class_ in inspect.getmembers(solvebio, inspect.isclass):
if name in skip:
continue
subclass = type(name, (class_,), {'_client': self})
setattr(self, name, subclass)
def set_host(self, host=None):
self._host = validate_api_host_url(host or solvebio.api_host)
def set_token(self, token=None, token_type='Token'):
self._auth = SolveTokenAuth(token, token_type)
def set_user_agent(self, name=None, version=None):
ua = 'solvebio-python-client/{} python-requests/{} {}/{}'.format(
VERSION,
requests.__version__,
platform.python_implementation(),
platform.python_version()
)
# Prefix the name of the app or script before the
# default user-agent.
if name:
name = name.replace(' ', '-')
if version:
ua = '{}/{} {}'.format(name, version, ua)
else:
ua = '{} {}'.format(name, ua)
self._headers['User-Agent'] = ua
def whoami(self):
return self.get('/v1/user', {})
def get(self, url, params, **kwargs):
"""Issues an HTTP GET across the wire via the Python requests
library. See *request()* for information on keyword args."""
kwargs['params'] = params
return self.request('GET', url, **kwargs)
def post(self, url, data, **kwargs):
"""Issues an HTTP POST across the wire via the Python requests
library. See *request* for information on keyword args."""
kwargs['data'] = data
return self.request('POST', url, **kwargs)
def delete(self, url, data, **kwargs):
"""Issues an HTTP DELETE across the wire via the Python requests
library. See *request* for information on keyword args."""
kwargs['data'] = data
return self.request('DELETE', url, **kwargs)
def request(self, method, url, **kwargs):
"""
Issues an HTTP Request across the wire via the Python requests
library.
Parameters
----------
method : str
an HTTP method: GET, PUT, POST, DELETE, ...
url : str
the place to connect to. If the url doesn't start
with a protocol (https:// or http://), we'll slap
solvebio.api_host in the front.
allow_redirects: bool, optional
set *False* we won't follow any redirects
headers: dict, optional
Custom headers can be provided here; generally though this
will be set correctly by default dependent on the
method type. If the content type is JSON, we'll
JSON-encode params.
param : dict, optional
passed as *params* in the requests.request
timeout : int, optional
timeout value in seconds for the request
raw: bool, optional
unless *True* the response encoded to json
files: file
File content in the form of a file handle which is to be
uploaded. Files are passed in POST requests
Returns
-------
response object. If *raw* is not *True* and
repsonse if valid the object will be JSON encoded. Otherwise
it will be the request.reposne object.
"""
opts = {
'allow_redirects': True,
'auth': self._auth,
'data': {},
'files': None,
'headers': dict(self._headers),
'params': {},
'timeout': 80,
'verify': True
}
raw = kwargs.pop('raw', False)
debug = kwargs.pop('debug', False)
opts.update(kwargs)
method = method.upper()
if opts['files']:
# Don't use application/json for file uploads or GET requests
opts['headers'].pop('Content-Type', None)
else:
opts['data'] = json.dumps(opts['data'])
if not url.startswith(self._host):
url = urljoin(self._host, url)
logger.debug('API %s Request: %s' % (method, url))
if debug:
self._log_raw_request(method, url, **opts)
try:
response = self._session.request(method, url, **opts)
except Exception as e:
_handle_request_error(e)
if 429 == response.status_code:
delay = int(response.headers['retry-after']) + 1
logger.warn('Too many requests. Retrying in {0}s.'.format(delay))
time.sleep(delay)
return self.request(method, url, **kwargs)
if not (200 <= response.status_code < 400):
_handle_api_error(response)
# 204 is used on deletion. There is no JSON here.
if raw or response.status_code in [204, 301, 302]:
return response
try:
return response.json()
except Exception:
raise SolveError("Could not parse JSON response: {}"
.format(response.content))
def _log_raw_request(self, method, url, **kwargs):
from requests import Request, Session
req = Request(method=method.upper(), url=url,
data=kwargs['data'], params=kwargs['params'])
prepped = Session().prepare_request(req, )
logger.debug(prepped.headers)
logger.debug(prepped.body)
    def __repr__(self):
        # Identify the client by target host and the auth object in use.
        return '<SolveClient {0} {1}>'.format(self._host, self._auth)
client = SolveClient(include_resources=False)
| {
"content_hash": "143ba6adfed3d3d08b3d94bd14b97243",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 100,
"avg_line_length": 32.56643356643357,
"alnum_prop": 0.5768735237277217,
"repo_name": "solvebio/solvebio-python",
"id": "a77b93b9ad9a9045234a37073ff48989f5ed50ad",
"size": "9338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solvebio/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "9734"
},
{
"name": "Makefile",
"bytes": "1500"
},
{
"name": "Python",
"bytes": "417097"
},
{
"name": "SCSS",
"bytes": "969"
},
{
"name": "Shell",
"bytes": "5849"
}
],
"symlink_target": ""
} |
import copy
from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247
# JSON-schema used by Tempest service clients to validate the response of
# GET /servers/{id}/diagnostics at compute microversion 2.48.
# Most numeric fields allow null because not every hypervisor driver
# reports every statistic.
show_server_diagnostics = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'state': {
                'type': 'string', 'enum': [
                    'pending', 'running', 'paused', 'shutdown', 'crashed',
                    'suspended']
            },
            'driver': {
                'type': 'string', 'enum': [
                    'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv']
            },
            'hypervisor': {'type': ['string', 'null']},
            'hypervisor_os': {'type': ['string', 'null']},
            'uptime': {'type': ['integer', 'null']},
            'config_drive': {'type': 'boolean'},
            'num_cpus': {'type': 'integer'},
            'num_nics': {'type': 'integer'},
            'num_disks': {'type': 'integer'},
            'memory_details': {
                'type': 'object',
                'properties': {
                    'maximum': {'type': ['integer', 'null']},
                    'used': {'type': ['integer', 'null']}
                },
                'additionalProperties': False,
                'required': ['maximum', 'used']
            },
            # One entry per virtual CPU.
            'cpu_details': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'id': {'type': ['integer', 'null']},
                        'time': {'type': ['integer', 'null']},
                        'utilisation': {'type': ['integer', 'null']}
                    },
                    'additionalProperties': False,
                    'required': ['id', 'time', 'utilisation']
                }
            },
            # One entry per NIC; mac_address is validated against the shared
            # parameter_types pattern when present.
            'nic_details': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'mac_address': {'oneOf': [parameter_types.mac_address,
                                                  {'type': 'null'}]},
                        'rx_octets': {'type': ['integer', 'null']},
                        'rx_errors': {'type': ['integer', 'null']},
                        'rx_drop': {'type': ['integer', 'null']},
                        'rx_packets': {'type': ['integer', 'null']},
                        'rx_rate': {'type': ['integer', 'null']},
                        'tx_octets': {'type': ['integer', 'null']},
                        'tx_errors': {'type': ['integer', 'null']},
                        'tx_drop': {'type': ['integer', 'null']},
                        'tx_packets': {'type': ['integer', 'null']},
                        'tx_rate': {'type': ['integer', 'null']}
                    },
                    'additionalProperties': False,
                    'required': ['mac_address', 'rx_octets', 'rx_errors',
                                 'rx_drop',
                                 'rx_packets', 'rx_rate', 'tx_octets',
                                 'tx_errors',
                                 'tx_drop', 'tx_packets', 'tx_rate']
                }
            },
            # One entry per attached disk.
            'disk_details': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'read_bytes': {'type': ['integer', 'null']},
                        'read_requests': {'type': ['integer', 'null']},
                        'write_bytes': {'type': ['integer', 'null']},
                        'write_requests': {'type': ['integer', 'null']},
                        'errors_count': {'type': ['integer', 'null']}
                    },
                    'additionalProperties': False,
                    'required': ['read_bytes', 'read_requests', 'write_bytes',
                                 'write_requests', 'errors_count']
                }
            }
        },
        'additionalProperties': False,
        'required': [
            'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime',
            'config_drive', 'num_cpus', 'num_nics', 'num_disks',
            'memory_details', 'cpu_details', 'nic_details', 'disk_details'],
    }
}
# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
# to keep this schema in this file to have the generic way to select the
# right schema based on self.schema_versions_info mapping in service client.
# deepcopy (rather than aliasing) so that any later in-place tweak to a
# v2.48 schema cannot mutate the v2.47 originals.
list_servers = copy.deepcopy(servers247.list_servers)
get_remote_consoles = copy.deepcopy(servers247.get_remote_consoles)
list_tags = copy.deepcopy(servers247.list_tags)
update_all_tags = copy.deepcopy(servers247.update_all_tags)
delete_all_tags = copy.deepcopy(servers247.delete_all_tags)
check_tag_existence = copy.deepcopy(servers247.check_tag_existence)
update_tag = copy.deepcopy(servers247.update_tag)
delete_tag = copy.deepcopy(servers247.delete_tag)
get_server = copy.deepcopy(servers247.get_server)
list_servers_detail = copy.deepcopy(servers247.list_servers_detail)
update_server = copy.deepcopy(servers247.update_server)
rebuild_server = copy.deepcopy(servers247.rebuild_server)
rebuild_server_with_admin_pass = copy.deepcopy(
    servers247.rebuild_server_with_admin_pass)
attach_volume = copy.deepcopy(servers247.attach_volume)
show_volume_attachment = copy.deepcopy(servers247.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers247.list_volume_attachments)
| {
"content_hash": "c955f068a23937257f6239fef5d886df",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 46.208333333333336,
"alnum_prop": 0.46348061316501354,
"repo_name": "cisco-openstack/tempest",
"id": "e2e45bc29e615fed06c6d84a771c74dda7d3524b",
"size": "6151",
"binary": false,
"copies": "2",
"ref": "refs/heads/proposed",
"path": "tempest/lib/api_schema/response/compute/v2_48/servers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4431271"
},
{
"name": "Shell",
"bytes": "7435"
}
],
"symlink_target": ""
} |
"""
SPEED: Tests calculations for evapotranspiration calculations; those in the ET subclass in speedcalc.py
GitHub repository: https://github.com/maplion/SPEED
@author: Ryan Dammrose aka MapLion
"""
import unittest
import speedcalc
import test_logging
__author__ = "Ryan Dammrose"
__copyright__ = "Copyright 2015"
__license__ = "MIT"
# Shared fixtures: one logging helper and one ET calculator instance
# reused by every test case in this module.
log = test_logging.TestLogging()
sc_ET = speedcalc.ET()
class TestCalculations_ET(unittest.TestCase):
    """Unit tests for the evapotranspiration (ET) conversions in speedcalc.

    Each test logs begin/success/failure through the shared ``log`` helper
    and reports a unittest failure named after the calculation on error.
    """

    def test_massFluxToWaterEvaporated(self):
        """
        Is mass flux correctly converting to the amount of Water Evaporated?
        """
        testName = "test_massFluxToWaterEvaporated"
        try:
            log.print_test_begin(testName)
            # ------------------------------------
            ET_mf = 3.0e-5  # mass flux rate in kg/(m^2*s)
            ET_mm_per_day = sc_ET.mass_flux_to_water_evaporated(ET_mf)
            # assertEqual replaces the deprecated assertEquals alias,
            # which was removed in Python 3.12.
            self.assertEqual(round(ET_mm_per_day, 6), 2.592000)
            # ------------------------------------
            log.print_test_success(testName)
        except Exception:
            # `except Exception` (not bare `except:`) so SystemExit and
            # KeyboardInterrupt still propagate.
            log.print_test_failure(testName)
            self.fail(msg=testName[testName.rfind("_")+1:] + "() FAILED")

    def test_energyFluxToWaterEvaporated(self):
        """
        Is Energy flux correctly converting to the amount of Water Evaporated?
        """
        testName = "test_energyFluxToWaterEvaporated"
        try:
            log.print_test_begin(testName)
            # ------------------------------------
            ET_ef = 30  # energy flux rate in W/m^2
            ET_ef_2 = 100  # energy flux rate in W/m^2
            ET_mm_per_day = sc_ET.energy_flux_to_water_evaporated(ET_ef)
            ET_mm_per_day_2 = sc_ET.energy_flux_to_water_evaporated(ET_ef_2)
            self.assertEqual(round(ET_mm_per_day, 2), 1.15)
            self.assertEqual(round(ET_mm_per_day_2, 2), 3.82)
            # ------------------------------------
            log.print_test_success(testName)
        except Exception:
            log.print_test_failure(testName)
            self.fail(msg=testName[testName.rfind("_")+1:] + "() FAILED")
##########################################################################################
def suite():
    """
    Gather all the tests from this module in a test suite.
    """
    collected = unittest.TestSuite()
    for case_name in ('test_massFluxToWaterEvaporated',
                      'test_energyFluxToWaterEvaporated'):
        collected.addTest(TestCalculations_ET(case_name))
    return collected
| {
"content_hash": "ada9c9a86663b23e1d46404017ea7ad2",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 103,
"avg_line_length": 36.51470588235294,
"alnum_prop": 0.5561820378574305,
"repo_name": "maplion/SPEED",
"id": "f565af091387f6b78896c4dfe8dec0e8b3481be1",
"size": "2529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testCalculations/ET.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106864"
}
],
"symlink_target": ""
} |
import requests
import traceback
import sys
from bs4 import BeautifulSoup
import csv
## getHTML
def getHTML():
    """Fetch the Wikipedia MCC page.

    Returns the page body UTF-8 encoded, or "" if the request failed
    (the traceback is printed for diagnosis and the caller checks for
    an empty result).
    """
    url = "http://en.wikipedia.org/wiki/Mobile_country_code"
    html = ""
    try:
        response = requests.get(url)
        html = response.text.encode("utf-8")
    except:
        traceback.print_exc()
    return html
## end
## getHeaders
def getHeaders():
    """Column headers for the output CSV."""
    header_row = ["MCC", "Country Name"]
    return header_row
## end
## getRows
## getRows
def getRows(contextDiv):
    """Scrape (MCC, country name) pairs from the page's content div.

    Relies on the page layout pairing each country h4 heading with the
    wikitable that follows it; results are deduplicated via a set and
    returned sorted by MCC. Python 2 only (print statements, xrange).
    """
    ## Find all h4's and table.wikitable
    h4List = contextDiv.find_all("h4")
    tableList = contextDiv.find_all("table", attrs={"class": "wikitable"})
    ## Store result in a set to avoid dupes
    resultSet = set()
    ## We loop the min of the lists len
    loopLen = min(len(h4List), len(tableList))
    for i in xrange(loopLen):
        ## Select "h4 span a" that contains country name
        h4 = h4List[i]
        a = h4.select("span a")
        ## If we don't find appropriate "a" tag, exit with error
        if (len(a) < 1):
            print resultSet
            print "Couldn't find link for " + str(h4) + ", i = " + str(i)
            sys.exit(1)
        ## Skip the test network, hence wikitable will always be i+1 lookup
        # NOTE(review): assumes table[0] belongs to the "test network"
        # section with no h4 -- confirm against the live page layout.
        if i+1 == len(tableList):
            break
        ## Grab wikitable
        table = tableList[i+1]
        ## Find all "tr" and skip the header "tr"
        trList = table.find_all("tr")
        skipFirst = False
        for trElem in trList:
            if not skipFirst:
                skipFirst = True
                continue
            ## Find mcc
            mccNode = trElem.find("td")
            mcc = ""
            if mccNode != None:
                mcc = mccNode.get_text()
            ## Country name
            countryName = a[0].get_text()
            ## Add (mcc, country name) to the set if not empty
            if mcc != "" and countryName != "":
                res = (mcc.encode("utf-8").strip(), countryName.encode("utf-8").strip())
                resultSet.add(res)
    # convert back to list and sort it based on MCC
    # (lexicographic sort on the MCC string, which is fine for
    # fixed-width 3-digit codes)
    resultList = list(resultSet)
    resultList.sort(key=lambda tup: tup[0])
    return resultList
## end
## Grab html from wiki
html = getHTML()
if html == "":
    print "HTML retrieve is empty, cannot proceed!!!"
    sys.exit(1)
## Use BeautifulSoup to extract headers and rows
soup = BeautifulSoup(html)
# All country sections live inside Wikipedia's main content div.
contextDiv = soup.find("div", attrs={"id": "mw-content-text"})
headers = getHeaders()
rows = getRows(contextDiv)
## Write to file
outputFileName = "mcc-wiki.csv"
try :
    # Binary mode ("wb") is required by the Python 2 csv module.
    f = open(outputFileName, "wb")
    writer = csv.writer(f, delimiter=",", quoting = csv.QUOTE_MINIMAL)
    writer.writerow(headers)
    writer.writerows(rows)
except:
    traceback.print_exc()
    sys.exit(1)
| {
"content_hash": "b418c5dc60d808cb500c33c0d1a3b217",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 88,
"avg_line_length": 28.6875,
"alnum_prop": 0.5787944807552651,
"repo_name": "ravikiranj/mcc-mnc-csv",
"id": "a6c6e5bcfba459dc2088413f710a6068d46c4b19",
"size": "2777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getMccDataFromWiki.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4214"
}
],
"symlink_target": ""
} |
"""
Modbus TestKit: Implementation of Modbus protocol in python
(C)2009 - Luc Jean - luc.jean@gmail.com
(C)2009 - Apidev - http://www.apidev.fr
This is distributed under GNU LGPL license, see license.txt
"""
import serial
import modbus_tk
import modbus_tk.defines as cst
from modbus_tk import modbus_rtu
import argparse
# PORT is assigned three times; only the command-line value below survives.
# The earlier assignments are leftover defaults for manual testing.
PORT = 1
#PORT = '/dev/ttyp5'
PORT = "/dev/ttys002"
parser = argparse.ArgumentParser(description='Modbus Slave')
parser.add_argument('ports', type=str,
                    help='port name', nargs=1)
args = parser.parse_args()
print "First argument: %s" % args.ports
# nargs=1 yields a one-element list; take the actual port string.
PORT = args.ports[0]
def main():
    """Connect to a Modbus RTU slave over the serial PORT and issue a few
    read queries, logging results; Modbus protocol errors are logged with
    their exception code rather than raised."""
    logger = modbus_tk.utils.create_logger("console")
    try:
        #Connect to the slave
        master = modbus_rtu.RtuMaster(
            serial.Serial(port=PORT, baudrate=9600, bytesize=8, parity='N', stopbits=1, xonxoff=0)
        )
        master.set_timeout(1.0)
        master.set_verbose(True)
        logger.info("connected")
        # Read 10 holding registers from address 0, 1 from address 5,
        # and 10 coils from address 0 on slave id 1.
        logger.info(master.execute(1, cst.READ_HOLDING_REGISTERS, 0, 10))
        logger.info(master.execute(1, cst.READ_HOLDING_REGISTERS, 5, 1))
        logger.info(master.execute(1, cst.READ_COILS, 0, 10))
        #send some queries
        #logger.info(master.execute(1, cst.READ_COILS, 0, 10))
        #logger.info(master.execute(1, cst.READ_DISCRETE_INPUTS, 0, 8))
        #logger.info(master.execute(1, cst.READ_INPUT_REGISTERS, 100, 3))
        #logger.info(master.execute(1, cst.READ_HOLDING_REGISTERS, 100, 12))
        #logger.info(master.execute(1, cst.WRITE_SINGLE_COIL, 7, output_value=1))
        #logger.info(master.execute(1, cst.WRITE_SINGLE_REGISTER, 100, output_value=54))
        #logger.info(master.execute(1, cst.WRITE_MULTIPLE_COILS, 0, output_value=[1, 1, 0, 1, 1, 0, 1, 1]))
        #logger.info(master.execute(1, cst.WRITE_MULTIPLE_REGISTERS, 100, output_value=xrange(12)))
    except modbus_tk.modbus.ModbusError as exc:
        logger.error("%s- Code=%d", exc, exc.get_exception_code())
if __name__ == "__main__":
    main()
| {
"content_hash": "fa29b299a2366912ae958a0016f30995",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 107,
"avg_line_length": 31.776119402985074,
"alnum_prop": 0.6256458431188351,
"repo_name": "nodesense/nodesense-modbus-serial",
"id": "42c168f6890efea0460eacd7266a7953193aa78c",
"size": "2177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/rtu_master.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "21248"
},
{
"name": "Python",
"bytes": "6046"
},
{
"name": "TypeScript",
"bytes": "24831"
}
],
"symlink_target": ""
} |
import platform
import six.moves.configparser
from base64 import b64decode
from typing import Optional, Set
config_file = six.moves.configparser.RawConfigParser() # type: ignore # https://github.com/python/typeshed/pull/206
config_file.read("/etc/zulip/zulip.conf")
# Whether we're running in a production environment. Note that PRODUCTION does
# **not** mean hosted on Zulip.com; customer sites are PRODUCTION and VOYAGER
# and as such should not assume they are the main Zulip site.
PRODUCTION = config_file.has_option('machine', 'deploy_type')
# The following flags are left over from the various configurations of
# Zulip run by Zulip, Inc.  We will eventually be able to get rid of
# them and just have the PRODUCTION flag, but we need them for now.
ZULIP_COM_STAGING = PRODUCTION and config_file.get('machine', 'deploy_type') == 'zulip.com-staging'
ZULIP_COM = ((PRODUCTION and config_file.get('machine', 'deploy_type') == 'zulip.com-prod') or
             ZULIP_COM_STAGING)
# NOTE(review): this module raises at import time on any non-zulip.com
# deployment -- it is intentionally only usable on the upstream servers.
if not ZULIP_COM:
    raise Exception("You should create your own local settings from prod_settings_template.")
ZULIP_FRIENDS_LIST_ID = '84b2f3da6b'
SHOW_OSS_ANNOUNCEMENT = True
REGISTER_LINK_DISABLED = True
CUSTOM_LOGO_URL = "/static/images/logo/zulip-dropbox.png"
VERBOSE_SUPPORT_OFFERS = True
# This can be filled in automatically from the database, maybe
DEPLOYMENT_ROLE_NAME = 'zulip.com'
# XXX: replace me
CAMO_URI = 'https://external-content.zulipcdn.net/'
# Leave EMAIL_HOST unset or empty if you do not wish for emails to be sent
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'zulip@zulip.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = "Zulip <zulip@zulip.com>"
# The noreply address to be used as Reply-To for certain generated emails.
NOREPLY_EMAIL_ADDRESS = "Zulip <noreply@zulip.com>"
WELCOME_EMAIL_SENDER = {'email': 'wdaher@zulip.com', 'name': 'Waseem Daher'}
# NOTE(review): the Pickle session serializer executes arbitrary code if
# the SECRET_KEY is ever compromised; Django's default JSONSerializer is
# the safer choice -- confirm nothing stores non-JSON session values.
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
REMOTE_POSTGRES_HOST = "postgres.zulip.net"
STATSD_HOST = 'stats.zulip.net'
# Staging and prod differ in hostname, statsd prefix, and error reporting.
if ZULIP_COM_STAGING:
    EXTERNAL_HOST = 'staging.zulip.com'
    STATSD_PREFIX = 'staging'
    STAGING_ERROR_NOTIFICATIONS = True
    SAVE_FRONTEND_STACKTRACES = True
else:
    EXTERNAL_HOST = 'zulip.com'
    EXTERNAL_API_PATH = 'api.zulip.com'
    STATSD_PREFIX = 'app'
# Terms of Service
TERMS_OF_SERVICE = 'corporate/terms.md'
# Major version number (the stuff before the first '.') has to be an integer.
# Users will be asked to re-sign the TOS only when the major version number increases.
# A TOS_VERSION of None has a major version number of -1.
# TOS_VERSION = '1.0'
# FIRST_TIME_TOS_TEMPLATE = 'zulipchat_migration_tos.html'
# Buckets used for Amazon S3 integration for storing files and user avatars.
S3_AUTH_UPLOADS_BUCKET = "zulip-user-uploads"
S3_AVATAR_BUCKET = "humbug-user-avatars"
APNS_SANDBOX = False
APNS_FEEDBACK = "feedback_production"
APNS_CERT_FILE = "/etc/ssl/django-private/apns-dist.pem"
DBX_APNS_CERT_FILE = "/etc/ssl/django-private/dbx-apns-dist.pem"
GOOGLE_OAUTH2_CLIENT_ID = '835904834568-ag4p18v0sd9a0tero14r3gekn6shoen3.apps.googleusercontent.com'
# The email address pattern to use for auto-generated stream emails
# The %s will be replaced with a unique token.
if ZULIP_COM_STAGING:
    EMAIL_GATEWAY_PATTERN = "%s@streams.staging.zulip.com"
else:
    EMAIL_GATEWAY_PATTERN = "%s@streams.zulip.com"
EMAIL_GATEWAY_EXTRA_PATTERN_HACK = r'@[\w-]*\.zulip\.net'
# Email mirror configuration
# The email of the Zulip bot that the email gateway should post as.
EMAIL_GATEWAY_BOT = "emailgateway@zulip.com"
SSO_APPEND_DOMAIN = None  # type: Optional[str]
AUTHENTICATION_BACKENDS = ('zproject.backends.EmailAuthBackend',
                           'zproject.backends.GoogleMobileOauth2Backend')
# ALLOWED_HOSTS is used by django to determine which addresses
# Zulip can serve. This is a security measure.
# The following are the zulip.com hosts
ALLOWED_HOSTS = ['localhost', '.humbughq.com', '54.214.48.144', '54.213.44.54',
                 '54.213.41.54', '54.213.44.58', '54.213.44.73',
                 '54.200.19.65', '54.201.95.104', '54.201.95.206',
                 '54.201.186.29', '54.200.111.22',
                 '54.245.120.64', '54.213.44.83', '.zulip.com', '.zulip.net',
                 '54.244.50.66', '54.244.50.67', '54.244.50.68', '54.244.50.69', '54.244.50.70',
                 '54.244.50.64', '54.244.50.65', '54.244.50.74',
                 'chat.dropboxer.net']
# Well-known bot accounts used by the server internals and monitoring.
NOTIFICATION_BOT = "notification-bot@zulip.com"
ERROR_BOT = "error-bot@zulip.com"
NEW_USER_BOT = "new-user-bot@zulip.com"
NAGIOS_SEND_BOT = 'iago@zulip.com'
NAGIOS_RECEIVE_BOT = 'othello@zulip.com'
# Our internal deployment has nagios checks for both staging and prod
NAGIOS_STAGING_SEND_BOT = 'iago@zulip.com'
NAGIOS_STAGING_RECEIVE_BOT = 'cordelia@zulip.com'
# Also used for support email in emails templates
ZULIP_ADMINISTRATOR = 'support@zulip.com'
ADMINS = (
    ('Zulip Error Reports', 'errors@zulip.com'),
)
EXTRA_INSTALLED_APPS = [
    'analytics',
    'zilencer',
    'corporate',
]
EVENT_LOGS_ENABLED = True
SYSTEM_ONLY_REALMS = set()  # type: Set[str]
| {
"content_hash": "b8af7c0ac6fe8a9090d1887349f4b2b3",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 116,
"avg_line_length": 38.022222222222226,
"alnum_prop": 0.7101110461718293,
"repo_name": "vabs22/zulip",
"id": "b278562b39c7d7fe5917b8ff6cbd0a64b2249985",
"size": "5560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zproject/local_settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "404100"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "468187"
},
{
"name": "JavaScript",
"bytes": "2088122"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "87465"
},
{
"name": "Python",
"bytes": "3556117"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "46689"
}
],
"symlink_target": ""
} |
__author__ = 'rolandh'
from saml2.attribute_converter import d_to_local_name
from saml2.attribute_converter import ac_factory
from saml2.mongo_store import export_mdstore_to_mongo_db
from saml2.mongo_store import MetadataMDB
from saml2.mdstore import MetadataStore
from saml2.mdstore import destinations
from saml2.mdstore import name
from saml2 import saml
from saml2 import md
from saml2 import config
from saml2.extension import mdui
from saml2.extension import idpdisc
from saml2.extension import dri
from saml2.extension import mdattr
from saml2.extension import ui
import xmldsig
import xmlenc
from pathutils import full_path
# Map of XML namespace URI -> schema module, handed to the metadata store
# so it can parse every element type found in SAML metadata documents.
ONTS = {
    saml.NAMESPACE: saml,
    mdui.NAMESPACE: mdui,
    mdattr.NAMESPACE: mdattr,
    dri.NAMESPACE: dri,
    ui.NAMESPACE: ui,
    idpdisc.NAMESPACE: idpdisc,
    md.NAMESPACE: md,
    xmldsig.NAMESPACE: xmldsig,
    xmlenc.NAMESPACE: xmlenc
}
# Attribute converters built from the local attribute-map fixtures.
ATTRCONV = ac_factory(full_path("attributemaps"))
def _eq(l1, l2):
return set(l1) == set(l2)
def test_metadata():
    """Round-trip SWAMID metadata through MongoDB and query it back.

    Requires a reachable MongoDB instance and the bundled swamid-2.0.xml
    fixture; the hard-coded counts below are tied to that snapshot.
    """
    conf = config.Config()
    conf.load_file("idp_conf_mdb")
    UMU_IDP = 'https://idp.umu.se/saml2/idp/metadata.php'
    # Set up a Metadata store
    mds = MetadataStore(ONTS.values(), ATTRCONV, conf,
                        disable_ssl_certificate_validation=True)
    # Import metadata from local file.
    mds.imp({"local": [full_path("swamid-2.0.xml")]})
    assert len(mds) == 1 # One source
    # Export everything into Mongo, then re-point the store at Mongo only,
    # so all assertions below exercise the MongoDB-backed lookups.
    export_mdstore_to_mongo_db(mds, "metadata", "test")
    mdmdb = MetadataMDB(ONTS, ATTRCONV, "metadata", "test")
    # replace all metadata instances with this one
    mds.metadata = {"mongo_db": mdmdb}
    idps = mds.with_descriptor("idpsso")
    assert idps.keys()
    idpsso = mds.single_sign_on_service(UMU_IDP)
    assert len(idpsso) == 1
    assert destinations(idpsso) == [
        'https://idp.umu.se/saml2/idp/SSOService.php']
    _name = name(mds[UMU_IDP])
    assert _name == u'Ume\xe5 University'
    certs = mds.certs(UMU_IDP, "idpsso", "signing")
    assert len(certs) == 1
    sps = mds.with_descriptor("spsso")
    # Fixture-dependent count of SP entities in swamid-2.0.xml.
    assert len(sps) == 356
    wants = mds.attribute_requirement('https://connect.sunet.se/shibboleth')
    assert wants["optional"] == []
    lnamn = [d_to_local_name(mds.attrc, attr) for attr in wants["required"]]
    assert _eq(lnamn, ['eduPersonPrincipalName', 'mail', 'givenName', 'sn',
                       'eduPersonScopedAffiliation', 'eduPersonAffiliation'])
    wants = mds.attribute_requirement(
        "https://gidp.geant.net/sp/module.php/saml/sp/metadata.php/default-sp")
    # Optional
    lnamn = [d_to_local_name(mds.attrc, attr) for attr in wants["optional"]]
    assert _eq(lnamn, ['displayName', 'commonName', 'schacHomeOrganization',
                       'eduPersonAffiliation', 'schacHomeOrganizationType'])
    # Required
    lnamn = [d_to_local_name(mds.attrc, attr) for attr in wants["required"]]
    assert _eq(lnamn, ['eduPersonTargetedID', 'mail',
                       'eduPersonScopedAffiliation'])
if __name__ == "__main__":
    test_metadata()
| {
"content_hash": "50c5c8e2aae6a01e9a49d59a7cc6ce11",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 31.96842105263158,
"alnum_prop": 0.6736911425749095,
"repo_name": "arbn/pysaml2",
"id": "077a3f4e619cd029313dd5420dfe62dfc07434e1",
"size": "3062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_76_metadata_in_mdb.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2404671"
},
{
"name": "Shell",
"bytes": "3398"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import gettext as _
class Product(models.Model):
    """A product for which analytics indicators are computed."""
    name = models.CharField(max_length=128)
    # The actual product's internal name, I am sure all products have one
    # This simplifies the database table function invocations
    product_code = models.CharField(max_length=128, unique=True)
    def __unicode__(self):
        return self.name
class Location(models.Model):
    """A place an indicator can be computed for, identified by a unique code."""
    class Meta:
        # NOTE(review): gettext at import time; gettext_lazy is the usual
        # choice for model verbose names -- confirm translations still load.
        verbose_name = _('Location')
        verbose_name_plural = _('Locations')
    description = models.CharField(max_length=128)
    code = models.CharField(max_length=12, unique=True)
    def __unicode__(self):
        return self.description
class Source(models.Model):
    """
    Represents the concept of a data source
    Ideally this should define where the data should be obtained
    For now, we will be using filtering to get this done with
    """
    class Meta:
        verbose_name = _('Source')
        verbose_name_plural = _('Sources')
    name = models.CharField(max_length=128)
    # Numeric identifier used for filtering; must be unique per source.
    code = models.IntegerField(unique=True)
    def __unicode__(self):
        return self.name
class Indicator(models.Model):
    """A metric computed for a product over a set of locations and sources."""
    name = models.CharField(max_length=128, unique=True)
    description = models.CharField(max_length=128)
    # Determines the order of the field during rendering
    # In case there are multiple indicators to be displayed
    order = models.IntegerField(default=0)
    # Determines custom factors
    # Set of locations for which this indicator needs to be calculated
    locations = models.ManyToManyField(Location)
    # The product to which this is associated
    product = models.ForeignKey(Product)
    # The data source of this indicator
    source = models.ManyToManyField(Source)

    def __unicode__(self):
        return self.name

    def get_source_code(self):
        """Return the associated product's internal code.

        BUG (fixed): this method used to be named ``source``, which
        clobbered the ``source`` ManyToManyField above at class-creation
        time (the later ``def`` wins in the class namespace), silently
        dropping the field. It also referenced the bare name ``product``
        instead of ``self.product``, so any call raised NameError.
        """
        return self.product.product_code

    def get_value(self, start_date, end_date):
        """Compute the indicator's value over [start_date, end_date]."""
        # NOTE(review): ``IndicatorProcessor`` is neither defined nor
        # imported in this module -- confirm where it lives; calling this
        # as-is raises NameError.
        processor = IndicatorProcessor.factory("Sum")
        return processor.process(start_date, end_date, self)
| {
"content_hash": "8c787f309d20e255a7488af3d9d77939",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 73,
"avg_line_length": 29.13888888888889,
"alnum_prop": 0.6901811248808389,
"repo_name": "creativepsyco/django-analytics",
"id": "d7e291827c07120197cd07a0d69a8b18b0b72af2",
"size": "2098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_analytics/report/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9934"
}
],
"symlink_target": ""
} |
# Count mail-header "Subject:" lines in a user-supplied file.
fname = input('Enter the file name: ')
# NOTE(review): no error handling -- a missing file raises
# FileNotFoundError, and the handle is never closed explicitly
# (tutorial-style code).
fhand = open(fname)
count = 0
for line in fhand:
    if line.startswith('Subject:') :
        count = count + 1
print('There were', count, 'subject lines in', fname)
| {
"content_hash": "d0047f3738a4e9107b57ec3dc7ca863e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 53,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.6407766990291263,
"repo_name": "mkhuthir/learnPython",
"id": "b8645d9f4a9588947fd0ff5ad548eb122d9f6502",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Book_pythonlearn_com/02_strings/search6.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7706"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make SlotCharacter.slot nullable and cascade-delete with its inventory."""
    dependencies = [
        ('first_app', '0008_slotcharacter'),
    ]
    operations = [
        migrations.AlterField(
            model_name='slotcharacter',
            name='slot',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='first_app.InventoryCharacter'),
        ),
    ]
| {
"content_hash": "b46f1e65ec470d6992486a175a8910c3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 127,
"avg_line_length": 25.789473684210527,
"alnum_prop": 0.6469387755102041,
"repo_name": "CONSOLNY/rglk",
"id": "9e6d17d97ed645344ce3853d79bc791eb6a14679",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "first_app/migrations/0009_auto_20160401_0347.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4323"
},
{
"name": "Python",
"bytes": "38552"
}
],
"symlink_target": ""
} |
from .configurable_waiting_times import *
from .discard_info import *
from .game_info import *
from .game_message_option import *
from .game_type_options import *
from .lobby_options import *
from .player_info import *
from .seat_info import *
from .table_params import *
from .tuple_table import * | {
"content_hash": "317bfd201e1e3e50dd212b55d9d5f2e0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 41,
"avg_line_length": 29.8,
"alnum_prop": 0.7583892617449665,
"repo_name": "Ericmas001/BluffinMuffin.Protocol",
"id": "c053ab0c1488aa9d2cb2ee2431b5427116f629fb",
"size": "298",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/bluffinmuffin/protocol/data_types/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "158977"
},
{
"name": "PowerShell",
"bytes": "3633"
},
{
"name": "Python",
"bytes": "94998"
}
],
"symlink_target": ""
} |
from django import template
from groups.models import Group
register = template.Library()
@register.filter('get_total_members')
def get_total_members(members_set_list):
    '''Returns the total number of members in the group.

    Uses the related manager's ``count()`` so the database issues a single
    SELECT COUNT(*) instead of fetching every member row only to take
    ``len()`` of the result.
    '''
    return members_set_list.count()
| {
"content_hash": "8907f59ffd87d29f67221496904b5b7d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 55,
"avg_line_length": 24.363636363636363,
"alnum_prop": 0.7388059701492538,
"repo_name": "pihentagyu/django_chms",
"id": "f3773430624335569b06c489a2ae6f82565b6c13",
"size": "268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groups/templatetags/group_extras.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3188"
},
{
"name": "HTML",
"bytes": "32792"
},
{
"name": "JavaScript",
"bytes": "13592"
},
{
"name": "Python",
"bytes": "106296"
},
{
"name": "TeX",
"bytes": "1745"
}
],
"symlink_target": ""
} |
__name__ = 'test'
import Queue
import loggerfactory
import multiprocessing
import ConfigParser
log = None
def readConfigFile(configFile, section, options=None):
    """Read ``section`` from ``configFile``, overriding ``options`` defaults.

    Returns a ``(options, logs)`` tuple, where ``logs`` is a list of
    ``(level, message)`` pairs for the caller to emit later (the logger
    may not be configured yet when this runs). Unknown files or sections
    leave ``options`` untouched and record a warning.
    """
    if options is None:
        # Fix: previously a caller that omitted ``options`` crashed with
        # TypeError ("'NoneType' is not iterable") once the file and
        # section were both readable.
        options = {}
    logs = [('info', 'Reading config file [{0}]'.format(configFile))]
    config = ConfigParser.ConfigParser()
    if not config.read(configFile):
        logs.append(('warning', 'Can\'t read config file'))
        return (options, logs)
    if section not in config.sections():
        logs.append(('warning', 'Section [{0}] not found. Will use defaults'.format(section)))
        return (options, logs)
    # Override each provided default with the configured value, if any.
    for op in options:
        if config.has_option(section, op):
            options[op] = config.get(section, op)
    return (options, logs)
def getStartParams(params):
    """Load this module's start parameters and initialize the global logger.

    Reads the [TEST] section of the ini file named by params['module_ini'],
    falling back to the hard-coded defaults, then creates the module logger
    and replays the buffered log messages collected before the logger existed.
    Returns a 1-tuple containing the db file path.
    """
    startParams = {'db_file':'/tmp/feeds.db','log_file':'/tmp/test1.log','sleep_time':'0'}
    (startParams, logs) = readConfigFile(params['module_ini'], __name__.upper(), startParams)
    # The logger can only be created once we know log_file, so messages
    # produced earlier were buffered as (level, text) pairs.
    global log
    log = loggerfactory.createLogger(__name__, startParams['log_file'])
    logDispatch = {
        'info': lambda l, s: l.info(s),
        'debug': lambda l, s: l.debug(s),
        'warning': lambda l, s: l.warning(s),
        'error': lambda l, s: l.error(s)
    }
    for l in logs:
        logDispatch[l[0]](log, l[1])
    log.info(startParams)
    return (startParams['db_file'],)
def run(queue, params):
    """Worker entry point: initialize from params, then block on the queue
    handling (command, params) messages until a 'stop' command arrives."""
    db = getStartParams(params)
    log.info('DB file: [{0}]'.format(db))
    while True:
        # Blocks until the parent process sends the next command.
        (command, params) = queue.get()
        log.debug('{0} recieved command: [{1}]'.format(multiprocessing.current_process().name, str(command)))
        if command == 'stop':
            log.info('{0} stopping...'.format(multiprocessing.current_process().name))
            log.info('{0} [OK]'.format(multiprocessing.current_process().name))
            return
| {
"content_hash": "5f2071cb3e55ea07d46387f95cea6bb7",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 103,
"avg_line_length": 25.96875,
"alnum_prop": 0.6732851985559567,
"repo_name": "unix-beard/newsbot",
"id": "d15bfef1c065993b57f64dbbb969528d2091f460",
"size": "1662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/modules/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45317"
}
],
"symlink_target": ""
} |
"""
Get data from sensors and post it to specified mqtt channel in json-format
Requires:
Ruuvitag sensor - pip3 install --user ruuvitag-sensor
Paho MQTT - pip3 install --user paho-mqtt
Example usage:
./post_to_mqtt.py --mac DE:AD:BE:EE:EE:FF -b mqtt.ikenet \
-t ruuvitag/sauna -i 60 -l saunassa
See here how to automate this using Ansible:
https://github.com/RedHatNordicsSA/iot-hack/blob/master/run-ruuvi-to-mqtt.yml
"""
import time
import json
import sys
import signal
import argparse
import paho.mqtt.client as mqtt
from paho.mqtt import publish
from ruuvitag_sensor.ruuvitag import RuuviTag
# Command-line interface: MAC and broker/topic are required, the rest
# have sensible defaults.
parser = argparse.ArgumentParser(
    # Fix: the implicit string concatenation was missing a separating
    # space, producing "...humidityadvertisements..." in --help output.
    description='Program relays Ruuvitag BLE temperature and humidity '
    'advertisements to MQTT broker.')
parser.add_argument(
    '-m', '--mac', dest='mac_address', required=True,
    help='Ruuvitag MAC address')
parser.add_argument(
    '-b', '--broker', dest='mqtt_broker', required=True,
    help='mqtt broker address, ip or fqdn')
parser.add_argument(
    '-t', '--topic', dest='mqtt_topic', required=True,
    help='mqtt topic, e.g. ruuvitag/sauna')
parser.add_argument(
    '-a', '--all', action='store_true', required=False,
    help='send all Ruuvitag values')
parser.add_argument(
    '-i', '--interval', dest='interval', default=60,
    type=int, required=False,
    help='seconds to wait between data queries')
parser.add_argument(
    '-l', '--location', dest='location', required=False,
    help='additional location tag for json')
args = parser.parse_args()
# Unpack into module-level names used by the main loop below.
mac_address = args.mac_address
mqtt_broker = args.mqtt_broker
mqtt_topic = args.mqtt_topic
interval = args.interval
send_all = args.all
location = args.location
# Trap ctrl-c / SIGINT so we can come down nicely.
# pylint: disable=unused-argument,redefined-outer-name
def signal_handler(signal, frame):
    """Disconnect cleanly from the MQTT broker and exit on SIGINT."""
    # NOTE: the parameter deliberately shadows the `signal` module (see pragma).
    print('\nterminating gracefully.')
    client.disconnect()
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# The callback for when the client receives a CONNACK response from the MQTT server.
# pylint: disable=unused-argument,redefined-outer-name
def on_connect(client, userdata, flags, rc):
    """Log the connection result code and (re)subscribe."""
    print(f'Connected to MQTT broker with result code {str(rc)}')
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe('$SYS/#')
client = mqtt.Client()
client.on_connect = on_connect
client.connect(mqtt_broker, 1883, 60)  # default MQTT port, 60 s keepalive
client.loop_start()  # paho network loop runs in a background thread
print('Start listening to Ruuvitag')
sensor = RuuviTag(mac_address)
while True:
    # update state from the device
    state = sensor.update()
    # Tag every reading; fall back to the MAC address when no -l was given.
    if location:
        state['location'] = location
    else:
        state['location'] = mac_address
    if send_all:
        # -a/--all: forward every field the tag reported.
        mqtt_msg = json.dumps(state)
    else:
        # extract temp and humidity values, and format data into custom JSON
        # (assumes the tag's data format includes both keys — crashes otherwise)
        for_json = {
            'location': state['location'],
            'temperature': round(state['temperature'], 1),
            'humidity': round(state['humidity'], 1)
        }
        mqtt_msg = json.dumps(for_json)
    publish.single(mqtt_topic, mqtt_msg, hostname=mqtt_broker)
    print('.', end='', flush=True)  # heartbeat marker on stdout
    time.sleep(interval)
| {
"content_hash": "19f8236d1d77465a39de1da19ab1dd07",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 84,
"avg_line_length": 29.08108108108108,
"alnum_prop": 0.6899008674101611,
"repo_name": "ttu/ruuvitag-sensor",
"id": "f93d8ae1a6f61d62aed942990cca783379d44646",
"size": "3243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/post_to_mqtt.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73816"
},
{
"name": "Shell",
"bytes": "594"
}
],
"symlink_target": ""
} |
"""
Multiple loop script calling up simviz1.py. That is, the
user can assign multiple values to any of the legal input
parameters to simviz1.py and automatically get a loop over
all combinations of the input parameter values.
From each experiment we extract the maximum amplitude of
the oscillations and store this in a data structure
amplitude[p], where p is a tuple containing the values
of each input parameter.
"""
import sys, math, os, commands
import scitools.multipleloop as mp
import scitools.filetable
# load command-line arguments into dictionary of legal prm names
p = {'m': 1, 'b': 0.7, 'c': 5, 'func': 'y', 'A': 5,
     'w': 2*math.pi, 'y0': 0.2, 'tstop': 30, 'dt': 0.05}
# (case is not included since this parameter is overridden)
# NOTE(review): the loop scans from sys.argv[0] (the script name); it works
# because non-option tokens simply never match a key in p — confirm intended.
for i in range(len(sys.argv[1:])):
    name = sys.argv[i][1:]  # skip initial hyphen for prm name
    if name in p:
        p[name] = sys.argv[i+1]
#p = {'w': '[0.7:1.3,0.1]', 'b': '1 & 0.3 & 0', 'func': 'y & siny'}
# Expand each parameter spec into its list of values, then form the
# cross product of all combinations (one entry in `all` per experiment).
prm_values = [(name, mp.input2values(p[name])) for name in p]
all, names, varied = mp.combine(prm_values)
for experiment in all:
    print experiment
# Turn each combination into a '-name value ...' command-line string.
options = mp.options(all, names, prefix='-')
for cmlargs in options:
    print cmlargs
def get_amplitude():
    """Return the oscillation amplitude: max of the last half of y in tmp1/sim.dat."""
    # load data from sim.dat:
    t, y = scitools.filetable.readfile(os.path.join('tmp1', 'sim.dat'))
    # Use floor division so the slice index is an int under Python 3 too;
    # the previous plain '/' only worked because this script targets Python 2.
    amplitude = max(y[len(y)//2:])  # max of last half of y
    return amplitude
# add directory where simviz1.py resides to PATH:
os.environ['PATH'] += os.pathsep + \
    os.path.join(os.environ['scripting'], 'src','py','intro')
amplitude = []
# amplitude[i] equals (vprms, amp), where amp is the amplitude
# and vprms are the varied parameters, those with indices in indices_varied
indices_varied = [names.index(i) for i in varied]
for cmlargs, parameters in zip(options, all):
    # Run one simulation per parameter combination; -case tmp1 fixes the
    # output directory that get_amplitude() reads back.
    cmd = 'simviz1.py ' + cmlargs + ' -noscreenplot -case tmp1'
    failure, output = commands.getstatusoutput(cmd)
    varied_parameters = [parameters[i] for i in indices_varied]
    amplitude.append((varied_parameters, get_amplitude()))
# plot amplitude as function of w:
i = names.index('w')
for p, a in amplitude:
    print p, a
| {
"content_hash": "f1446b351c060123950eea9b336f8965",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 70,
"avg_line_length": 33.84126984126984,
"alnum_prop": 0.6819887429643527,
"repo_name": "sniemi/SamPy",
"id": "1ecb75df082704ec0286fd6d29471e4e1c9b66b3",
"size": "2154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/src1/TCSE3-3rd-examples/src/py/examples/simviz/mloop4simviz1.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "C",
"bytes": "68436"
},
{
"name": "C++",
"bytes": "45956"
},
{
"name": "CSS",
"bytes": "35570"
},
{
"name": "Fortran",
"bytes": "45191"
},
{
"name": "HTML",
"bytes": "107435"
},
{
"name": "IDL",
"bytes": "13651"
},
{
"name": "JavaScript",
"bytes": "25435"
},
{
"name": "Makefile",
"bytes": "26035"
},
{
"name": "Matlab",
"bytes": "1508"
},
{
"name": "Perl",
"bytes": "59198"
},
{
"name": "PostScript",
"bytes": "1403536"
},
{
"name": "Prolog",
"bytes": "16061"
},
{
"name": "Python",
"bytes": "5763358"
},
{
"name": "R",
"bytes": "208346"
},
{
"name": "Rebol",
"bytes": "161"
},
{
"name": "Roff",
"bytes": "73616"
},
{
"name": "Ruby",
"bytes": "2032"
},
{
"name": "Shell",
"bytes": "41512"
},
{
"name": "Tcl",
"bytes": "44150"
},
{
"name": "TeX",
"bytes": "107783"
}
],
"symlink_target": ""
} |
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
    # Python 3: unify the string type aliases used throughout this module.
    basestring = unicode = str
else:
    # Python 2: use the lazy C-implemented variants of map/filter.
    from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]  # public API of this module
def portable_hash(x):
    """
    This function returns consistent hash code for builtin types, especially
    for None and tuple with None.

    The algorithm is similar to that one used by CPython 2.7

    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """
    # Fix: compare version *tuples*, not strings. The old check
    # `sys.version >= '3.3'` is False on Python 3.10+ ("3.10" < "3.3"
    # lexicographically), which silently disabled the hash-seed guard there.
    if sys.version_info >= (3, 3) and 'PYTHONHASHSEED' not in os.environ:
        raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")

    if x is None:
        return 0
    if isinstance(x, tuple):
        # Tuple hashing modeled on CPython 2.7, clamped to the non-negative
        # range with sys.maxsize so results agree across mixed workers.
        h = 0x345678
        for i in x:
            h ^= portable_hash(i)
            h *= 1000003
            h &= sys.maxsize
        h ^= len(x)
        if h == -1:
            h = -2
        return int(h)
    return hash(x)
class BoundedFloat(float):
    """
    A float produced by an approximate job, carrying its confidence level
    and the low/high bounds of the estimate.

    >>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
    100.0
    """
    def __new__(cls, mean, confidence, low, high):
        instance = super(BoundedFloat, cls).__new__(cls, mean)
        instance.confidence = confidence
        instance.low = low
        instance.high = high
        return instance
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
    """Connect to the local JVM server on *port* and yield deserialized items."""
    sock = None
    # Try every resolved address (on most dual-stack systems IPv6 comes
    # first) until one of them accepts the connection.
    for af, socktype, proto, _canonname, addr in socket.getaddrinfo(
            "localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        sock = socket.socket(af, socktype, proto)
        try:
            sock.settimeout(3)
            sock.connect(addr)
        except socket.error:
            sock.close()
            sock = None
        else:
            break
    if not sock:
        raise Exception("could not open socket")
    try:
        rf = sock.makefile("rb", 65536)
        for item in serializer.load_stream(rf):
            yield item
    finally:
        sock.close()
def ignore_unicode_prefix(f):
    """
    Strip the u'' prefix from string reprs in *f*'s docstring so its
    doctests pass on both Python 2 and Python 3.
    """
    if sys.version >= '3':
        # Python 3 reprs carry no 'u' prefix, so drop it from the
        # expected doctest output.
        pattern = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
        f.__doc__ = pattern.sub(r'\1\2', f.__doc__)
    return f
class Partitioner(object):
    """Maps a key to a partition id in [0, numPartitions) via partitionFunc."""

    def __init__(self, numPartitions, partitionFunc):
        self.numPartitions = numPartitions
        self.partitionFunc = partitionFunc

    def __eq__(self, other):
        # Equal iff both the partition count and the hashing function match.
        if not isinstance(other, Partitioner):
            return False
        return (self.numPartitions == other.numPartitions
                and self.partitionFunc == other.partitionFunc)

    def __call__(self, k):
        return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
    def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
        self._jrdd = jrdd  # py4j proxy to the JVM-side RDD
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = ctx  # owning SparkContext
        self._jrdd_deserializer = jrdd_deserializer  # decodes partition bytes on the Python side
        self._id = jrdd.id()
        self.partitioner = None  # filled in when a partitioner is known (see union/partitionBy)
    def _pickled(self):
        # Re-encode this RDD with the default pickle-based batched serializer.
        return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
    def id(self):
        """
        A unique ID for this RDD (within its SparkContext).
        """
        # Cached from the Java RDD at construction time.
        return self._id
    def __repr__(self):
        # Delegate to the JVM-side toString() for the debug representation.
        return self._jrdd.toString()
    def __getnewargs__(self):
        # This method is called when attempting to pickle an RDD, which is always an error:
        # an RDD wraps a live JVM handle and can never be shipped to executors.
        raise Exception(
            "It appears that you are attempting to broadcast an RDD or reference an RDD from an "
            "action or transformation. RDD transformations and actions can only be invoked by the "
            "driver, not inside of other transformations; for example, "
            "rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
            "transformation and count action cannot be performed inside of the rdd1.map "
            "transformation. For more information, see SPARK-5063."
        )
    @property
    def context(self):
        """
        The L{SparkContext} that this RDD was created on.
        """
        # Read-only view over the ctx attribute set in __init__.
        return self.ctx
    def cache(self):
        """
        Persist this RDD with the default storage level (C{MEMORY_ONLY}).
        """
        # Thin convenience wrapper over persist() with the default level.
        self.is_cached = True
        self.persist(StorageLevel.MEMORY_ONLY)
        return self
    def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
        """
        Set this RDD's storage level to persist its values across operations
        after the first time it is computed. This can only be used to assign
        a new storage level if the RDD does not have a storage level set yet.
        If no storage level is specified defaults to (C{MEMORY_ONLY}).

        >>> rdd = sc.parallelize(["b", "a", "c"])
        >>> rdd.persist().is_cached
        True
        """
        self.is_cached = True
        # Translate the Python StorageLevel into its JVM counterpart.
        javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
        self._jrdd.persist(javaStorageLevel)
        return self
    def unpersist(self):
        """
        Mark the RDD as non-persistent, and remove all blocks for it from
        memory and disk.
        """
        self.is_cached = False
        self._jrdd.unpersist()
        return self
    def checkpoint(self):
        """
        Mark this RDD for checkpointing. It will be saved to a file inside the
        checkpoint directory set with L{SparkContext.setCheckpointDir()} and
        all references to its parent RDDs will be removed. This function must
        be called before any job has been executed on this RDD. It is strongly
        recommended that this RDD is persisted in memory, otherwise saving it
        on a file will require recomputation.
        """
        self.is_checkpointed = True
        # Checkpointing is performed on the underlying Scala RDD.
        self._jrdd.rdd().checkpoint()
    def isCheckpointed(self):
        """
        Return whether this RDD has been checkpointed or not
        """
        return self._jrdd.rdd().isCheckpointed()
    def getCheckpointFile(self):
        """
        Gets the name of the file to which this RDD was checkpointed.

        Returns None when the RDD has not been checkpointed.
        """
        # The Scala side returns an Option; unwrap it, falling through to
        # an implicit None when it is empty.
        checkpointFile = self._jrdd.rdd().getCheckpointFile()
        if checkpointFile.isDefined():
            return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
    def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithIndex(f).sum()
        6
        """
        # PipelinedRDD (defined elsewhere in this module) fuses consecutive
        # narrow transformations into a single Python evaluation pass.
        return PipelinedRDD(self, f, preservesPartitioning)
    def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
        """
        Deprecated: use mapPartitionsWithIndex instead.

        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithSplit(f).sum()
        6
        """
        # Kept only for backwards compatibility; emit a deprecation warning
        # pointing at the caller (stacklevel=2) and delegate.
        warnings.warn("mapPartitionsWithSplit is deprecated; "
                      "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
        return self.mapPartitionsWithIndex(f, preservesPartitioning)
    def getNumPartitions(self):
        """
        Returns the number of partitions in RDD

        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> rdd.getNumPartitions()
        2
        """
        # Asks the JVM for the partition array size.
        return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
    def sample(self, withReplacement, fraction, seed=None):
        """
        Return a sampled subset of this RDD.

        :param withReplacement: can elements be sampled multiple times (replaced when sampled out)
        :param fraction: expected size of the sample as a fraction of this RDD's size
            without replacement: probability that each element is chosen; fraction must be [0, 1]
            with replacement: expected number of times each element is chosen; fraction must be >= 0
        :param seed: seed for the random number generator

        >>> rdd = sc.parallelize(range(100), 4)
        >>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
        True
        """
        assert fraction >= 0.0, "Negative fraction value: %s" % fraction
        # RDDSampler draws per-partition, re-seeding with the partition index.
        return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
    # this is ported from scala/spark/RDD.scala
    def takeSample(self, withReplacement, num, seed=None):
        """
        Return a fixed-size sampled subset of this RDD.

        Note that this method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

        >>> rdd = sc.parallelize(range(0, 10))
        >>> len(rdd.takeSample(True, 20, 1))
        20
        >>> len(rdd.takeSample(False, 5, 2))
        5
        >>> len(rdd.takeSample(False, 15, 3))
        10
        """
        numStDev = 10.0  # safety margin used to bound the largest allowed sample

        if num < 0:
            raise ValueError("Sample size cannot be negative.")
        elif num == 0:
            return []

        initialCount = self.count()
        if initialCount == 0:
            return []

        rand = random.Random(seed)

        if (not withReplacement) and num >= initialCount:
            # shuffle current RDD and return
            samples = self.collect()
            rand.shuffle(samples)
            return samples

        # Cap the request so the oversampling below cannot overflow.
        maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
        if num > maxSampleSize:
            raise ValueError(
                "Sample size cannot be greater than %d." % maxSampleSize)

        # Oversample so that one pass is almost always enough.
        fraction = RDD._computeFractionForSampleSize(
            num, initialCount, withReplacement)
        samples = self.sample(withReplacement, fraction, seed).collect()

        # If the first sample didn't turn out large enough, keep trying to take samples;
        # this shouldn't happen often because we use a big multiplier for their initial size.
        # See: scala/spark/RDD.scala
        while len(samples) < num:
            # TODO: add log warning for when more than one iteration was run
            seed = rand.randint(0, sys.maxsize)
            samples = self.sample(withReplacement, fraction, seed).collect()

        rand.shuffle(samples)

        return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
    def union(self, other):
        """
        Return the union of this RDD and another one.

        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> rdd.union(rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        if self._jrdd_deserializer == other._jrdd_deserializer:
            # Same wire format on both sides: the JVM union can be used as-is.
            rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
                      self._jrdd_deserializer)
        else:
            # These RDDs contain data in different serialized formats, so we
            # must normalize them to the default serializer.
            self_copy = self._reserialize()
            other_copy = other._reserialize()
            rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
                      self.ctx.serializer)
        # A union of two identically-partitioned RDDs keeps that partitioner.
        if (self.partitioner == other.partitioner and
                self.getNumPartitions() == rdd.getNumPartitions()):
            rdd.partitioner = self.partitioner
        return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
    def _reserialize(self, serializer=None):
        # Return an RDD whose data is encoded with `serializer` (defaulting
        # to the context's serializer); a no-op when it already matches.
        serializer = serializer or self.ctx.serializer
        if self._jrdd_deserializer != serializer:
            # An identity map forces a re-encode; rebinding `self` here only
            # changes the local name, then the new deserializer is recorded.
            self = self.map(lambda x: x, preservesPartitioning=True)
            self._jrdd_deserializer = serializer
        return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
    def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
                                           ascending=True, keyfunc=lambda x: x):
        """
        Repartition the RDD according to the given partitioner and, within each resulting partition,
        sort records by their keys.

        >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
        >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
        >>> rdd2.glom().collect()
        [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()

        # Budget for the external (spilling) sorter, in MB.
        memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
        serializer = self._jrdd_deserializer

        def sortPartition(iterator):
            # ExternalSorter spills to disk when the partition exceeds the budget.
            sort = ExternalSorter(memory * 0.9, serializer).sorted
            return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))

        return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
    def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
        """
        Sorts this RDD, which is assumed to consist of (key, value) pairs.
        # noqa

        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortByKey().first()
        ('1', 3)
        >>> sc.parallelize(tmp).sortByKey(True, 1).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
        >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
        >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
        [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()

        memory = self._memory_limit()
        serializer = self._jrdd_deserializer

        def sortPartition(iterator):
            # External (spilling) sort of a single partition.
            sort = ExternalSorter(memory * 0.9, serializer).sorted
            return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))

        if numPartitions == 1:
            # Single output partition: collapse and sort locally, no range
            # partitioning needed.
            if self.getNumPartitions() > 1:
                self = self.coalesce(1)
            return self.mapPartitions(sortPartition, True)

        # first compute the boundary of each part via sampling: we want to partition
        # the key-space into bins such that the bins have roughly the same
        # number of (key, value) pairs falling into them
        rddSize = self.count()
        if not rddSize:
            return self  # empty RDD
        maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
        fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
        samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
        samples = sorted(samples, key=keyfunc)

        # we have numPartitions many parts but one of them has
        # an implicit boundary
        bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
                  for i in range(0, numPartitions - 1)]

        def rangePartitioner(k):
            # Place a key in the bin whose [lower, upper) range contains it.
            p = bisect.bisect_left(bounds, keyfunc(k))
            if ascending:
                return p
            else:
                return numPartitions - 1 - p

        return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
    def cartesian(self, other):
        """
        Return the Cartesian product of this RDD and another one, that is, the
        RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
        C{b} is in C{other}.

        >>> rdd = sc.parallelize([1, 2])
        >>> sorted(rdd.cartesian(rdd).collect())
        [(1, 1), (1, 2), (2, 1), (2, 2)]
        """
        # Due to batching, we can't use the Java cartesian method.
        # CartesianDeserializer pairs up the two sides' batched streams.
        deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                             other._jrdd_deserializer)
        return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
    @ignore_unicode_prefix
    def pipe(self, command, env=None, checkCode=False):
        """
        Return an RDD created by piping elements to a forked external process.

        >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
        [u'1', u'2', u'', u'3']

        :param checkCode: whether or not to check the return value of the shell command.
        """
        if env is None:
            env = dict()

        def func(iterator):
            # NOTE(review): Popen(env=env) replaces the child's environment
            # entirely — with the default {}, the command runs with an empty
            # environment; confirm this is intended.
            pipe = Popen(
                shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)

            def pipe_objs(out):
                # Feed elements to the child's stdin on a separate thread so
                # reading its stdout below cannot deadlock.
                for obj in iterator:
                    s = str(obj).rstrip('\n') + '\n'
                    out.write(s.encode('utf-8'))
                out.close()
            Thread(target=pipe_objs, args=[pipe.stdin]).start()

            def check_return_code():
                # Empty generator: only runs (and possibly raises) once the
                # child's stdout has been fully drained by chain() below.
                pipe.wait()
                if checkCode and pipe.returncode:
                    raise Exception("Pipe function `%s' exited "
                                    "with error code %d" % (command, pipe.returncode))
                else:
                    for i in range(0):
                        yield i
            return (x.rstrip(b'\n').decode('utf-8') for x in
                    chain(iter(pipe.stdout.readline, b''), check_return_code()))
        return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
    def collect(self):
        """
        Return a list that contains all of the elements in this RDD.

        Note that this method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.
        """
        with SCCallSiteSync(self.context) as css:
            # The JVM serves the job result over a local socket; stream and
            # deserialize it on this side.
            port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
        return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
    def treeReduce(self, f, depth=2):
        """
        Reduces the elements of this RDD in a multi-level tree pattern.

        :param depth: suggested depth of the tree (default: 2)

        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeReduce(add)
        -5
        >>> rdd.treeReduce(add, 1)
        -5
        >>> rdd.treeReduce(add, 2)
        -5
        >>> rdd.treeReduce(add, 5)
        -5
        >>> rdd.treeReduce(add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

        # treeAggregate needs a zero value, but reduce has none — so wrap
        # every element as (value, is_dummy) and let dummies be absorbed.
        zeroValue = None, True  # Use the second entry to indicate whether this is a dummy value.

        def op(x, y):
            if x[1]:
                return y
            elif y[1]:
                return x
            else:
                return f(x[0], y[0]), False

        reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
        if reduced[1]:
            # Only dummies survived: the RDD had no elements.
            raise ValueError("Cannot reduce empty RDD.")
        return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
an U and one operation for merging two U
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
    def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
        """
        Aggregates the elements of this RDD in a multi-level tree
        pattern.

        :param depth: suggested depth of the tree (default: 2)

        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeAggregate(0, add, add)
        -5
        >>> rdd.treeAggregate(0, add, add, 1)
        -5
        >>> rdd.treeAggregate(0, add, add, 2)
        -5
        >>> rdd.treeAggregate(0, add, add, 5)
        -5
        >>> rdd.treeAggregate(0, add, add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

        if self.getNumPartitions() == 0:
            return zeroValue

        def aggregatePartition(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = seqOp(acc, obj)
            yield acc

        partiallyAggregated = self.mapPartitions(aggregatePartition)
        numPartitions = partiallyAggregated.getNumPartitions()
        # Fan-in factor per tree level, derived from the requested depth.
        scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
        # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
        # aggregation.
        while numPartitions > scale + numPartitions / scale:
            # NOTE: true division makes numPartitions a float on Python 3;
            # the int() cast below restores an integer partition count.
            numPartitions /= scale
            curNumPartitions = int(numPartitions)

            def mapPartition(i, iterator):
                # Spread partition results over the smaller partition count.
                for obj in iterator:
                    yield (i % curNumPartitions, obj)

            partiallyAggregated = partiallyAggregated \
                .mapPartitionsWithIndex(mapPartition) \
                .reduceByKey(combOp, curNumPartitions) \
                .values()

        return partiallyAggregated.reduce(combOp)
def max(self, key=None):
    """
    Find the maximum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
    >>> rdd.max()
    43.0
    >>> rdd.max(key=str)
    5.0
    """
    # Pick the comparator up front: the plain builtin when no key is
    # given, otherwise a two-argument wrapper that forwards the key.
    comparator = max if key is None else (lambda a, b: max(a, b, key=key))
    return self.reduce(comparator)
def min(self, key=None):
    """
    Find the minimum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
    >>> rdd.min()
    2.0
    >>> rdd.min(key=str)
    10.0
    """
    # Pick the comparator up front: the plain builtin when no key is
    # given, otherwise a two-argument wrapper that forwards the key.
    comparator = min if key is None else (lambda a, b: min(a, b, key=key))
    return self.reduce(comparator)
def sum(self):
    """
    Add up the elements in this RDD.

    >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
    6.0
    """
    # Sum each partition locally, then fold the per-partition totals.
    partition_totals = self.mapPartitions(lambda iterator: [sum(iterator)])
    return partition_totals.fold(0, operator.add)
def count(self):
    """
    Return the number of elements in this RDD.

    >>> sc.parallelize([2, 3, 4]).count()
    3
    """
    def count_partition(iterator):
        # One local tally per partition; summed via RDD.sum below.
        yield sum(1 for _ in iterator)

    return self.mapPartitions(count_partition).sum()
def stats(self):
    """
    Return a L{StatCounter} object that captures the mean, variance
    and count of the RDD's elements in one operation.
    """
    # Build one StatCounter per partition, then merge them pairwise.
    per_partition = self.mapPartitions(lambda iterator: [StatCounter(iterator)])
    return per_partition.reduce(lambda left, right: left.mergeStats(right))
def histogram(self, buckets):
    """
    Compute a histogram using the provided buckets. The buckets
    are all open to the right except for the last which is closed.
    e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
    which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
    and 50 we would have a histogram of 1,0,1.

    If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
    this can be switched from an O(log n) insertion to O(1) per
    element (where n is the number of buckets).

    Buckets must be sorted, not contain any duplicates, and have
    at least two elements.

    If `buckets` is a number, it will generate buckets which are
    evenly spaced between the minimum and maximum of the RDD. For
    example, if the min value is 0 and the max is 100, given `buckets`
    as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
    be at least 1. An exception is raised if the RDD contains infinity.
    If the elements in the RDD do not vary (max == min), a single bucket
    will be used.

    The return value is a tuple of buckets and histogram.

    >>> rdd = sc.parallelize(range(51))
    >>> rdd.histogram(2)
    ([0, 25, 50], [25, 26])
    >>> rdd.histogram([0, 5, 25, 50])
    ([0, 5, 25, 50], [5, 20, 26])
    >>> rdd.histogram([0, 15, 30, 45, 60])  # evenly spaced buckets
    ([0, 15, 30, 45, 60], [15, 15, 15, 6])
    >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
    >>> rdd.histogram(("a", "b", "c"))
    (('a', 'b', 'c'), [2, 2])
    """
    if isinstance(buckets, int):
        # Case 1: caller gave a bucket *count*; derive evenly spaced
        # boundaries from the RDD's min and max.
        if buckets < 1:
            raise ValueError("number of buckets must be >= 1")

        # filter out non-comparable elements
        def comparable(x):
            if x is None:
                return False
            if type(x) is float and isnan(x):
                return False
            return True

        filtered = self.filter(comparable)

        # faster than stats()
        def minmax(a, b):
            return min(a[0], b[0]), max(a[1], b[1])
        try:
            minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
        except TypeError as e:
            # reduce on an empty RDD raises a TypeError mentioning
            # "empty"; translate it into a clearer ValueError.
            if " empty " in str(e):
                raise ValueError("can not generate buckets from empty RDD")
            raise

        if minv == maxv or buckets == 1:
            # Degenerate range (or a single bucket requested): everything
            # lands in one bucket.
            return [minv, maxv], [filtered.count()]

        try:
            inc = (maxv - minv) / buckets
        except TypeError:
            raise TypeError("Can not generate buckets with non-number in RDD")

        if isinf(inc):
            raise ValueError("Can not generate buckets with infinite value")

        # keep them as integer if possible
        inc = int(inc)
        if inc * buckets != maxv - minv:
            # Integer step does not divide the range evenly; fall back to
            # a float step.
            inc = (maxv - minv) * 1.0 / buckets

        buckets = [i * inc + minv for i in range(buckets)]
        buckets.append(maxv)  # fix accumulated error
        even = True

    elif isinstance(buckets, (list, tuple)):
        # Case 2: caller supplied explicit boundaries; validate them.
        if len(buckets) < 2:
            raise ValueError("buckets should have more than one value")

        if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
            raise ValueError("can not have None or NaN in buckets")

        if sorted(buckets) != list(buckets):
            raise ValueError("buckets should be sorted")

        if len(set(buckets)) != len(buckets):
            raise ValueError("buckets should not contain duplicated values")

        minv = buckets[0]
        maxv = buckets[-1]
        even = False
        inc = None
        try:
            steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
        except TypeError:
            pass  # objects in buckets do not support '-'
        else:
            # If all steps are (numerically) equal, use the O(1) index
            # computation instead of bisecting per element.
            if max(steps) - min(steps) < 1e-10:  # handle precision errors
                even = True
                inc = (maxv - minv) / (len(buckets) - 1)

    else:
        raise TypeError("buckets should be a list or tuple or number(int or long)")

    def histogram(iterator):
        # Count elements of one partition into per-bucket counters;
        # out-of-range, None, and NaN values are skipped.
        counters = [0] * len(buckets)
        for i in iterator:
            if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                continue
            t = (int((i - minv) / inc) if even
                 else bisect.bisect_right(buckets, i) - 1)
            counters[t] += 1
        # add last two together
        last = counters.pop()
        counters[-1] += last
        return [counters]

    def mergeCounters(a, b):
        # Element-wise sum of two partitions' counter lists.
        return [i + j for i, j in zip(a, b)]

    return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
    """
    Compute the mean of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).mean()
    2.0
    """
    # Delegate to the StatCounter produced by stats().
    summary = self.stats()
    return summary.mean()
def variance(self):
    """
    Compute the variance of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).variance()
    0.666...
    """
    # Delegate to the StatCounter produced by stats().
    summary = self.stats()
    return summary.variance()
def stdev(self):
    """
    Compute the standard deviation of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).stdev()
    0.816...
    """
    # Delegate to the StatCounter produced by stats().
    summary = self.stats()
    return summary.stdev()
def sampleStdev(self):
    """
    Compute the sample standard deviation of this RDD's elements (which
    corrects for bias in estimating the standard deviation by dividing by
    N-1 instead of N).

    >>> sc.parallelize([1, 2, 3]).sampleStdev()
    1.0
    """
    # Delegate to the StatCounter produced by stats().
    summary = self.stats()
    return summary.sampleStdev()
def sampleVariance(self):
    """
    Compute the sample variance of this RDD's elements (which corrects
    for bias in estimating the variance by dividing by N-1 instead of N).

    >>> sc.parallelize([1, 2, 3]).sampleVariance()
    1.0
    """
    # Delegate to the StatCounter produced by stats().
    summary = self.stats()
    return summary.sampleVariance()
def countByValue(self):
    """
    Return the count of each unique value in this RDD as a dictionary of
    (value, count) pairs.

    >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
    [(1, 2), (2, 3)]
    """
    def tally_partition(iterator):
        # Local frequency table for one partition.
        tally = defaultdict(int)
        for item in iterator:
            tally[item] += 1
        yield tally

    def merge_tallies(first, second):
        # Fold the second table into the first and reuse it.
        for value, count in second.items():
            first[value] += count
        return first

    return self.mapPartitions(tally_partition).reduce(merge_tallies)
def top(self, num, key=None):
    """
    Get the top N elements from a RDD.

    Note that this method should only be used if the resulting array is expected
    to be small, as all the data is loaded into the driver's memory.

    Note: It returns the list sorted in descending order.

    >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
    [12]
    >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
    [6, 5]
    >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
    [4, 3, 2]
    """
    def partition_top(iterator):
        # Largest `num` elements of a single partition.
        yield heapq.nlargest(num, iterator, key=key)

    def combine(left, right):
        # Merge two candidate lists, keeping only the overall top `num`.
        return heapq.nlargest(num, left + right, key=key)

    return self.mapPartitions(partition_top).reduce(combine)
def takeOrdered(self, num, key=None):
    """
    Get the N elements from a RDD ordered in ascending order or as
    specified by the optional key function.

    Note that this method should only be used if the resulting array is expected
    to be small, as all the data is loaded into the driver's memory.

    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
    [1, 2, 3, 4, 5, 6]
    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
    [10, 9, 7, 6, 5, 4]
    """
    def partition_smallest(iterator):
        # Smallest `num` elements within a single partition.
        yield heapq.nsmallest(num, iterator, key)

    def combine(left, right):
        # Merge two candidate lists, keeping the overall smallest `num`.
        return heapq.nsmallest(num, left + right, key)

    return self.mapPartitions(partition_smallest).reduce(combine)
def take(self, num):
    """
    Take the first num elements of the RDD.

    It works by first scanning one partition, and use the results from
    that partition to estimate the number of additional partitions needed
    to satisfy the limit.

    Note that this method should only be used if the resulting array is expected
    to be small, as all the data is loaded into the driver's memory.

    Translated from the Scala implementation in RDD#take().

    >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
    [2, 3]
    >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
    [2, 3, 4, 5, 6]
    >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
    [91, 92, 93]
    """
    items = []
    totalParts = self.getNumPartitions()
    partsScanned = 0

    while len(items) < num and partsScanned < totalParts:
        # The number of partitions to try in this iteration.
        # It is ok for this number to be greater than totalParts because
        # we actually cap it at totalParts in runJob.
        numPartsToTry = 1
        if partsScanned > 0:
            # If we didn't find any rows after the previous iteration,
            # quadruple and retry.  Otherwise, interpolate the number of
            # partitions we need to try, but overestimate it by 50%.
            # We also cap the estimation in the end.
            if len(items) == 0:
                numPartsToTry = partsScanned * 4
            else:
                # the first parameter of max is >=1 whenever partsScanned >= 2
                numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
                numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)

        left = num - len(items)

        def takeUpToNumLeft(iterator):
            iterator = iter(iterator)
            taken = 0
            while taken < left:
                # Under PEP 479 (Python 3.7+) a StopIteration escaping a
                # generator is turned into a RuntimeError, so an exhausted
                # partition must be handled explicitly instead of letting
                # next() terminate the generator implicitly.
                try:
                    yield next(iterator)
                except StopIteration:
                    return
                taken += 1

        p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
        res = self.context.runJob(self, takeUpToNumLeft, p)

        items += res
        partsScanned += numPartsToTry
    return items[:num]
def first(self):
    """
    Return the first element in this RDD.

    >>> sc.parallelize([2, 3, 4]).first()
    2
    >>> sc.parallelize([]).first()
    Traceback (most recent call last):
        ...
    ValueError: RDD is empty
    """
    # Guard clause: an empty take(1) means there is nothing to return.
    head = self.take(1)
    if not head:
        raise ValueError("RDD is empty")
    return head[0]
def isEmpty(self):
    """
    Returns true if and only if the RDD contains no elements at all. Note that an RDD
    may be empty even when it has at least 1 partition.

    >>> sc.parallelize([]).isEmpty()
    True
    >>> sc.parallelize([1]).isEmpty()
    False
    """
    # No partitions means trivially empty; otherwise probe for a first
    # element without materializing the whole RDD.
    if self.getNumPartitions() == 0:
        return True
    return len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
    converted for output using either user specified converters or, by default,
    L{org.apache.spark.api.python.JavaToWritableConverter}.

    :param conf: Hadoop job configuration, passed in as a dict
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    # Re-serialize with pickle so the JVM side can deserialize the pairs.
    pickledRDD = self._pickled()
    # NOTE(review): despite the shared saveAsHadoopDataset entry point, the
    # trailing True flag presumably selects the new-API (mapreduce) code
    # path on the JVM side — confirm against PythonRDD's Scala signature.
    self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                           keyConverter=None, valueConverter=None, conf=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
    C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
           (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
           (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
           (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: Hadoop job configuration, passed in as a dict (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    # Re-serialize with pickle so the JVM side can deserialize the pairs.
    pickledRDD = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
                                                   outputFormatClass,
                                                   keyClass, valueClass,
                                                   keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
    converted for output using either user specified converters or, by default,
    L{org.apache.spark.api.python.JavaToWritableConverter}.

    :param conf: Hadoop job configuration, passed in as a dict
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    # Re-serialize with pickle so the JVM side can deserialize the pairs.
    pickledRDD = self._pickled()
    # Trailing False here contrasts with saveAsNewAPIHadoopDataset's True —
    # presumably the old-API (mapred) switch; confirm against the Scala side.
    self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                     keyConverter=None, valueConverter=None, conf=None,
                     compressionCodecClass=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the old Hadoop OutputFormat API (mapred package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
    C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
           (e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
           (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
           (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: (None by default)
    :param compressionCodecClass: (None by default)
    """
    jconf = self.ctx._dictToJavaMap(conf)
    # Re-serialize with pickle so the JVM side can deserialize the pairs.
    pickledRDD = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
                                             outputFormatClass,
                                             keyClass, valueClass,
                                             keyConverter, valueConverter,
                                             jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
    RDD's key and value types. The mechanism is as follows:

        1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
        2. Keys and values of this Java RDD are converted to Writables and written out.

    :param path: path to sequence file
    :param compressionCodecClass: (None by default)
    """
    # Re-serialize with pickle so the JVM side can deserialize the pairs.
    pickledRDD = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
                                               path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
    """
    Save this RDD as a SequenceFile of serialized objects. The serializer
    used is L{pyspark.serializers.PickleSerializer}, default batch size
    is 10.

    >>> tmpFile = NamedTemporaryFile(delete=True)
    >>> tmpFile.close()
    >>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
    >>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
    ['1', '2', 'rdd', 'spark']
    """
    # A batch size of 0 means "let the serializer pick batch sizes";
    # any other value fixes the batch size explicitly.
    if batchSize == 0:
        serializer = AutoBatchedSerializer(PickleSerializer())
    else:
        serializer = BatchedSerializer(PickleSerializer(), batchSize)
    reserialized = self._reserialize(serializer)
    reserialized._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
    """
    Save this RDD as a text file, using string representations of elements.

    @param path: path to text file
    @param compressionCodecClass: (None by default) string i.e.
        "org.apache.hadoop.io.compress.GzipCodec"

    >>> tempFile = NamedTemporaryFile(delete=True)
    >>> tempFile.close()
    >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
    >>> from fileinput import input
    >>> from glob import glob
    >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
    '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'

    Empty lines are tolerated when saving to text files.

    >>> tempFile2 = NamedTemporaryFile(delete=True)
    >>> tempFile2.close()
    >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
    >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
    '\\n\\n\\nbar\\nfoo\\n'

    Using compressionCodecClass

    >>> tempFile3 = NamedTemporaryFile(delete=True)
    >>> tempFile3.close()
    >>> codec = "org.apache.hadoop.io.compress.GzipCodec"
    >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
    >>> from fileinput import input, hook_compressed
    >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
    >>> b''.join(result).decode('utf-8')
    u'bar\\nfoo\\n'
    """
    def func(split, iterator):
        # Normalize every element to UTF-8 encoded bytes: non-string
        # elements are first stringified, then text is encoded.
        for x in iterator:
            if not isinstance(x, (unicode, bytes)):
                x = unicode(x)
            if isinstance(x, unicode):
                x = x.encode("utf-8")
            yield x
    keyed = self.mapPartitionsWithIndex(func)
    # Elements are already raw bytes, so skip the usual Python serializer.
    keyed._bypass_serializer = True
    if compressionCodecClass:
        compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
    else:
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
    """
    Return the key-value pairs in this RDD to the master as a dictionary.

    Note that this method should only be used if the resulting data is expected
    to be small, as all the data is loaded into the driver's memory.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
    >>> m[1]
    2
    >>> m[3]
    4
    """
    # Pull all pairs to the driver, then build the dictionary locally.
    pairs = self.collect()
    return dict(pairs)
def keys(self):
    """
    Return an RDD with the keys of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
    >>> m.collect()
    [1, 3]
    """
    # Project the first component of each pair.
    return self.map(lambda pair: pair[0])
def values(self):
    """
    Return an RDD with the values of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).values()
    >>> m.collect()
    [2, 4]
    """
    # Project the second component of each pair.
    return self.map(lambda pair: pair[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
    """
    Merge the values for each key using an associative and commutative reduce function.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    Output will be partitioned with C{numPartitions} partitions, or
    the default parallelism level if C{numPartitions} is not specified.
    Default partitioner is hash-partition.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKey(add).collect())
    [('a', 2), ('b', 1)]
    """
    def create_combiner(value):
        # The first value for a key is the combiner itself.
        return value

    # Both local merging and cross-partition merging use the same reduce
    # function, which is what makes this a "combiner"-style shuffle.
    return self.combineByKey(create_combiner, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
    """
    Merge the values for each key using an associative and commutative reduce function, but
    return the results immediately to the master as a dictionary.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKeyLocally(add).items())
    [('a', 2), ('b', 1)]
    """
    def reduce_partition(iterator):
        # Build a per-partition dict, reducing values as they arrive.
        local = {}
        for key, value in iterator:
            local[key] = func(local[key], value) if key in local else value
        yield local

    def merge_maps(target, source):
        # Fold `source` into `target` and reuse it as the accumulator.
        for key, value in source.items():
            target[key] = func(target[key], value) if key in target else value
        return target

    return self.mapPartitions(reduce_partition).reduce(merge_maps)
def countByKey(self):
    """
    Count the number of elements for each key, and return the result to the
    master as a dictionary.

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.countByKey().items())
    [('a', 2), ('b', 1)]
    """
    # Drop the values, then count occurrences of each key.
    return self.keys().countByValue()
def join(self, other, numPartitions=None):
    """
    Return an RDD containing all pairs of elements with matching keys in
    C{self} and C{other}.

    Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
    (k, v1) is in C{self} and (k, v2) is in C{other}.

    Performs a hash join across the cluster.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("a", 3)])
    >>> sorted(x.join(y).collect())
    [('a', (1, 2)), ('a', (1, 3))]
    """
    # Delegate to the shared python-side hash-join helper.
    return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
    """
    Perform a left outer join of C{self} and C{other}.

    For each element (k, v) in C{self}, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in C{other}, or the pair
    (k, (v, None)) if no elements in C{other} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(x.leftOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None))]
    """
    # Delegate to the shared python-side join helper.
    return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
    """
    Perform a right outer join of C{self} and C{other}.

    For each element (k, w) in C{other}, the resulting RDD will either
    contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
    if no elements in C{self} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(y.rightOuterJoin(x).collect())
    [('a', (2, 1)), ('b', (None, 4))]
    """
    # Delegate to the shared python-side join helper.
    return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
    """
    Perform a full outer join of C{self} and C{other}.

    For each element (k, v) in C{self}, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in C{other}, or the pair
    (k, (v, None)) if no elements in C{other} have key k.

    Similarly, for each element (k, w) in C{other}, the resulting RDD will
    either contain all pairs (k, (v, w)) for v in C{self}, or the pair
    (k, (None, w)) if no elements in C{self} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("c", 8)])
    >>> sorted(x.fullOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
    """
    # Docstring previously said "right outer join" — this is a full outer
    # join, as both the helper name and the doctest show.
    return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
    """
    Return a copy of the RDD partitioned using the specified partitioner.

    :param numPartitions: target number of partitions (None selects the
        default reduce parallelism)
    :param partitionFunc: hash function mapping a key to an int

    >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
    >>> sets = pairs.partitionBy(2).glom().collect()
    >>> len(set(sets[0]).intersection(set(sets[1])))
    0
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()
    partitioner = Partitioner(numPartitions, partitionFunc)
    # Already partitioned the same way: nothing to do.
    if self.partitioner == partitioner:
        return self

    # Transferring O(n) objects to Java is too expensive.
    # Instead, we'll form the hash buckets in Python,
    # transferring O(numPartitions) objects to Java.
    # Each object is a (splitNumber, [objects]) pair.
    # In order to avoid too huge objects, the objects are
    # grouped into chunks.
    outputSerializer = self.ctx._unbatched_serializer

    # Flush buckets once Python worker memory use passes half the
    # configured spark.python.worker.memory budget.
    limit = (_parse_memory(self.ctx._conf.get(
        "spark.python.worker.memory", "512m")) / 2)

    def add_shuffle_key(split, iterator):

        buckets = defaultdict(list)
        c, batch = 0, min(10 * numPartitions, 1000)

        for k, v in iterator:
            buckets[partitionFunc(k) % numPartitions].append((k, v))
            c += 1

            # check used memory and avg size of chunk of objects
            if (c % 1000 == 0 and get_used_memory() > limit
                    or c > batch):
                n, size = len(buckets), 0
                for split in list(buckets.keys()):
                    yield pack_long(split)
                    d = outputSerializer.dumps(buckets[split])
                    del buckets[split]
                    yield d
                    size += len(d)

                # Average serialized chunk size, in MiB (>> 20).
                avg = int(size / n) >> 20
                # let 1M < avg < 10M
                if avg < 1:
                    batch *= 1.5
                elif avg > 10:
                    batch = max(int(batch / 1.5), 1)
                c = 0

        # Flush whatever is left at end of partition.
        for split, items in buckets.items():
            yield pack_long(split)
            yield outputSerializer.dumps(items)

    keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
    # The stream already consists of serialized (id, chunk) frames, so the
    # normal Python serializer is bypassed.
    keyed._bypass_serializer = True
    with SCCallSiteSync(self.context) as css:
        pairRDD = self.ctx._jvm.PairwiseRDD(
            keyed._jrdd.rdd()).asJavaPairRDD()
        jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                       id(partitionFunc))
    jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
    rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
    rdd.partitioner = partitioner
    return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                 numPartitions=None, partitionFunc=portable_hash):
    """
    Generic function to combine the elements for each key using a custom
    set of aggregation functions.

    Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
    type" C. Note that V and C can be different -- for example, one might
    group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).

    Users provide three functions:

        - C{createCombiner}, which turns a V into a C (e.g., creates
          a one-element list)
        - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
          a list)
        - C{mergeCombiners}, to combine two C's into a single one.

    In addition, users can control the partitioning of the output RDD.

    >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> def add(a, b): return a + str(b)
    >>> sorted(x.combineByKey(str, add, add).collect())
    [('a', '11'), ('b', '1')]
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()

    serializer = self.ctx.serializer
    memory = self._memory_limit()
    agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

    def combine_values_locally(iterator):
        # Map-side combine: merge raw values into combiners before the
        # shuffle, spilling to disk when over 90% of the memory budget.
        merger = ExternalMerger(agg, memory * 0.9, serializer)
        merger.mergeValues(iterator)
        return merger.items()

    def merge_combiners(iterator):
        # Reduce-side: merge the shuffled combiners per key.
        merger = ExternalMerger(agg, memory, serializer)
        merger.mergeCombiners(iterator)
        return merger.items()

    pre_combined = self.mapPartitions(combine_values_locally, preservesPartitioning=True)
    shuffled = pre_combined.partitionBy(numPartitions, partitionFunc)
    return shuffled.mapPartitions(merge_combiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
                   partitionFunc=portable_hash):
    """
    Aggregate the values of each key, using given combine functions and a neutral
    "zero value". This function can return a different result type, U, than the type
    of the values in this RDD, V. Thus, we need one operation for merging a V into
    a U and one operation for merging two U's, The former operation is used for merging
    values within a partition, and the latter is used for merging values between
    partitions. To avoid memory allocation, both of these functions are
    allowed to modify and return their first argument instead of creating a new U.
    """
    def make_zero():
        # Deep-copy so in-place mutation of one key's accumulator can
        # never leak into another key's zero value.
        return copy.deepcopy(zeroValue)

    return self.combineByKey(
        lambda value: seqFunc(make_zero(), value),
        seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
    """
    Merge the values for each key using an associative function "func"
    and a neutral "zeroValue" which may be added to the result an
    arbitrary number of times, and must not change the result
    (e.g., 0 for addition, or 1 for multiplication.).

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> from operator import add
    >>> sorted(rdd.foldByKey(0, add).collect())
    [('a', 2), ('b', 1)]
    """
    def make_zero():
        # Deep-copy so in-place mutation of one key's accumulator can
        # never leak into another key's zero value.
        return copy.deepcopy(zeroValue)

    return self.combineByKey(
        lambda value: func(make_zero(), value),
        func, func, numPartitions, partitionFunc)
def _memory_limit(self):
    """Return the parsed C{spark.python.worker.memory} setting (default "512m")."""
    configured = self.ctx._conf.get("spark.python.worker.memory", "512m")
    return _parse_memory(configured)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
    """
    Group the values for each key in the RDD into a single sequence.
    Hash-partitions the resulting RDD with numPartitions partitions.

    Note: If you are grouping in order to perform an aggregation (such as a
    sum or average) over each key, using reduceByKey or aggregateByKey will
    provide much better performance.

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.groupByKey().mapValues(len).collect())
    [('a', 2), ('b', 1)]
    >>> sorted(rdd.groupByKey().mapValues(list).collect())
    [('a', [1, 1]), ('b', [1])]
    """
    # Combiner functions: a key's values are accumulated into a list.
    def createCombiner(x):
        return [x]

    def mergeValue(xs, x):
        xs.append(x)
        return xs

    def mergeCombiners(a, b):
        a.extend(b)
        return a

    memory = self._memory_limit()
    serializer = self._jrdd_deserializer
    agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

    def combine(iterator):
        # Map-side combine with spilling at 90% of the memory budget.
        merger = ExternalMerger(agg, memory * 0.9, serializer)
        merger.mergeValues(iterator)
        return merger.items()

    locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
    shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)

    def groupByKey(it):
        # Reduce-side merge uses ExternalGroupBy, which can spill groups.
        merger = ExternalGroupBy(agg, memory, serializer)
        merger.mergeCombiners(it)
        return merger.items()

    # Values are exposed as ResultIterable rather than plain lists.
    return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a flatMap function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
    >>> def f(x): return x
    >>> x.flatMapValues(f).collect()
    [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
    """
    def expand(pair):
        # Re-pair the original key with every element f yields.
        key, value = pair
        return ((key, element) for element in f(value))

    # Keys are untouched, so the existing partitioning remains valid.
    return self.flatMap(expand, preservesPartitioning=True)
def mapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a map function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
    >>> def f(x): return len(x)
    >>> x.mapValues(f).collect()
    [('a', 3), ('b', 1)]
    """
    def transform(pair):
        # Apply f to the value only; the key passes through unchanged.
        key, value = pair
        return (key, f(value))

    # Keys are untouched, so the existing partitioning remains valid.
    return self.map(transform, preservesPartitioning=True)
def groupWith(self, other, *others):
    """
    Alias for cogroup but with support for multiple RDDs.

    >>> w = sc.parallelize([("a", 5), ("b", 6)])
    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> z = sc.parallelize([("b", 42)])
    >>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
    [('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
    """
    # Cogroup this RDD together with all supplied RDDs, using the
    # default partition count.
    rdds = (self, other) + others
    return python_cogroup(rdds, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
    """
    For each key k in C{self} or C{other}, return a resulting RDD that
    contains a tuple with the list of values for that key in C{self} as
    well as C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
    [('a', ([1], [2])), ('b', ([4], []))]
    """
    # Delegate to the shared two-RDD cogroup helper.
    rdds = (self, other)
    return python_cogroup(rdds, numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
    """
    Return a subset of this RDD sampled by key (via stratified sampling).

    Create a sample of this RDD using variable sampling rates for
    different keys as specified by fractions, a key to sampling rate map.

    :param withReplacement: whether elements can be sampled multiple times
    :param fractions: map from key to the sampling rate for that key;
        every rate must be non-negative
    :param seed: seed for the random number generator (optional)
    :raises ValueError: if any sampling fraction is negative

    >>> fractions = {"a": 0.2, "b": 0.1}
    >>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
    >>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
    >>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
    True
    >>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
    True
    >>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
    True
    """
    # Validate eagerly on the driver.  A bare `assert` (as used before)
    # is stripped when Python runs with -O, which would silently skip
    # this check; raise a real exception instead.
    for fraction in fractions.values():
        if fraction < 0.0:
            raise ValueError("Negative fraction value: %s" % fraction)
    return self.mapPartitionsWithIndex(
        RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
    """
    Return each (key, value) pair in C{self} that has no pair with matching
    key in C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
    >>> y = sc.parallelize([("a", 3), ("c", None)])
    >>> sorted(x.subtractByKey(y).collect())
    [('b', 4), ('b', 5)]
    """
    def keep(pair):
        # Keep keys that appear in self but have no values from other.
        _, (mine, theirs) = pair
        return bool(mine) and not theirs
    grouped = self.cogroup(other, numPartitions)
    return grouped.filter(keep).flatMapValues(lambda groups: groups[0])
def subtract(self, other, numPartitions=None):
    """
    Return each value in C{self} that is not contained in C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
    >>> y = sc.parallelize([("a", 3), ("c", None)])
    >>> sorted(x.subtract(y).collect())
    [('a', 1), ('b', 4), ('b', 5)]
    """
    # Tag every element with a dummy True so subtractByKey can be reused;
    # the tag is discarded again by keys().
    tagged_other = other.map(lambda element: (element, True))
    tagged_self = self.map(lambda element: (element, True))
    return tagged_self.subtractByKey(tagged_other, numPartitions).keys()
def keyBy(self, f):
    """
    Creates tuples of the elements in this RDD by applying C{f}.

    >>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
    >>> y = sc.parallelize(zip(range(0,5), range(0,5)))
    >>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
    [(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
    """
    def to_pair(element):
        # The computed key goes first; the original element becomes the value.
        return (f(element), element)
    return self.map(to_pair)
def repartition(self, numPartitions):
    """
    Return a new RDD that has exactly numPartitions partitions.

    Can increase or decrease the level of parallelism in this RDD.
    Internally, this uses a shuffle to redistribute data.
    If you are decreasing the number of partitions in this RDD, consider
    using `coalesce`, which can avoid performing a shuffle.

    >>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
    >>> sorted(rdd.glom().collect())
    [[1], [2, 3], [4, 5], [6, 7]]
    >>> len(rdd.repartition(2).glom().collect())
    2
    >>> len(rdd.repartition(10).glom().collect())
    10
    """
    # Delegate the shuffle to the JVM-side RDD and keep our deserializer.
    repartitioned = self._jrdd.repartition(numPartitions)
    return RDD(repartitioned, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
    """
    Return a new RDD that is reduced into `numPartitions` partitions.

    >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
    [[1], [2, 3], [4, 5]]
    >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
    [[1, 2, 3, 4, 5]]
    """
    # Delegate to the JVM-side coalesce; shuffle=False merges partitions
    # without moving data between executors.
    coalesced = self._jrdd.coalesce(numPartitions, shuffle)
    return RDD(coalesced, self.ctx, self._jrdd_deserializer)
def zip(self, other):
    """
    Zips this RDD with another one, returning key-value pairs with the
    first element in each RDD, second element in each RDD, etc. Assumes
    that the two RDDs have the same number of partitions and the same
    number of elements in each partition (e.g. one was made through
    a map on the other).

    >>> x = sc.parallelize(range(0,5))
    >>> y = sc.parallelize(range(1000, 1005))
    >>> x.zip(y).collect()
    [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
    """
    def get_batch_size(ser):
        # Batch size used by a serializer; 1 means "not batched".
        if isinstance(ser, BatchedSerializer):
            return ser.batchSize
        return 1  # not batched

    def batch_as(rdd, batchSize):
        # Re-serialize an RDD with a fixed, explicit batch size so that
        # both sides of the zip produce identically-sized batches.
        return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))

    my_batch = get_batch_size(self._jrdd_deserializer)
    other_batch = get_batch_size(other._jrdd_deserializer)
    # The JVM-side zip pairs serialized batches, so both RDDs must use the
    # same positive batch size; otherwise rebatch both.
    if my_batch != other_batch or not my_batch:
        # use the smallest batchSize for both of them
        batchSize = min(my_batch, other_batch)
        if batchSize <= 0:
            # auto batched or unlimited
            batchSize = 100
        other = batch_as(other, batchSize)
        self = batch_as(self, batchSize)

    if self.getNumPartitions() != other.getNumPartitions():
        raise ValueError("Can only zip with RDD which has the same number of partitions")

    # There will be an Exception in JVM if there are different number
    # of items in each partitions.
    pairRDD = self._jrdd.zip(other._jrdd)
    deserializer = PairDeserializer(self._jrdd_deserializer,
                                    other._jrdd_deserializer)
    return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
    """
    Zips this RDD with its element indices.

    The ordering is first based on the partition index and then the
    ordering of items within each partition. So the first item in
    the first partition gets index 0, and the last item in the last
    partition receives the largest index.
    This method needs to trigger a spark job when this RDD contains
    more than one partitions.

    >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
    [('a', 0), ('b', 1), ('c', 2), ('d', 3)]
    """
    # Offset of the first element in each partition; a single-partition
    # RDD needs no job to know its only offset is 0.
    offsets = [0]
    if self.getNumPartitions() > 1:
        counts = self.mapPartitions(lambda part: [sum(1 for _ in part)]).collect()
        running_total = 0
        for count in counts[:-1]:
            running_total += count
            offsets.append(running_total)

    def index_partition(split, iterator):
        for idx, item in enumerate(iterator, offsets[split]):
            yield item, idx

    return self.mapPartitionsWithIndex(index_partition)
def zipWithUniqueId(self):
    """
    Zips this RDD with generated unique Long ids.

    Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
    n is the number of partitions. So there may exist gaps, but this
    method won't trigger a spark job, which is different from
    L{zipWithIndex}

    >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
    [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
    """
    num_parts = self.getNumPartitions()

    def assign_ids(split, iterator):
        # id = position-in-partition * n + partition-index, guaranteeing
        # uniqueness without any cross-partition coordination.
        for position, item in enumerate(iterator):
            yield item, position * num_parts + split

    return self.mapPartitionsWithIndex(assign_ids)
def name(self):
    """
    Return the name of this RDD, or None if no name has been set.
    """
    jname = self._jrdd.name()
    return jname if jname else None
@ignore_unicode_prefix
def setName(self, name):
    """
    Assign a name to this RDD and return the RDD itself, so the call
    can be chained.

    >>> rdd1 = sc.parallelize([1, 2])
    >>> rdd1.setName('RDD1').name()
    u'RDD1'
    """
    # The name lives on the JVM-side RDD.
    self._jrdd.setName(name)
    return self
def toDebugString(self):
    """
    A description of this RDD and its recursive dependencies for
    debugging, as UTF-8 bytes (or None if the JVM returns nothing).
    """
    description = self._jrdd.toDebugString()
    if not description:
        return None
    return description.encode('utf-8')
def getStorageLevel(self):
    """
    Get the RDD's current storage level.

    >>> rdd1 = sc.parallelize([1,2])
    >>> rdd1.getStorageLevel()
    StorageLevel(False, False, False, False, 1)
    >>> print(rdd1.getStorageLevel())
    Serialized 1x Replicated
    """
    # Mirror the JVM-side StorageLevel into the Python wrapper type.
    jsl = self._jrdd.getStorageLevel()
    return StorageLevel(jsl.useDisk(),
                        jsl.useMemory(),
                        jsl.useOffHeap(),
                        jsl.deserialized(),
                        jsl.replication())
def _defaultReducePartitions(self):
    """
    Returns the default number of partitions to use during reduce tasks
    (e.g., groupBy).

    If spark.default.parallelism is set, then we'll use the value from
    SparkContext defaultParallelism, otherwise we'll use the number of
    partitions in this RDD.  This mirrors the behavior of the Scala
    Partitioner#defaultPartitioner, intended to reduce the likelihood of
    OOMs.  Once PySpark adopts Partitioner-based APIs, this behavior will
    be inherent.
    """
    if not self.ctx._conf.contains("spark.default.parallelism"):
        return self.getNumPartitions()
    return self.ctx.defaultParallelism
def lookup(self, key):
    """
    Return the list of values in the RDD for key `key`. This operation
    is done efficiently if the RDD has a known partitioner by only
    searching the partition that the key maps to.

    >>> l = range(1000)
    >>> rdd = sc.parallelize(zip(l, l), 10)
    >>> rdd.lookup(42)  # slow
    [42]
    >>> sorted = rdd.sortByKey()
    >>> sorted.lookup(42)  # fast
    [42]
    >>> sorted.lookup(1024)
    []
    >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
    >>> list(rdd2.lookup(('a', 'b'))[0])
    ['c']
    """
    matching = self.filter(lambda kv: kv[0] == key).values()
    if self.partitioner is None:
        # No partitioner known: must scan every partition.
        return matching.collect()
    # Known partitioner: run the job on just the one partition the key
    # hashes to.
    return self.ctx.runJob(matching, lambda x: x, [self.partitioner(key)])
def _to_java_object_rdd(self):
    """Return a JavaRDD of Object by unpickling.

    It will convert each Python object into Java object by Pyrolite,
    whenever the RDD is serialized in batch or not.
    """
    pickled = self._pickled()
    return self.ctx._jvm.SerDeUtil.pythonToJava(pickled._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
    """
    .. note:: Experimental

    Approximate version of count() that returns a potentially incomplete
    result within a timeout, even if not all tasks have finished.

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> rdd.countApprox(1000, 1.0)
    1000
    """
    # Count per partition as floats, then reuse the approximate sum.
    per_partition = self.mapPartitions(
        lambda part: [float(sum(1 for _ in part))])
    return int(per_partition.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
    """
    .. note:: Experimental

    Approximate operation to return the sum within a timeout
    or meet the confidence.

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> r = sum(range(1000))
    >>> abs(rdd.sumApprox(1000) - r) / r < 0.05
    True
    """
    # Pre-sum each partition in Python, then let the JVM-side
    # JavaDoubleRDD produce the bounded approximation.
    partial_sums = self.mapPartitions(lambda part: [float(sum(part))])
    jrdd = partial_sums._to_java_object_rdd()
    jdouble_rdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
    bounded = jdouble_rdd.sumApprox(timeout, confidence).getFinalValue()
    return BoundedFloat(bounded.mean(), bounded.confidence(),
                        bounded.low(), bounded.high())
def meanApprox(self, timeout, confidence=0.95):
    """
    .. note:: Experimental

    Approximate operation to return the mean within a timeout
    or meet the confidence.

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> r = sum(range(1000)) / 1000.0
    >>> abs(rdd.meanApprox(1000) - r) / r < 0.05
    True
    """
    # Coerce every element to float, then delegate to the JVM-side
    # JavaDoubleRDD approximate mean.
    jrdd = self.map(float)._to_java_object_rdd()
    jdouble_rdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
    bounded = jdouble_rdd.meanApprox(timeout, confidence).getFinalValue()
    return BoundedFloat(bounded.mean(), bounded.confidence(),
                        bounded.low(), bounded.high())
def countApproxDistinct(self, relativeSD=0.05):
    """
    .. note:: Experimental

    Return approximate number of distinct elements in the RDD.

    The algorithm used is based on streamlib's implementation of
    `"HyperLogLog in Practice: Algorithmic Engineering of a State
    of The Art Cardinality Estimation Algorithm", available here
    <http://dx.doi.org/10.1145/2452376.2452456>`_.

    :param relativeSD: Relative accuracy. Smaller values create
                       counters that require more space.
                       It must be greater than 0.000017.

    >>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
    >>> 900 < n < 1100
    True
    >>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
    >>> 16 < n < 24
    True
    """
    if relativeSD < 0.000017:
        raise ValueError("relativeSD should be greater than 0.000017")
    # Map every element into the JVM's 2^32 hash space before handing
    # the RDD to the Java-side HyperLogLog estimator.
    hashed = self.map(lambda element: portable_hash(element) & 0xFFFFFFFF)
    return hashed._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
    """
    Return an iterator that contains all of the elements in this RDD.
    The iterator will consume as much memory as the largest partition
    in this RDD.

    >>> rdd = sc.parallelize(range(10))
    >>> [x for x in rdd.toLocalIterator()]
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    # SCCallSiteSync only needs to be active while the serving job is
    # launched; its value is never used.
    with SCCallSiteSync(self.context):
        port = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
    return _load_from_socket(port, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
    """Serialize a Python command and gather the JVM-side arguments
    (broadcasts, environment, includes) needed to build a PythonRDD.

    Returns a tuple (pickled_command, broadcast_vars, env, includes).
    """
    # the serialized command will be compressed by broadcast
    ser = CloudPickleSerializer()
    pickled_command = ser.dumps(command)
    if len(pickled_command) > (1 << 20):  # 1M
        # Large closures are shipped as a broadcast variable instead of
        # inline; what gets sent is then the (small) pickled broadcast.
        # The broadcast will have same life cycle as created PythonRDD
        broadcast = sc.broadcast(pickled_command)
        pickled_command = ser.dumps(broadcast)
    # There is a bug in py4j.java_gateway.JavaClass with auto_convert
    # https://github.com/bartdag/py4j/issues/161
    # TODO: use auto_convert once py4j fix the bug
    broadcast_vars = ListConverter().convert(
        [x._jbroadcast for x in sc._pickled_broadcast_vars],
        sc._gateway._gateway_client)
    # The set of broadcasts referenced by pickling is consumed here so the
    # next command starts from a clean slate.
    sc._pickled_broadcast_vars.clear()
    env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
    includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
    return pickled_command, broadcast_vars, env, includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
    """Wrap a Python function (plus its input/output serializers and an
    optional profiler) into a JVM-side PythonFunction object that a
    PythonRDD can execute on workers.
    """
    assert deserializer, "deserializer should not be empty"
    assert serializer, "serializer should not be empty"
    # The worker expects exactly this 4-tuple when unpickling the command.
    command = (func, profiler, deserializer, serializer)
    pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
    return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
                                  sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):

    """
    Pipelined maps:

    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]

    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """

    def __init__(self, prev, func, preservesPartitioning=False):
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            # Fuse with the previous Python transformation: compose the two
            # functions so only one Python worker pass is needed.
            prev_func = prev.func

            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        # _jrdd_val caches the lazily-built JVM RDD (see the _jrdd property).
        self._jrdd_val = None
        self._id = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
        self.partitioner = prev.partitioner if self.preservesPartitioning else None

    def getNumPartitions(self):
        # Pipelining never changes partitioning, so ask the stage's source RDD.
        return self._prev_jrdd.partitions().size()

    @property
    def _jrdd(self):
        """Lazily build (and cache) the JVM-side PythonRDD for this pipeline."""
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            self._jrdd_deserializer = NoOpSerializer()

        if self.ctx.profiler_collector:
            profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
        else:
            profiler = None

        wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
                                      self._jrdd_deserializer, profiler)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
                                             self.preservesPartitioning)
        self._jrdd_val = python_rdd.asJavaRDD()

        if profiler:
            # Register the profiler under the RDD id so results can be dumped.
            self._id = self._jrdd_val.id()
            self.ctx.profiler_collector.add_profiler(self._id, profiler)
        return self._jrdd_val

    def id(self):
        if self._id is None:
            self._id = self._jrdd.id()
        return self._id

    def _is_pipelinable(self):
        # Caching/checkpointing materializes this RDD, so later
        # transformations must not be fused into it.
        return not (self.is_cached or self.is_checkpointed)
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import doctest
    import sys
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest')
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        # Use sys.exit rather than the site-injected builtin exit(),
        # which is not available under `python -S`.
        sys.exit(-1)


if __name__ == "__main__":
    _test()
| {
"content_hash": "1cc15a8e99dcb750a26c62e89a656b9d",
"timestamp": "",
"source": "github",
"line_count": 2420,
"max_line_length": 100,
"avg_line_length": 38.38099173553719,
"alnum_prop": 0.572662087379686,
"repo_name": "TK-TarunW/ecosystem",
"id": "5fb10f86f4692dcb16d6d235ed9011dedbd06713",
"size": "93667",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spark-2.0.2-bin-hadoop2.7/python/pyspark/rdd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AspectJ",
"bytes": "9732"
},
{
"name": "Batchfile",
"bytes": "188552"
},
{
"name": "C",
"bytes": "598922"
},
{
"name": "C++",
"bytes": "949406"
},
{
"name": "CSS",
"bytes": "551632"
},
{
"name": "HTML",
"bytes": "98452345"
},
{
"name": "Java",
"bytes": "7461358"
},
{
"name": "JavaScript",
"bytes": "38346"
},
{
"name": "M4",
"bytes": "76410"
},
{
"name": "Makefile",
"bytes": "144646"
},
{
"name": "Perl",
"bytes": "226332"
},
{
"name": "Perl6",
"bytes": "70570"
},
{
"name": "PowerShell",
"bytes": "13769"
},
{
"name": "Python",
"bytes": "2259345"
},
{
"name": "R",
"bytes": "226319"
},
{
"name": "Scala",
"bytes": "554308"
},
{
"name": "Shell",
"bytes": "772310"
},
{
"name": "XS",
"bytes": "132876"
},
{
"name": "XSLT",
"bytes": "27180"
}
],
"symlink_target": ""
} |
"""
Dummy conftest.py for pype.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
from __future__ import print_function, absolute_import, division
import pytest
| {
"content_hash": "f1f316c2c6b4bd1b11eccf2ffc03adb3",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 26.4,
"alnum_prop": 0.7045454545454546,
"repo_name": "cs207-project/pype-package",
"id": "10e62ec252d2d0b0e6f900415260fd8e5b464954",
"size": "310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72684"
},
{
"name": "Shell",
"bytes": "1305"
}
],
"symlink_target": ""
} |
import unittest
import os, os.path
import vsan_policy
import vmdk_utils
import volume_kv
import vsan_info
class TestVsanPolicy(unittest.TestCase):
    """ Test VSAN Policy code """

    @unittest.skipIf(not vsan_info.get_vsan_datastore(),
                     "VSAN is not found - skipping vsan_info tests")
    def setUp(self):
        self.policy_path = os.path.join(vsan_info.get_vsan_dockvols_path(),
                                        'policies/test_policy')
        self.name = 'test_policy'
        self.content = ('(("proportionalCapacity" i50) '
                        '("hostFailuresToTolerate" i0))')

    def tearDown(self):
        # Best-effort cleanup: the policy file may not exist if the test
        # never created it.  Catch only OSError so real bugs still surface.
        try:
            os.remove(self.policy_path)
        except OSError:
            pass

    def assertPoliciesEqual(self):
        """Assert that the policy file on disk matches self.content."""
        with open(self.policy_path) as f:
            content = f.read()
        # Remove the added newline
        self.assertEqual(content[:-1], self.content)

    def test_create(self):
        self.assertEqual(None, vsan_policy.create(self.name, self.content))
        self.assertPoliciesEqual()

    def test_double_create_fails(self):
        self.assertEqual(None, vsan_policy.create(self.name, self.content))
        self.assertNotEqual(None, vsan_policy.create(self.name, self.content))
        self.assertPoliciesEqual()

    def test_create_delete(self):
        self.assertEqual(None, vsan_policy.create(self.name, self.content))
        self.assertPoliciesEqual()
        self.assertEqual(None, vsan_policy.delete(self.name))
        self.assertFalse(os.path.isfile(self.policy_path))

    def test_delete_nonexistent_policy_fails(self):
        self.assertNotEqual(None, vsan_policy.delete(self.name))

    def test_create_list(self):
        self.assertEqual(None, vsan_policy.create(self.name, self.content))
        policies = vsan_policy.get_policies()
        # Bug fix: the original used assertTrue(a, b), where the second
        # argument is only a failure *message*, so the assertion passed
        # for any non-empty string.  assertEqual actually compares.
        self.assertEqual(self.content + '\n', policies[self.name])
if __name__ == '__main__':
    # The key-value store must be initialized before the policy code uses it.
    volume_kv.init()
    unittest.main()
| {
"content_hash": "e9a2f56abce403451d5087704206c171",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 32.916666666666664,
"alnum_prop": 0.6308860759493671,
"repo_name": "BaluDontu/docker-volume-vsphere",
"id": "53a1f33cd3846a7d2d3296e3058938e088591452",
"size": "2572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "esx_service/vsan_policy_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "42941"
},
{
"name": "CSS",
"bytes": "464"
},
{
"name": "Go",
"bytes": "63665"
},
{
"name": "HTML",
"bytes": "3858"
},
{
"name": "Java",
"bytes": "3375"
},
{
"name": "JavaScript",
"bytes": "48417"
},
{
"name": "Makefile",
"bytes": "18309"
},
{
"name": "Python",
"bytes": "385930"
},
{
"name": "Shell",
"bytes": "33780"
}
],
"symlink_target": ""
} |
from ZSI import _copyright, _seqtypes, ParsedSoap, SoapWriter, TC, ZSI_SCHEMA_URI,\
EvaluateException, FaultFromFaultMessage, _child_elements, _attrs, _find_arraytype,\
_find_type, _get_idstr, _get_postvalue_from_absoluteURI, FaultException, WSActionException,\
UNICODE_ENCODING
from ZSI.auth import AUTH
from ZSI.TC import AnyElement, AnyType, String, TypeCode, _get_global_element_declaration,\
_get_type_definition
from ZSI.TCcompound import Struct
import base64, httplib, Cookie, types, time, urlparse
from ZSI.address import Address
from ZSI.wstools.logging import getLogger as _GetLogger
# Shorthand used when building the HTTP Basic "Authorization" header value.
_b64_encode = base64.encodestring


class _AuthHeader:
    """<BasicAuth xmlns="ZSI_SCHEMA_URI">
       <Name>%s</Name><Password>%s</Password>
    </BasicAuth>
    """
    def __init__(self, name=None, password=None):
        self.Name = name
        self.Password = password

# Typecode that serializes _AuthHeader as the ZSI <BasicAuth> SOAP header
# element (Name and Password as untyped strings).
_AuthHeader.typecode = Struct(_AuthHeader, ofwhat=(String((ZSI_SCHEMA_URI,'Name'), typed=False),
    String((ZSI_SCHEMA_URI,'Password'), typed=False)), pname=(ZSI_SCHEMA_URI,'BasicAuth'),
    typed=False)
class _Caller:
    '''Internal callable proxy handed to the user; invoking it performs
    an RPC through the owning binding using positional arguments.
    '''
    def __init__(self, binding, name, namespace=None):
        self.binding = binding
        self.name = name
        self.namespace = namespace

    def __call__(self, *args):
        encoding = "http://schemas.xmlsoap.org/soap/encoding/"
        namespace = self.namespace
        if namespace is None:
            # Unqualified operation: plain element names for request/response.
            reply_tc = TC.Any(self.name + "Response")
            return self.binding.RPC(None, self.name, args,
                encodingStyle=encoding, replytype=reply_tc)
        # Namespace-qualified operation and expected response element.
        reply_tc = TC.Any((namespace, self.name + "Response"))
        return self.binding.RPC(None, (namespace, self.name), args,
            encodingStyle=encoding, replytype=reply_tc)
class _NamedParamCaller:
    '''Like _Caller, except the RPC parameters are passed by keyword
    rather than positionally.
    '''
    def __init__(self, binding, name, namespace=None):
        self.binding = binding
        self.name = name
        self.namespace = namespace

    def __call__(self, **params):
        # Split off the keyword arguments that Send() itself consumes;
        # everything left over becomes the RPC's named parameters.
        kw = {}
        for key in ('auth_header', 'nsdict', 'requesttypecode', 'soapaction'):
            if key in params:
                kw[key] = params.pop(key)

        encoding = "http://schemas.xmlsoap.org/soap/encoding/"
        namespace = self.namespace
        if namespace is None:
            reply_tc = TC.Any(self.name + "Response", aslist=False)
            return self.binding.RPC(None, self.name, None,
                encodingStyle=encoding,
                _args=params,
                replytype=reply_tc,
                **kw)
        reply_tc = TC.Any((namespace, self.name + "Response"), aslist=False)
        return self.binding.RPC(None, (namespace, self.name), None,
            encodingStyle=encoding,
            _args=params,
            replytype=reply_tc,
            **kw)
class _Binding:
'''Object that represents a binding (connection) to a SOAP server.
Once the binding is created, various ways of sending and
receiving SOAP messages are available.
'''
defaultHttpTransport = httplib.HTTPConnection
defaultHttpsTransport = httplib.HTTPSConnection
logger = _GetLogger('ZSI.client.Binding')
def __init__(self, nsdict=None, transport=None, url=None, tracefile=None,
readerclass=None, writerclass=None, soapaction='',
wsAddressURI=None, sig_handler=None, transdict=None, **kw):
'''Initialize.
Keyword arguments include:
transport -- default use HTTPConnection.
transdict -- dict of values to pass to transport.
url -- URL of resource, POST is path
soapaction -- value of SOAPAction header
auth -- (type, name, password) triplet; default is unauth
nsdict -- namespace entries to add
tracefile -- file to dump packet traces
cert_file, key_file -- SSL data (q.v.)
readerclass -- DOM reader class
writerclass -- DOM writer class, implements MessageInterface
wsAddressURI -- namespaceURI of WS-Address to use. By default
it's not used.
sig_handler -- XML Signature handler, must sign and verify.
endPointReference -- optional Endpoint Reference.
'''
self.data = None
self.ps = None
self.user_headers = []
self.nsdict = nsdict or {}
self.transport = transport
self.transdict = transdict or {}
self.url = url
self.trace = tracefile
self.readerclass = readerclass
self.writerclass = writerclass
self.soapaction = soapaction
self.wsAddressURI = wsAddressURI
self.sig_handler = sig_handler
self.address = None
self.endPointReference = kw.get('endPointReference', None)
self.cookies = Cookie.SimpleCookie()
self.http_callbacks = {}
if kw.has_key('auth'):
self.SetAuth(*kw['auth'])
else:
self.SetAuth(AUTH.none)
def SetAuth(self, style, user=None, password=None):
'''Change auth style, return object to user.
'''
self.auth_style, self.auth_user, self.auth_pass = \
style, user, password
return self
def SetURL(self, url):
'''Set the URL we post to.
'''
self.url = url
return self
def ResetHeaders(self):
'''Empty the list of additional headers.
'''
self.user_headers = []
return self
def ResetCookies(self):
'''Empty the list of cookies.
'''
self.cookies = Cookie.SimpleCookie()
def AddHeader(self, header, value):
'''Add a header to send.
'''
self.user_headers.append((header, value))
return self
def __addcookies(self):
'''Add cookies from self.cookies to request in self.h
'''
for cname, morsel in self.cookies.items():
attrs = []
value = morsel.get('version', '')
if value != '' and value != '0':
attrs.append('$Version=%s' % value)
attrs.append('%s=%s' % (cname, morsel.coded_value))
value = morsel.get('path')
if value:
attrs.append('$Path=%s' % value)
value = morsel.get('domain')
if value:
attrs.append('$Domain=%s' % value)
self.h.putheader('Cookie', "; ".join(attrs))
def RPC(self, url, opname, obj, replytype=None, **kw):
'''Send a request, return the reply. See Send() and Recieve()
docstrings for details.
'''
self.Send(url, opname, obj, **kw)
return self.Receive(replytype, **kw)
def Send(self, url, opname, obj, nsdict={}, soapaction=None, wsaction=None,
endPointReference=None, soapheaders=(), **kw):
'''Send a message. If url is None, use the value from the
constructor (else error). obj is the object (data) to send.
Data may be described with a requesttypecode keyword, the default
is the class's typecode (if there is one), else Any.
Try to serialize as a Struct, if this is not possible serialize an Array. If
data is a sequence of built-in python data types, it will be serialized as an
Array, unless requesttypecode is specified.
arguments:
url --
opname -- struct wrapper
obj -- python instance
key word arguments:
nsdict --
soapaction --
wsaction -- WS-Address Action, goes in SOAP Header.
endPointReference -- set by calling party, must be an
EndPointReference type instance.
soapheaders -- list of pyobj, typically w/typecode attribute.
serialized in the SOAP:Header.
requesttypecode --
'''
url = url or self.url
endPointReference = endPointReference or self.endPointReference
# Serialize the object.
d = {}
d.update(self.nsdict)
d.update(nsdict)
sw = SoapWriter(nsdict=d, header=True, outputclass=self.writerclass,
encodingStyle=kw.get('encodingStyle'),)
requesttypecode = kw.get('requesttypecode')
if kw.has_key('_args'): #NamedParamBinding
tc = requesttypecode or TC.Any(pname=opname, aslist=False)
sw.serialize(kw['_args'], tc)
elif not requesttypecode:
tc = getattr(obj, 'typecode', None) or TC.Any(pname=opname, aslist=False)
try:
if type(obj) in _seqtypes:
obj = dict(map(lambda i: (i.typecode.pname,i), obj))
except AttributeError:
# can't do anything but serialize this in a SOAP:Array
tc = TC.Any(pname=opname, aslist=True)
else:
tc = TC.Any(pname=opname, aslist=False)
sw.serialize(obj, tc)
else:
sw.serialize(obj, requesttypecode)
for i in soapheaders:
sw.serialize_header(i)
#
# Determine the SOAP auth element. SOAP:Header element
if self.auth_style & AUTH.zsibasic:
sw.serialize_header(_AuthHeader(self.auth_user, self.auth_pass),
_AuthHeader.typecode)
#
# Serialize WS-Address
if self.wsAddressURI is not None:
if self.soapaction and wsaction.strip('\'"') != self.soapaction:
raise WSActionException, 'soapAction(%s) and WS-Action(%s) must match'\
%(self.soapaction,wsaction)
self.address = Address(url, self.wsAddressURI)
self.address.setRequest(endPointReference, wsaction)
self.address.serialize(sw)
#
# WS-Security Signature Handler
if self.sig_handler is not None:
self.sig_handler.sign(sw)
scheme,netloc,path,nil,nil,nil = urlparse.urlparse(url)
transport = self.transport
if transport is None and url is not None:
if scheme == 'https':
transport = self.defaultHttpsTransport
elif scheme == 'http':
transport = self.defaultHttpTransport
else:
raise RuntimeError, 'must specify transport or url startswith https/http'
# Send the request.
if issubclass(transport, httplib.HTTPConnection) is False:
raise TypeError, 'transport must be a HTTPConnection'
soapdata = str(sw)
self.h = transport(netloc, None, **self.transdict)
self.h.connect()
self.boundary = sw.getMIMEBoundary()
self.startCID = sw.getStartCID()
self.SendSOAPData(soapdata, url, soapaction, **kw)
def SendSOAPData(self, soapdata, url, soapaction, headers={}, **kw):
# Tracing?
if self.trace:
print >>self.trace, "_" * 33, time.ctime(time.time()), "REQUEST:"
print >>self.trace, soapdata
url = url or self.url
request_uri = _get_postvalue_from_absoluteURI(url)
self.h.putrequest("POST", request_uri)
self.h.putheader("Content-Length", "%d" % len(soapdata))
if len(self.boundary) == 0:
#no attachment
self.h.putheader("Content-Type", 'text/xml; charset="%s"' %UNICODE_ENCODING)
else:
#we have attachment
contentType = "multipart/related; "
self.h.putheader("Content-Type" , "multipart/related; boundary=\"" + self.boundary + "\"; start=\"" + self.startCID + '\"; type="text/xml"')
self.__addcookies()
for header,value in headers.items():
self.h.putheader(header, value)
SOAPActionValue = '"%s"' % (soapaction or self.soapaction)
self.h.putheader("SOAPAction", SOAPActionValue)
if self.auth_style & AUTH.httpbasic:
val = _b64_encode(self.auth_user + ':' + self.auth_pass) \
.replace("\012", "")
self.h.putheader('Authorization', 'Basic ' + val)
elif self.auth_style == AUTH.httpdigest and not headers.has_key('Authorization') \
and not headers.has_key('Expect'):
def digest_auth_cb(response):
self.SendSOAPDataHTTPDigestAuth(response, soapdata, url, request_uri, soapaction, **kw)
self.http_callbacks[401] = None
self.http_callbacks[401] = digest_auth_cb
for header,value in self.user_headers:
self.h.putheader(header, value)
self.h.endheaders()
self.h.send(soapdata)
# Clear prior receive state.
self.data, self.ps = None, None
def SendSOAPDataHTTPDigestAuth(self, response, soapdata, url, request_uri, soapaction, **kw):
'''Resend the initial request w/http digest authorization headers.
The SOAP server has requested authorization. Fetch the challenge,
generate the authdict for building a response.
'''
if self.trace:
print >>self.trace, "------ Digest Auth Header"
url = url or self.url
if response.status != 401:
raise RuntimeError, 'Expecting HTTP 401 response.'
if self.auth_style != AUTH.httpdigest:
raise RuntimeError,\
'Auth style(%d) does not support requested digest authorization.' %self.auth_style
from ZSI.digest_auth import fetch_challenge,\
generate_response,\
build_authorization_arg,\
dict_fetch
chaldict = fetch_challenge( response.getheader('www-authenticate') )
if dict_fetch(chaldict,'challenge','').lower() == 'digest' and \
dict_fetch(chaldict,'nonce',None) and \
dict_fetch(chaldict,'realm',None) and \
dict_fetch(chaldict,'qop',None):
authdict = generate_response(chaldict,
request_uri, self.auth_user, self.auth_pass, method='POST')
headers = {\
'Authorization':build_authorization_arg(authdict),
'Expect':'100-continue',
}
self.SendSOAPData(soapdata, url, soapaction, headers, **kw)
return
raise RuntimeError,\
'Client expecting digest authorization challenge.'
def ReceiveRaw(self, **kw):
'''Read a server reply, unconverted to any format and return it.
'''
if self.data: return self.data
trace = self.trace
while 1:
response = self.h.getresponse()
self.reply_code, self.reply_msg, self.reply_headers, self.data = \
response.status, response.reason, response.msg, response.read()
if trace:
print >>trace, "_" * 33, time.ctime(time.time()), "RESPONSE:"
for i in (self.reply_code, self.reply_msg,):
print >>trace, str(i)
print >>trace, "-------"
print >>trace, str(self.reply_headers)
print >>trace, self.data
saved = None
for d in response.msg.getallmatchingheaders('set-cookie'):
if d[0] in [ ' ', '\t' ]:
saved += d.strip()
else:
if saved: self.cookies.load(saved)
saved = d.strip()
if saved: self.cookies.load(saved)
if response.status == 401:
if not callable(self.http_callbacks.get(response.status,None)):
raise RuntimeError, 'HTTP Digest Authorization Failed'
self.http_callbacks[response.status](response)
continue
if response.status != 100: break
# The httplib doesn't understand the HTTP continuation header.
# Horrible internals hack to patch things up.
self.h._HTTPConnection__state = httplib._CS_REQ_SENT
self.h._HTTPConnection__response = None
return self.data
def IsSOAP(self):
    """Report whether the reply carries a SOAP (text/xml) payload."""
    # A message that was already parsed is by definition SOAP.
    if self.ps:
        return 1
    self.ReceiveRaw()
    content_type = self.reply_headers.type
    return content_type == 'text/xml'
def ReceiveSOAP(self, readerclass=None, **kw):
    """Return the reply parsed as a SOAP message (cached after first call)."""
    if self.ps:
        return self.ps
    if not self.IsSOAP():
        raise TypeError(
            'Response is "%s", not "text/xml"' % self.reply_headers.type)
    if len(self.data) == 0:
        raise TypeError('Received empty response')
    reader = readerclass or self.readerclass
    self.ps = ParsedSoap(self.data,
                         readerclass=reader,
                         encodingStyle=kw.get('encodingStyle'))
    # Verify any message signature before handing the parse back.
    if self.sig_handler is not None:
        self.sig_handler.verify(self.ps)
    return self.ps
def IsAFault(self):
    """Receive the reply as SOAP and report whether it is a fault."""
    self.ReceiveSOAP()
    return self.ps.IsAFault()
def ReceiveFault(self, **kw):
    """Return the reply parsed as a SOAP Fault object.

    Raises TypeError when the incoming message is not a fault.
    """
    self.ReceiveSOAP(**kw)
    if self.ps.IsAFault():
        return FaultFromFaultMessage(self.ps)
    raise TypeError("Expected SOAP Fault not found")
def Receive(self, replytype, **kw):
    """Parse the reply and build the corresponding Python object.

    KeyWord data:
        faults -- list of WSDL operation.fault typecodes
        wsaction -- If using WS-Address, must specify Action value we
            expect to receive.
    """
    self.ReceiveSOAP(**kw)
    ps = self.ps
    if ps.IsAFault():
        raise FaultException(FaultFromFaultMessage(ps))
    # A class carrying a `typecode` attribute stands in for the typecode.
    tc = getattr(replytype, 'typecode', replytype)
    reply = ps.Parse(tc)
    if self.address is not None:
        self.address.checkResponse(ps, kw.get('wsaction'))
    return reply
def __repr__(self):
    # "<ClassName instance 0x...>" -- mirrors the classic instance repr.
    cls_name = self.__class__.__name__
    return "<%s instance %s>" % (cls_name, _get_idstr(self))
class Binding(_Binding):
    '''Object that represents a binding (connection) to a SOAP server.
    Can be used in the "name overloading" style.

    class attr:
        gettypecode -- function that returns typecode from typesmodule,
            can be set so can use whatever mapping you desire.
    '''
    gettypecode = staticmethod(lambda mod,e: getattr(mod, str(e.localName)).typecode)
    logger = _GetLogger('ZSI.client.Binding')

    def __init__(self, url, namespace=None, typesmodule=None, **kw):
        """
        Parameters:
            url -- location of service
            namespace -- optional root element namespace
            typesmodule -- optional response only. dict(name=typecode),
                lookup for all children of root element.
        """
        self.typesmodule = typesmodule
        self.namespace = namespace
        _Binding.__init__(self, url=url, **kw)

    def __getattr__(self, name):
        '''Return a callable object that will invoke the RPC method
        named by the attribute.
        '''
        # Leave dunder attributes alone so copy/pickle etc. still work.
        if name[:2] == '__' and len(name) > 5 and name[-2:] == '__':
            if hasattr(self, name): return getattr(self, name)
            return getattr(self.__class__, name)
        return _Caller(self, name, self.namespace)

    def __parse_child(self, node):
        '''for rpc-style map each message part to a class in typesmodule
        '''
        try:
            tc = self.gettypecode(self.typesmodule, node)
        except Exception:
            # Was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt; only real lookup failures should fall
            # back to TC.Any (consistent with the handler below).
            self.logger.debug('didnt find typecode for "%s" in typesmodule: %s',
                node.localName, self.typesmodule)
            tc = TC.Any(aslist=1)
            return tc.parse(node, self.ps)

        self.logger.debug('parse child with typecode : %s', tc)
        try:
            return tc.parse(node, self.ps)
        except Exception:
            self.logger.debug('parse failed try Any : %s', tc)
            tc = TC.Any(aslist=1)
            return tc.parse(node, self.ps)

    def Receive(self, replytype, **kw):
        '''Parse message, create Python object.

        KeyWord data:
            faults -- list of WSDL operation.fault typecodes
            wsaction -- If using WS-Address, must specify Action value we
                expect to receive.
        '''
        self.ReceiveSOAP(**kw)
        ps = self.ps
        tp = _find_type(ps.body_root)
        isarray = ((type(tp) in (tuple,list) and tp[1] == 'Array') or _find_arraytype(ps.body_root))
        # Without a types module (or for SOAP-encoded arrays) defer to the
        # generic receive path.
        if self.typesmodule is None or isarray:
            return _Binding.Receive(self, replytype, **kw)
        if ps.IsAFault():
            msg = FaultFromFaultMessage(ps)
            raise FaultException(msg)

        tc = replytype
        if hasattr(replytype, 'typecode'):
            tc = replytype.typecode

        #Ignore response wrapper
        reply = {}
        for elt in _child_elements(ps.body_root):
            name = str(elt.localName)
            reply[name] = self.__parse_child(elt)
        if self.address is not None:
            self.address.checkResponse(ps, kw.get('wsaction'))
        return reply
class NamedParamBinding(Binding):
    """Like Binding, except RPC invocations take named (keyword) parameters."""

    logger = _GetLogger('ZSI.client.Binding')

    def __getattr__(self, name):
        """Return a callable that invokes the RPC method `name`."""
        # Dunder lookups must not be turned into RPC calls.
        if name.startswith('__') and len(name) > 5 and name.endswith('__'):
            if hasattr(self, name):
                return getattr(self, name)
            return getattr(self.__class__, name)
        return _NamedParamCaller(self, name, self.namespace)
# Emit the module copyright when run as a script (Python 2 print statement).
if __name__ == '__main__': print _copyright
| {
"content_hash": "a49723c103c880d82a2708f527093a56",
"timestamp": "",
"source": "github",
"line_count": 577,
"max_line_length": 152,
"avg_line_length": 38.39168110918544,
"alnum_prop": 0.574349945828819,
"repo_name": "acigna/pywez",
"id": "2989acc9d9a02b49a310203e4d60b1f0924d741f",
"size": "22248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zsi/ZSI/client.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "338"
},
{
"name": "CSS",
"bytes": "11480"
},
{
"name": "Python",
"bytes": "1095192"
},
{
"name": "Shell",
"bytes": "278"
},
{
"name": "TeX",
"bytes": "152117"
}
],
"symlink_target": ""
} |
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
# TODO(opensource): Add support for pyx_library in the open-source build.
# For now, we use the slow versions that fast_tensor_util replaces.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
# pylint: enable=g-import-not-at-top
def ExtractBitsFromFloat16(x):
  """Return the IEEE-754 binary16 bit pattern of `x` as a Python int.

  TensorProto stores half-precision values in the int32 `half_val` field,
  so the raw 16-bit pattern is extracted here.
  """
  # np.asscalar is deprecated (removed in NumPy >= 1.23); ndarray.item()
  # is the supported equivalent.
  return np.asarray(x, dtype=np.float16).view(np.uint16).item()
def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
  """Append float16 values to the proto's int32 `half_val` field."""
  # Pure-Python fallback: store each value's binary16 bit pattern.
  for value in proto_values:
    tensor_proto.half_val.append(ExtractBitsFromFloat16(value))
# Dispatch table mapping numpy scalar types to a function that appends a flat
# sequence of values of that type onto the matching typed field of a
# TensorProto. When the compiled fast_tensor_util extension imported above is
# available its C implementations are used; otherwise pure-Python fallbacks
# are defined in the else branch.
if _FAST_TENSOR_UTIL_AVAILABLE:
  _NP_TO_APPEND_FN = {
      # TODO(sesse): We should have a
      # fast_tensor_util.AppendFloat16ArrayToTensorProto,
      # but it seems np.float16_t doesn't exist?
      np.float16: SlowAppendFloat16ArrayToTensorProto,
      np.float32: fast_tensor_util.AppendFloat32ArrayToTensorProto,
      np.float64: fast_tensor_util.AppendFloat64ArrayToTensorProto,
      np.int32: fast_tensor_util.AppendInt32ArrayToTensorProto,
      np.int64: fast_tensor_util.AppendInt64ArrayToTensorProto,
      np.uint8: fast_tensor_util.AppendUInt8ArrayToTensorProto,
      np.uint16: fast_tensor_util.AppendUInt16ArrayToTensorProto,
      np.int8: fast_tensor_util.AppendInt8ArrayToTensorProto,
      np.int16: fast_tensor_util.AppendInt16ArrayToTensorProto,
      np.complex64: fast_tensor_util.AppendComplex64ArrayToTensorProto,
      np.complex128: fast_tensor_util.AppendComplex128ArrayToTensorProto,
      # NOTE(review): np.object / np.bool are deprecated aliases in modern
      # NumPy (use object / bool).
      np.object: fast_tensor_util.AppendObjectArrayToTensorProto,
      np.bool: fast_tensor_util.AppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype:
      fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype:
      fast_tensor_util.AppendUInt8ArrayToTensorProto,
      # NOTE(review): the 16-bit quantized types map to the 8-bit appenders
      # (Int8/UInt8) -- looks like a copy-paste slip; confirm intended.
      dtypes.qint16.as_numpy_dtype:
      fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype:
      fast_tensor_util.AppendUInt8ArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype:
      fast_tensor_util.AppendInt32ArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
else:

  # Pure-Python fallbacks. Each converts every element with np.asscalar
  # (NOTE(review): deprecated in modern NumPy; .item() is the replacement)
  # and extends the matching typed field on the proto.
  def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.float_val.extend([np.asscalar(x) for x in proto_values])

  def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.double_val.extend([np.asscalar(x) for x in proto_values])

  def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int_val.extend([np.asscalar(x) for x in proto_values])

  # Quantized values arrive as 1-element tuples; unwrap before appending.
  def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int_val.extend([np.asscalar(x[0]) for x in proto_values])

  def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values])

  # Complex values are flattened into alternating real/imag scalars.
  def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.scomplex_val.extend([np.asscalar(v)
                                      for x in proto_values
                                      for v in [x.real, x.imag]])

  def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.dcomplex_val.extend([np.asscalar(v)
                                      for x in proto_values
                                      for v in [x.real, x.imag]])

  def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])

  def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.bool_val.extend([np.asscalar(x) for x in proto_values])

  _NP_TO_APPEND_FN = {
      np.float16: SlowAppendFloat16ArrayToTensorProto,
      np.float32: SlowAppendFloat32ArrayToTensorProto,
      np.float64: SlowAppendFloat64ArrayToTensorProto,
      np.int32: SlowAppendIntArrayToTensorProto,
      np.int64: SlowAppendInt64ArrayToTensorProto,
      np.uint8: SlowAppendIntArrayToTensorProto,
      np.uint16: SlowAppendIntArrayToTensorProto,
      np.int8: SlowAppendIntArrayToTensorProto,
      np.int16: SlowAppendIntArrayToTensorProto,
      np.complex64: SlowAppendComplex64ArrayToTensorProto,
      np.complex128: SlowAppendComplex128ArrayToTensorProto,
      np.object: SlowAppendObjectArrayToTensorProto,
      np.bool: SlowAppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
def GetFromNumpyDTypeDict(dtype_dict, dtype):
  """Look up `dtype` in `dtype_dict` by equality rather than by hash.

  NOTE: dtype_dict.get(dtype) always returns None -- np.dtype instances do
  not hash to the same bucket as the scalar-type keys even though they
  compare equal, so a linear scan is required.
  """
  for key in dtype_dict:
    if key == dtype:
      return dtype_dict[key]
  return None
def GetNumpyAppendFn(dtype):
  """Return the proto-append function for `dtype`, or None if unsupported."""
  # numpy string dtypes are variable length, so there is no single constant
  # to compare against (np.string does not exist); inspect dtype.type.
  if dtype.type in (np.string_, np.unicode_):
    if _FAST_TENSOR_UTIL_AVAILABLE:
      return fast_tensor_util.AppendObjectArrayToTensorProto
    return SlowAppendObjectArrayToTensorProto
  return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
def TensorShapeProtoToList(shape):
  """Convert a TensorShapeProto to a plain list of dimension sizes.

  Args:
    shape: A TensorShapeProto.

  Returns:
    List of integers, one per dimension of the tensor.
  """
  return [dimension.size for dimension in shape.dim]
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
def _FlattenToStrings(nested_strings):
if isinstance(nested_strings, list):
for inner in nested_strings:
for flattened_string in _FlattenToStrings(inner):
yield flattened_string
else:
yield nested_strings
# Dtypes whose values may be serialized compactly into the raw
# `tensor_content` bytes field instead of one-per-value proto fields.
# float16, bfloat16, complex, string and bool are intentionally absent.
_TENSOR_CONTENT_TYPES = frozenset([
    dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16,
    dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8, dtypes.qint16,
    dtypes.quint16, dtypes.qint32,
])
class _Message(object):
def __init__(self, message):
self._message = message
def __repr__(self):
return self._message
def _FirstNotNone(l):
  """Return the first non-None element of `l`, masking raw Tensors.

  Tensors are replaced by a _Message placeholder so their full repr does
  not leak into error messages.
  """
  for item in l:
    if item is None:
      continue
    if isinstance(item, ops.Tensor):
      return _Message("list containing Tensors")
    return item
  return None
def _NotNone(v):
  """Map None to a printable _Message placeholder; pass all else through."""
  return _Message("None") if v is None else v
def _FilterInt(v):
  """Return an element of `v` that is not an integer, or None if all are."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterInt(item) for item in v])
  return None if isinstance(v, compat.integral_types) else _NotNone(v)
def _FilterFloat(v):
  """Return an element of `v` that is not a real number, or None if all are."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterFloat(item) for item in v])
  return None if isinstance(v, compat.real_types) else _NotNone(v)
def _FilterComplex(v):
  """Return an element of `v` that is not complex, or None if all are."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterComplex(item) for item in v])
  return None if isinstance(v, compat.complex_types) else _NotNone(v)
def _FilterStr(v):
  """Return an element of `v` that is not bytes/text, or None if all are."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterStr(item) for item in v])
  return None if isinstance(v, compat.bytes_or_text_types) else _NotNone(v)
def _FilterBool(v):
  """Return an element of `v` that is not a Python bool, or None if all are."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterBool(item) for item in v])
  # NOTE(review): numpy bool scalars (np.bool_) are not instances of the
  # builtin `bool` and would be reported as mismatches -- confirm intended.
  return None if isinstance(v, bool) else _NotNone(v)
def _FilterNotTensor(v):
  """Return str(v) if `v` is (or nests) a Tensor, else None.

  Inverse polarity of the other filters: a Tensor is the mismatch here.
  """
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterNotTensor(item) for item in v])
  return str(v) if isinstance(v, ops.Tensor) else None
# Maps a TensorFlow dtype to the filter that checks whether a Python value
# (or nest of values) is compatible with it. _AssertCompatible falls back to
# _FilterNotTensor for dtypes not listed here (including dtype=None).
_TF_TO_IS_OK = {
    dtypes.bool: _FilterBool,
    dtypes.complex128: _FilterComplex,
    dtypes.complex64: _FilterComplex,
    dtypes.float32: _FilterFloat,
    dtypes.float64: _FilterFloat,
    dtypes.int16: _FilterInt,
    dtypes.int32: _FilterInt,
    dtypes.int64: _FilterInt,
    dtypes.int8: _FilterInt,
    dtypes.qint16: _FilterInt,
    dtypes.qint32: _FilterInt,
    dtypes.qint8: _FilterInt,
    dtypes.quint16: _FilterInt,
    dtypes.quint8: _FilterInt,
    dtypes.string: _FilterStr,
    dtypes.uint16: _FilterInt,
    dtypes.uint8: _FilterInt,
}
def _AssertCompatible(values, dtype):
  """Raise TypeError if `values` holds an element incompatible with `dtype`."""
  check = _TF_TO_IS_OK.get(dtype, _FilterNotTensor)
  mismatch = check(values)
  if mismatch is None:
    return
  if dtype is None:
    raise TypeError("List of Tensors when single Tensor expected")
  raise TypeError("Expected %s, got %s of type '%s' instead." %
                  (dtype.name, repr(mismatch), type(mismatch).__name__))
def make_tensor_proto(values, dtype=None, shape=None):
  """Create a TensorProto.

  Args:
    values: Values to put in the TensorProto.
    dtype: Optional tensor_pb2 DataType value.
    shape: List of integers representing the dimensions of tensor.

  Returns:
    A TensorProto. Depending on the type, it may contain data in the
    "tensor_content" attribute, which is not directly useful to Python programs.
    To access the values you should convert the proto back to a numpy ndarray
    with tensor_util.MakeNdarray(proto).

  Raises:
    TypeError: if unsupported types are provided.
    ValueError: if arguments have inappropriate values.

  make_tensor_proto accepts "values" of a python scalar, a python list, a
  numpy ndarray, or a numpy scalar.

  If "values" is a python scalar or a python list, make_tensor_proto
  first convert it to numpy ndarray. If dtype is None, the
  conversion tries its best to infer the right numpy data
  type. Otherwise, the resulting numpy array has a compatible data
  type with the given dtype.

  In either case above, the numpy ndarray (either the caller provided
  or the auto converted) must have the compatible type with dtype.

  make_tensor_proto then converts the numpy array to a tensor proto.

  If "shape" is None, the resulting tensor proto represents the numpy
  array precisely.

  Otherwise, "shape" specifies the tensor's shape and the numpy array
  can not have more elements than what "shape" specifies.
  """
  if dtype:
    dtype = dtypes.as_dtype(dtype)

  # Quantized dtypes need special handling throughout: their values travel
  # as tuples and they have no exact numpy equivalent.
  is_quantized = (dtype in [dtypes.qint8, dtypes.quint8, dtypes.qint16,
                            dtypes.quint16, dtypes.qint32])

  # We first convert value to a numpy array or scalar.
  if isinstance(values, (np.ndarray, np.generic)):
    if dtype:
      nparray = values.astype(dtype.as_numpy_dtype)
    else:
      nparray = values
  else:
    if values is None:
      raise ValueError("None values not supported.")
    # if dtype is provided, forces numpy array to be the type
    # provided if possible.
    np_dt = dtype.as_numpy_dtype if dtype else None
    if np.prod(shape) == 0:
      # Degenerate (zero-element) shape: skip conversion of `values`.
      nparray = np.empty(shape, dtype=np_dt)
    else:
      _AssertCompatible(values, dtype)
      nparray = np.array(values, dtype=np_dt)
      # We need to pass in quantized values as tuples, so don't apply the
      # shape check to them.
      if (list(nparray.shape) != _GetDenseDimensions(values) and
          not is_quantized):
        raise ValueError("""Argument must be a dense tensor: %s"""
                         """ - got shape %s, but wanted %s.""" % (
                             values, list(nparray.shape),
                             _GetDenseDimensions(values)))
    # python/numpy default float type is float64. We prefer float32 instead.
    if (nparray.dtype == np.float64) and dtype is None:
      nparray = nparray.astype(np.float32)
    # python/numpy default int type is int64. We prefer int32 instead.
    elif (nparray.dtype == np.int64) and dtype is None:
      downcasted_array = nparray.astype(np.int32)
      # Do not down cast if it leads to precision loss.
      if np.array_equal(downcasted_array, nparray):
        nparray = downcasted_array

  # if dtype is provided, it must be compatible with what numpy
  # conversion says.
  numpy_dtype = dtypes.as_dtype(nparray.dtype)
  if numpy_dtype is None:
    raise TypeError("Unrecognized data type: %s" % nparray.dtype)

  # If dtype was specified and is a quantized type, we convert
  # numpy_dtype back into the quantized version.
  if is_quantized:
    numpy_dtype = dtype

  if dtype is not None and not dtype.base_dtype == numpy_dtype.base_dtype:
    raise TypeError("Incompatible types: %s vs. %s" % (dtype, nparray.dtype))

  # If shape is not given, get the shape from the numpy array.
  if shape is None:
    shape = nparray.shape
    is_same_size = True
    shape_size = nparray.size
  else:
    shape = [int(dim) for dim in shape]
    shape_size = np.prod(shape)
    is_same_size = shape_size == nparray.size

    if nparray.size > shape_size:
      raise ValueError(
          "Too many elements provided. Needed at most %d, but received %d" %
          (shape_size, nparray.size))

  tensor_proto = tensor_pb2.TensorProto(
      dtype=numpy_dtype.as_datatype_enum,
      tensor_shape=tensor_shape.as_shape(shape).as_proto())

  # Fast path: pack the raw bytes into tensor_content when the element type
  # supports it and no broadcast/fill semantics are needed.
  # NOTE(review): ndarray.tostring() is a deprecated alias of tobytes().
  if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
    if nparray.size * nparray.itemsize >= (1 << 31):
      raise ValueError(
          "Cannot create a tensor proto whose content is larger than 2GB.")
    tensor_proto.tensor_content = nparray.tostring()
    return tensor_proto

  # If we were not given values as a numpy array, compute the proto_values
  # from the given values directly, to avoid numpy trimming nulls from the
  # strings. Since values could be a list of strings, or a multi-dimensional
  # list of lists that might or might not correspond to the given shape,
  # we flatten it conservatively.
  if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
    proto_values = _FlattenToStrings(values)
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
    return tensor_proto

  # TensorFlow expects C order (a.k.a., eigen row major).
  proto_values = nparray.ravel()

  append_fn = GetNumpyAppendFn(proto_values.dtype)
  if append_fn is None:
    raise TypeError("Element type not supported in TensorProto: %s" %
                    numpy_dtype.name)
  append_fn(tensor_proto, proto_values)

  return tensor_proto
def MakeNdarray(tensor):
  """Create a numpy ndarray from a tensor.

  Create a numpy ndarray with the same shape and data as the tensor.

  In each typed branch, a single stored value means "fill the whole shape
  with this value" (the inverse of make_tensor_proto's compact encoding);
  otherwise the per-value field is read element by element.

  Args:
    tensor: A TensorProto.

  Returns:
    A numpy array with the tensor contents.

  Raises:
    TypeError: if tensor has unsupported type.
  """
  shape = [d.size for d in tensor.tensor_shape.dim]
  num_elements = np.prod(shape)
  tensor_dtype = dtypes.as_dtype(tensor.dtype)
  dtype = tensor_dtype.as_numpy_dtype

  # Compact encoding: raw bytes in tensor_content.
  # NOTE(review): np.fromstring is deprecated in favor of np.frombuffer
  # (frombuffer returns a read-only view, so a copy would be needed).
  if tensor.tensor_content:
    return np.fromstring(tensor.tensor_content, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float32:
    if len(tensor.float_val) == 1:
      return np.repeat(np.array(tensor.float_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float64:
    if len(tensor.double_val) == 1:
      return np.repeat(np.array(tensor.double_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)
  # All small int and quantized types share the int_val field.
  elif tensor_dtype in [dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16,
                        dtypes.int8, dtypes.qint32, dtypes.quint8, dtypes.qint8,
                        dtypes.qint16, dtypes.quint16, dtypes.bfloat16]:
    if len(tensor.int_val) == 1:
      return np.repeat(np.array(tensor.int_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.int64:
    if len(tensor.int64_val) == 1:
      return np.repeat(np.array(tensor.int64_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.string:
    if len(tensor.string_val) == 1:
      return np.repeat(np.array(tensor.string_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([x for x in tensor.string_val],
                      dtype=dtype).reshape(shape)
  # Complex values are stored as alternating real/imag scalars; zip(it, it)
  # pairs them back up.
  elif tensor_dtype == dtypes.complex64:
    it = iter(tensor.scomplex_val)
    if len(tensor.scomplex_val) == 2:
      return np.repeat(np.array(complex(tensor.scomplex_val[0],
                                        tensor.scomplex_val[1]), dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex128:
    it = iter(tensor.dcomplex_val)
    if len(tensor.dcomplex_val) == 2:
      return np.repeat(np.array(complex(tensor.dcomplex_val[0],
                                        tensor.dcomplex_val[1]), dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.bool:
    if len(tensor.bool_val) == 1:
      return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)
  else:
    raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
def ShapeEquals(tensor_proto, shape):
  """Returns True if "tensor_proto" has the given "shape".

  Args:
    tensor_proto: A TensorProto.
    shape: A tensor shape, expressed as a TensorShape, list, or tuple.

  Returns:
    True if "tensor_proto" has the given "shape", otherwise False.

  Raises:
    TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
      TensorShape, list, or tuple.
  """
  if not isinstance(tensor_proto, tensor_pb2.TensorProto):
    raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
  if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
    shape = [d.size for d in shape.dim]
  elif not isinstance(shape, (list, tuple)):
    raise TypeError("shape is not a list or tuple")
  tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
  # zip() stops at the shorter sequence, so compare ranks explicitly;
  # previously a shape that was a prefix of the other compared as equal.
  return (len(tensor_shape_list) == len(shape) and
          all(x == y for x, y in zip(tensor_shape_list, shape)))
def _ConstantValue(tensor):
  """Attempt to evaluate `tensor` statically; return an ndarray or None.

  Handles a fixed set of op types whose outputs can be derived from graph
  metadata or from constant inputs. Returns None whenever any required
  piece of information is not statically known.
  """
  # TODO(touts): Support Variables?
  if not isinstance(tensor, ops.Tensor):
    raise TypeError("tensor is not a Tensor")
  if tensor.op.type == "Const":
    # Literal constant: decode the stored TensorProto.
    return MakeNdarray(tensor.op.get_attr("value"))
  elif tensor.op.type == "Shape":
    # Shape of the input, if fully defined at graph-construction time.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.array([dim.value for dim in input_shape.dims],
                      dtype=tensor.dtype.as_numpy_dtype)
    else:
      return None
  elif tensor.op.type == "Size":
    # Total element count of the input, if its shape is fully defined.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Rank":
    # Rank only needs the number of dimensions, not their sizes.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.ndims is not None:
      return input_shape.ndims
    else:
      return None
  elif tensor.op.type == "Range":
    # All three of start/limit/delta must be constant.
    start = constant_value(tensor.op.inputs[0])
    if start is None:
      return None
    limit = constant_value(tensor.op.inputs[1])
    if limit is None:
      return None
    delta = constant_value(tensor.op.inputs[2])
    if delta is None:
      return None
    return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
  elif tensor.op.type == "Cast":
    pre_cast = constant_value(tensor.op.inputs[0])
    if pre_cast is None:
      return None
    cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
    return pre_cast.astype(cast_dtype.as_numpy_dtype)
  elif tensor.op.type == "Concat":
    # Input 0 is the concat axis; the rest are the values to concatenate.
    dim = constant_value(tensor.op.inputs[0])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[1:]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  else:
    return None
def constant_value(tensor):
  """Returns the constant value of the given tensor, if efficiently calculable.

  This function attempts to partially evaluate the given tensor, and
  returns its value as a numpy ndarray if this succeeds.

  TODO(mrry): Consider whether this function should use a registration
  mechanism like gradients and ShapeFunctions, so that it is easily
  extensible.

  NOTE: If `constant_value(tensor)` returns a non-`None` result, it will no
  longer be possible to feed a different value for `tensor`. This allows the
  result of this function to influence the graph that is constructed, and
  permits static shape optimizations.

  Args:
    tensor: The Tensor to be evaluated.

  Returns:
    A numpy ndarray containing the constant value of the given `tensor`,
    or None if it cannot be calculated.

  Raises:
    TypeError: if tensor is not an ops.Tensor.
  """
  value = _ConstantValue(tensor)
  if value is None:
    return None
  # The caller may now depend on the constant value, so conservatively
  # prevent this tensor from ever being fed a different one.
  tensor.graph.prevent_feeding(tensor)
  return value
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
  """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.
  """
  shape = tensor.get_shape().with_rank(1)
  # A zero-length shape vector denotes a scalar.
  if tensor.get_shape() == [0]:
    return tensor_shape.scalar()
  elif tensor.op.type == "Shape":
    # shape(x) used as a shape: just reuse x's static shape.
    return tensor.op.inputs[0].get_shape()
  elif tensor.op.type == "Pack":
    ret = tensor_shape.scalar()  # Empty list.
    for pack_input in tensor.op.inputs:
      # `pack_input` must be a scalar. Attempt to evaluate it, and append it
      # to `ret`. Negative or unknown entries become unknown dimensions.
      pack_input_val = constant_value(pack_input)
      if pack_input_val is None or pack_input_val < 0:
        new_dim = tensor_shape.Dimension(None)
      else:
        new_dim = tensor_shape.Dimension(pack_input_val)
      ret = ret.concatenate([new_dim])
    return ret
  elif tensor.op.type == "Concat":
    # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[1:]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  else:
    # Fallback: unknown rank-N shape, refined by a full constant value
    # when one is available.
    ret = tensor_shape.unknown_shape(shape[0].value)
    value = constant_value(tensor)
    if value is not None:
      ret = ret.merge_with(tensor_shape.TensorShape(
          [d if d != -1 else None for d in value]))
    return ret
| {
"content_hash": "ce311bb2577d12b4b88a76d43f7363e2",
"timestamp": "",
"source": "github",
"line_count": 667,
"max_line_length": 80,
"avg_line_length": 36.56521739130435,
"alnum_prop": 0.6860879904875149,
"repo_name": "HaebinShin/tensorflow",
"id": "098558fd3e3d1474794f9c7903ec8bb6e7d75745",
"size": "25079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/tensor_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "176349"
},
{
"name": "C++",
"bytes": "10558866"
},
{
"name": "CMake",
"bytes": "34638"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "GCC Machine Description",
"bytes": "2"
},
{
"name": "HTML",
"bytes": "865714"
},
{
"name": "Java",
"bytes": "41615"
},
{
"name": "JavaScript",
"bytes": "10609"
},
{
"name": "Jupyter Notebook",
"bytes": "1773504"
},
{
"name": "Makefile",
"bytes": "20930"
},
{
"name": "Objective-C",
"bytes": "5332"
},
{
"name": "Objective-C++",
"bytes": "45677"
},
{
"name": "Protocol Buffer",
"bytes": "118214"
},
{
"name": "Python",
"bytes": "8858431"
},
{
"name": "Shell",
"bytes": "234426"
},
{
"name": "TypeScript",
"bytes": "428153"
}
],
"symlink_target": ""
} |
# Exercise basic select.poll behaviour: register/modify/unregister and
# polling a closed socket. Output is compared against an expected file,
# so the printed lines are part of the test contract.
try:
    import usocket as socket, uselect as select, uerrno as errno
except ImportError:
    try:
        import socket, select, errno
        select.poll  # Raises AttributeError for CPython implementations without poll()
    except (ImportError, AttributeError):
        print("SKIP")
        raise SystemExit

poller = select.poll()
s = socket.socket()
poller.register(s)
# https://docs.python.org/3/library/select.html#select.poll.register
# "Registering a file descriptor that’s already registered is not an error,
# and has the same effect as registering the descriptor exactly once."
poller.register(s)

# 2 args are mandatory unlike register()
try:
    poller.modify(s)
except TypeError:
    print("modify:TypeError")

poller.modify(s, select.POLLIN)
poller.unregister(s)

# Modifying an unregistered descriptor must fail with ENOENT.
try:
    poller.modify(s, select.POLLIN)
except OSError as e:
    assert e.args[0] == errno.ENOENT

# poll after closing the socket, should return POLLNVAL
poller.register(s)
s.close()
p = poller.poll(0)
print(len(p), p[0][-1])
| {
"content_hash": "149ff7107a2604da9a7660c440f86bbf",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 87,
"avg_line_length": 24.73170731707317,
"alnum_prop": 0.7159763313609467,
"repo_name": "trezor/micropython",
"id": "82a7195c03b2a215a787e74b87ad492329a9e2bb",
"size": "1016",
"binary": false,
"copies": "2",
"ref": "refs/heads/trezor-v1.12",
"path": "tests/extmod/uselect_poll_basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "106243"
},
{
"name": "C",
"bytes": "12090846"
},
{
"name": "C++",
"bytes": "570652"
},
{
"name": "CMake",
"bytes": "800"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "105968"
},
{
"name": "Objective-C",
"bytes": "7450"
},
{
"name": "Python",
"bytes": "841669"
},
{
"name": "Shell",
"bytes": "13882"
}
],
"symlink_target": ""
} |
class Solution(object):
    # NOTE(review): unimplemented stub -- the method body is only its
    # docstring, so calling it returns None rather than a list of trees.
    def generateTrees(self, n):
        """
        :type n: int
        :rtype: List[TreeNode]
        """
| {
"content_hash": "bde7b626b11412ea086b536549f9396f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 31,
"avg_line_length": 20,
"alnum_prop": 0.4642857142857143,
"repo_name": "quake0day/oj",
"id": "072ec53bf88deaf681f1c08c491d2cf82401528c",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Unique Binary Search Trees II_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5897"
},
{
"name": "Java",
"bytes": "691"
},
{
"name": "JavaScript",
"bytes": "1131"
},
{
"name": "Python",
"bytes": "294929"
}
],
"symlink_target": ""
} |
import sys
import json
import codecs
import re
import copy
import getopt
#for now contains dummy rules
class ValidationRule:
    """A single validation rule: a rule implementation plus an optional parameter.

    The rule-type string given to the constructor is resolved to one of the
    methods below via ValidationType; execute_rule() then applies that method
    to an extraction dict (with an 'extract' or 'sequence' key).
    """

    def __init__(self, type, param = None):
        # Resolve the rule name to its implementation. The mapping is built
        # at call time because ValidationType is defined after this class.
        # Unknown values pass through unchanged (callers may supply a
        # callable directly).
        rules_by_name = {
            "USER_REGEX": ValidationType.USER_REGEX,
            "NOT_NULL": ValidationType.NOT_NULL,
            "IS_URL": ValidationType.IS_URL,
            "MIN_ROWS": ValidationType.MIN_ROWS,
            "MAX_ROWS": ValidationType.MAX_ROWS,
            "RULE1": ValidationType.RULE1,
            "RULE2": ValidationType.RULE2,
            "RULE3": ValidationType.RULE3,
        }
        self.__method = rules_by_name.get(type, type)
        self.__param = param

    def execute_rule(self, field_extraction):
        """Apply this rule to one extraction; return True (pass) or False."""
        return self.__method(field_extraction, self.__param)

    #apply user defined regular expression on extract
    def user_regex(self, field_extraction, regex):
        """True if the user-supplied regular expression matches the extract."""
        extract = field_extraction['extract']
        return re.compile(regex).match(extract) is not None

    #check if extract is null or empty string
    def not_null(self, field_extraction, param = None):
        """True unless the extract is None or empty."""
        extract = field_extraction['extract']
        return bool(extract)

    def is_url(self, field_extraction, param = None):
        """True if the extract looks like an http(s)/ftp(s) URL."""
        extract = field_extraction['extract']
        regex = re.compile(
            r'^(?:http|ftp)s?://' # http:// or https://
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
            r'localhost|' # localhost...
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
            r'(?::\d+)?' # optional port
            r'(?:/?|[/?]\S+)$', re.IGNORECASE)
        return regex.match(extract) is not None

    #returns true if the list contains at least min_rows
    def min_rows(self, list_extraction, min_rows):
        """True if the extracted list has at least min_rows rows."""
        rows = list_extraction['sequence']
        return len(rows) >= int(min_rows)

    #returns true if the list contains at most max_rows
    def max_rows(self, list_extraction, max_rows):
        """True if the extracted list has at most max_rows rows.

        (A stray debug print of the row count was removed here.)
        """
        rows = list_extraction['sequence']
        return len(rows) <= int(max_rows)

    def rule1(self, extract, param = None):
        """Dummy rule: always passes."""
        return True

    def rule2(self, extract, param = None):
        """Dummy rule: always passes."""
        return True

    def rule3(self, extract, param = None):
        """Dummy rule: always fails."""
        return False

    def __str__(self):
        # The stored value is a function object; concatenating it to a str
        # raised TypeError in the original. Use its name instead.
        return 'rule:' + self.__method.__name__
class ValidationType:
    """Namespace mapping rule names to the ValidationRule check functions.

    NOTE: in Python 3 these class attributes are plain functions, not bound
    methods; ValidationRule stores and calls them accordingly.
    """
    USER_REGEX = ValidationRule.user_regex
    NOT_NULL = ValidationRule.not_null
    IS_URL = ValidationRule.is_url
    MIN_ROWS = ValidationRule.min_rows
    MAX_ROWS = ValidationRule.max_rows
    RULE1 = ValidationRule.rule1
    RULE2 = ValidationRule.rule2
    RULE3 = ValidationRule.rule3
#look at end of file for sample of extraction with validation metadata
#applies validation rules to extracted data and returns extraction json with validation metadata
class Validation:
    """Applies validation rules to extracted data and returns the extraction
    JSON annotated with validation metadata (see the sample at the end of
    this module).
    """
    def __init__(self, validation_json):
        # validation_json = {"gh56fj78": [see below ...]}]
        # __validation: key = fieldid, value = json array with validation rules
        #  [ { "type": "USER_REGEX",
        #      "param": "the regex" },
        #    { "type": "NOT_NULL" },
        #    etc.}
        self.__validation = validation_json

    def get_validations(self):
        """Return the raw fieldid -> rules mapping."""
        return self.__validation

    def validate_extraction(self, extraction):
        """Apply validation on a whole extraction; returns an annotated deep copy."""
        extraction_copy = copy.deepcopy(extraction)
        for page in extraction:
            self.validate_page(extraction_copy[page])
        return extraction_copy

    def validate_page(self, page_extraction):
        """Validate the data of one page in place and return it."""
        for fieldid in self.__validation:
            self.validate_page_for_field(page_extraction, fieldid, self.__validation[fieldid])
        return page_extraction

    def validate_page_for_field(self, page_extraction, field_id, validation_rules):
        """Recursively find the field carrying ``field_id`` and validate it.

        Descends into 'sub_rules' (nested fields) and 'sequence' (list rows).
        """
        if isinstance(page_extraction, list):
            # this is a sequence item so we look at the 'sub_rules' if they exist
            for row in page_extraction:
                if 'sub_rules' in row:
                    self.validate_page_for_field(row['sub_rules'], field_id, validation_rules)
        elif isinstance(page_extraction, dict):
            # this is a standard rule so we look at this one itself
            for name in page_extraction:
                if 'rule_id' in page_extraction[name] and page_extraction[name]['rule_id'] == field_id:
                    # found field that needs validation
                    self.validate_field(page_extraction[name], validation_rules)
                elif 'sub_rules' in page_extraction[name]:
                    self.validate_page_for_field(page_extraction[name]['sub_rules'], field_id, validation_rules)
                elif 'sequence' in page_extraction[name]:
                    self.validate_page_for_field(page_extraction[name]['sequence'], field_id, validation_rules)

    def validate_field(self, field_extraction, validation_rules):
        """Run every rule in ``validation_rules`` against one field."""
        for validation_rule in validation_rules:
            is_valid = self.validate_value(field_extraction, validation_rule)
            # construct validation json to be returned
            validation_rule['valid'] = is_valid
            self.add_validation(field_extraction, validation_rule)

    def validate_field_extraction(self, field_extraction):
        """Validate one field using the rules registered under its 'fieldid'."""
        if field_extraction['fieldid'] in self.__validation:
            for validation_rule in self.__validation[field_extraction['fieldid']]:
                is_valid = self.validate_value(field_extraction, validation_rule)
                # construct validation json to be returned
                validation_rule['valid'] = is_valid
                self.add_validation(field_extraction, validation_rule)
        else:
            # no validation for this field
            field_extraction['valid'] = True
            field_extraction['validation'] = None

    def validate_value(self, field_extraction, validation_rule):
        """Build a ValidationRule from the rule dict and execute it.

        ``field_extraction`` may be a simple field or a list; the rule
        implementation handles the difference.
        validation_rule = {type = ValidationType.RULE1, param = param if needed}
        """
        validation_param = None
        if 'param' in validation_rule:
            validation_param = validation_rule['param']
        one_validation_rule = ValidationRule(validation_rule['type'], validation_param)
        return one_validation_rule.execute_rule(field_extraction)

    def add_validation(self, field_extraction, field_validation):
        """Attach one rule result to the field and update its overall 'valid' flag."""
        if 'validation' in field_extraction:
            # already contains some validation; append to it and reset "valid" for this field
            field_extraction['validation'].append(field_validation)
            if field_extraction['valid'] is True and field_validation['valid'] is False:
                field_extraction['valid'] = False
        else:
            validation = []
            validation.append(field_validation)
            field_extraction['validation'] = validation
            field_extraction['valid'] = field_validation['valid']

    def get_schema_with_validation(self, schema_json):
        """Annotate a schema with the registered validation rules.

        ``schema_json`` is an array with one element (the root);
        schema_json[0]['list'] contains the fields, each of which can be a
        simple field or another list.
        """
        self.add_validation_to_schema(schema_json[0]['list'])
        return schema_json

    def add_validation_to_schema(self, schema_field_list):
        """Recursively copy rules onto each schema field keyed by 'schemaid'."""
        for field in schema_field_list:
            if field['schemaid'] in self.__validation:
                # add it
                field['validation'] = self.__validation[field['schemaid']]
            if 'list' in field:
                # this is a list field
                self.add_validation_to_schema(field['list'])

    @staticmethod
    def get_validation_for_page_1(page_extraction, validation):
        """Helper: fold the per-field 'valid' flags of one page into ``validation``.

        ``validation`` is {"valid": bool}; once a False is found it stays False.
        """
        # if I found something false everything is false
        if validation['valid'] is False:
            return
        if isinstance(page_extraction, list):
            # this is a sequence item so we look at the 'sub_rules' if they exist
            for row in page_extraction:
                if 'sub_rules' in row:
                    Validation.get_validation_for_page_1(row['sub_rules'], validation)
        elif isinstance(page_extraction, dict):
            # this is a standard rule so we look at this one itself
            for name in page_extraction:
                if 'valid' in page_extraction[name]:
                    # found field with validation
                    if page_extraction[name]['valid'] is False:
                        validation['valid'] = False
                        return
                if 'sub_rules' in page_extraction[name]:
                    Validation.get_validation_for_page_1(page_extraction[name]['sub_rules'], validation)
                elif 'sequence' in page_extraction[name]:
                    Validation.get_validation_for_page_1(page_extraction[name]['sequence'], validation)

    @staticmethod
    def get_validation_for_extraction(extraction):
        """Return {"valid": bool} for a whole extraction.

        Returns {"valid": False} as soon as one failed validation is found.
        """
        validation = {}
        validation['valid'] = True
        for page in extraction:
            Validation.get_validation_for_page_1(extraction[page], validation)
            if validation['valid'] is False:
                return validation
        return validation

    @staticmethod
    def get_validation_for_page(page_extraction):
        """Return {"valid": bool} for one page; False if any field failed."""
        validation = {}
        validation['valid'] = True
        Validation.get_validation_for_page_1(page_extraction, validation)
        return validation

    def __str__(self):
        output = str(self.__validation)
        return output
class Usage(Exception):
    """Raised to abort main() with a usage/error message (stored on ``msg``)."""
    def __init__(self, msg):
        self.msg = msg
def main(argv=None):
    """Command-line entry point.

    Usage: Validation.py [-s] VALIDATION_FILE EXTRACTION_FILE
    Validates the page extraction against the validation rules and prints the
    annotated extraction (or, with -s, only the overall true/false summary).
    Returns 2 on a usage error.
    """
    usage_text = (
        'python validation/Validation.py [OPTIONAL_PARAMS] [FILE_WITH_VALIDATION]'
        ' [FILE_WITH_EXTRACT]\n\t[OPTIONAL_PARAMS]: -s to return only true/false'
    )
    if argv is None:
        argv = sys.argv
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "sh", ["simple", "help"])
        except getopt.error as msg:
            raise Usage(msg)
        simple = False
        for name, _value in opts:
            if name in ("-s", "--simple"):
                simple = True
            elif name in ("-h", "--help"):
                raise Usage(usage_text)
        if len(args) != 2:
            raise Usage(usage_text)
        validation_file, extraction_file = args
        with codecs.open(validation_file, "r", "utf-8") as handle:
            validation_json = json.loads(handle.read().encode('utf-8'))
        validator = Validation(validation_json)
        with codecs.open(extraction_file, "r", "utf-8") as handle:
            page_json = json.loads(handle.read().encode('utf-8'))
        validated_page = validator.validate_page(page_json)
        if simple:
            output = Validation.get_validation_for_page(validated_page)
        else:
            output = validated_page
        print(json.dumps(output, indent=2, separators=(',', ': ')))
    except Usage as err:
        print(err.msg, end='', file=sys.stderr)
        print("for help use --help", end='', file=sys.stderr)
        return 2
# Run the CLI when this module is invoked as a script.
if __name__ == "__main__":
    sys.exit(main())
#Sample of extraction with validation metadata
#
# "extraction": {
# "page1": {
# "-whataboutthistable0005": {
# "begin_index": 87,
# "end_index": 96,
# "extract": "Firstname",
# "rule_id": "aa75710a-334d-458e-bcb5-f4298c8ea99b"
# },
# "0007": {
# "begin_index": 106,
# "end_index": 114,
# "extract": "Lastname",
# "rule_id": "607d9120-c56d-4c34-966c-3d470037f2ad",
# "valid": false,
# "validation": [
# {
# "param": "param1",
# "type": "RULE1",
# "valid": true
# },
# {
# "type": "RULE3",
# "valid": false
# }
# ]
# },
# "0012": {
# "begin_index": 201,
# "end_index": 239,
# "extract": "Bubba</td> <td>Yourd</td> <td>66</td>",
# "rule_id": "db221681-c05c-41e5-a22c-f4070e9314ff"
# },
# "Age0011": {
# "begin_index": 148,
# "end_index": 186,
# "extract": "Bill</td> <td>Wilson</td> <td>33</td>",
# "rule_id": "07fdbe8d-217b-47ae-b8ba-c2e8d5b43980"
# },
# "_list0001": {
# "begin_index": 50,
# "end_index": 313,
# "extract": " <table style=\"width:100%\"> <tr> <th>Firstname</th> <th>Lastname</th> <th>Age</th> </tr> <tr> <td>Bill</td> <td>Wilson</td> <td>33</td> </tr> <tr> <td>Bubba</td> <td>Yourd</td> <td>66</td> </tr> </table> ENDOFPAGE",
# "rule_id": "c06c2aa4-6150-4968-9968-2cb37cb6de13",
# "sequence": [
# {
# "begin_index": 87,
# "end_index": 131,
# "extract": "Firstname</th> <th>Lastname</th> <th>Age</th",
# "sequence_number": 1
# },
# {
# "begin_index": 148,
# "end_index": 184,
# "extract": "Bill</td> <td>Wilson</td> <td>33</td",
# "sequence_number": 2
# },
# {
# "begin_index": 201,
# "end_index": 237,
# "extract": "Bubba</td> <td>Yourd</td> <td>66</td",
# "sequence_number": 3
# }
# ],
# "valid": false,
# "validation": [
# {
# "param": "param3",
# "type": "RULE3",
# "valid": false
# }
# ]
# },
# "us_state0001": {
# "begin_index": 16,
# "end_index": 20,
# "extract": "me",
# "rule_id": "180c5bf4-50ee-42f0-b0f1-6339d290f207"
# }
# }
# }
# }} | {
"content_hash": "452a2f54b84e70e08a9acdc6707af21c",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 239,
"avg_line_length": 38.25934065934066,
"alnum_prop": 0.5519875919117647,
"repo_name": "usc-isi-i2/etk",
"id": "6e999eae2b0df93aede2c470f8bec37f42d421f2",
"size": "17408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etk/dependencies/landmark/landmark_extractor/validation/Validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4590"
},
{
"name": "HTML",
"bytes": "1048891"
},
{
"name": "Julia",
"bytes": "874347"
},
{
"name": "Jupyter Notebook",
"bytes": "123779"
},
{
"name": "Makefile",
"bytes": "601"
},
{
"name": "Python",
"bytes": "807682"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
lista1 = []
lista2 = []
continuar = int(raw_input("¿Cuantas personas desea añadir?: "))
while int (continuar) != len (lista1):
personas=str(raw_input("Nombre de la persona: "))
lista1.append(personas)
edades=str(raw_input("Edad de la persona: "))
lista2.append(edades)
print ""
print "Ha ingresado", len (lista1), "personas"
print "--------------------------------"
p=len(lista1)
mayor=0
for i in range (p):
if lista1[i] > mayor:
mayor = lista1[i]
print "El numero mayor es", mayor
| {
"content_hash": "26b9659f7fe44eb69339a1bb75777fa1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 63,
"avg_line_length": 25.238095238095237,
"alnum_prop": 0.5886792452830188,
"repo_name": "Kings42/HelloWorld",
"id": "2b4c40d369dd2c4ff2f90954727297b6e2fe228e",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Edades.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "98"
},
{
"name": "HTML",
"bytes": "2172"
},
{
"name": "Python",
"bytes": "25024"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_XML
from hbase import hbase
from hbase_service import hbase_service
import upgrade
from setup_ranger_hbase import setup_ranger_hbase
class HbaseRegionServer(Script):
    """Ambari lifecycle script for the HBase RegionServer component.

    Each handler receives the Ambari execution environment ``env``; the
    ``params``/``status_params`` modules are generated by Ambari at runtime
    and imported lazily inside each command.
    """
    def get_component_name(self):
        # Identifier under which Ambari tracks this component.
        return "hbase-regionserver"

    def install(self, env):
        """Install the HBase packages on this host."""
        self.install_packages(env)

    def configure(self, env):
        """Write/refresh the regionserver configuration files."""
        import params
        env.set_params(params)
        hbase(name='regionserver')

    def pre_upgrade_restart(self, env, upgrade_type=None):
        """Hook executed before an upgrade restart."""
        import params
        env.set_params(params)
        upgrade.prestart(env, "hbase-regionserver")

    def post_upgrade_restart(self, env, upgrade_type=None):
        """Hook executed after an upgrade restart."""
        import params
        env.set_params(params)
        upgrade.post_regionserver(env)

    def start(self, env, upgrade_type=None):
        """Configure, set up Ranger integration, then start the daemon."""
        import params
        env.set_params(params)
        self.configure(env)  # for security
        setup_ranger_hbase(upgrade_type=upgrade_type)
        hbase_service('regionserver',
                      action='start'
                      )

    def stop(self, env, upgrade_type=None):
        """Stop the regionserver daemon."""
        import params
        env.set_params(params)
        hbase_service('regionserver',
                      action='stop'
                      )

    def status(self, env):
        """Report liveness based on the daemon's pid file."""
        import status_params
        env.set_params(status_params)
        pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
        check_process_status(pid_file)

    def security_status(self, env):
        """Check the Kerberos security setup and emit a structured state.

        Emits one of SECURED_KERBEROS / UNSECURED / ERROR plus, when
        applicable, a human-readable ``securityIssuesFound`` message.
        """
        import status_params
        env.set_params(status_params)
        if status_params.security_enabled:
            # Expected hbase-site.xml security properties.
            props_value_check = {"hbase.security.authentication": "kerberos",
                                 "hbase.security.authorization": "true"}
            props_empty_check = ['hbase.regionserver.keytab.file',
                                 'hbase.regionserver.kerberos.principal']
            props_read_check = ['hbase.regionserver.keytab.file']
            hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
                                                         props_read_check)
            hbase_expectations = {}
            hbase_expectations.update(hbase_site_expectations)
            security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
                                                         {'hbase-site.xml': FILE_TYPE_XML})
            result_issues = validate_security_config_properties(security_params, hbase_expectations)
            if not result_issues:  # If all validations passed successfully
                try:
                    # Double check the dict before calling execute
                    if ('hbase-site' not in security_params
                            or 'hbase.regionserver.keytab.file' not in security_params['hbase-site']
                            or 'hbase.regionserver.kerberos.principal' not in security_params['hbase-site']):
                        self.put_structured_out({"securityState": "UNSECURED"})
                        self.put_structured_out(
                            {"securityIssuesFound": "Keytab file or principal are not set property."})
                        return
                    # Verify the keytab/principal actually work via kinit.
                    cached_kinit_executor(status_params.kinit_path_local,
                                          status_params.hbase_user,
                                          security_params['hbase-site']['hbase.regionserver.keytab.file'],
                                          security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
                                          status_params.hostname,
                                          status_params.tmp_dir)
                    self.put_structured_out({"securityState": "SECURED_KERBEROS"})
                except Exception as e:
                    self.put_structured_out({"securityState": "ERROR"})
                    self.put_structured_out({"securityStateErrorInfo": str(e)})
            else:
                issues = []
                for cf in result_issues:
                    issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
                self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
                self.put_structured_out({"securityState": "UNSECURED"})
        else:
            # Cluster is not kerberized.
            self.put_structured_out({"securityState": "UNSECURED"})

    def decommission(self, env):
        # Placeholder; decommissioning is not supported by this script yet.
        # (Python 2 print statement, kept as-is.)
        print "Decommission not yet implemented!"
# Entry point: let the Ambari Script base class dispatch the requested command.
if __name__ == "__main__":
    HbaseRegionServer().execute()
| {
"content_hash": "6b93e39877b1f1ec3b5f22af0bbdd0b9",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 114,
"avg_line_length": 38.7557251908397,
"alnum_prop": 0.6604293874335238,
"repo_name": "alexryndin/ambari",
"id": "d9bf76c3aa4e253c12c854d45f1ddf29636f7395",
"size": "5099",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/package/scripts/hbase_regionserver.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
"""
Text Segmentation Metrics
1. Windowdiff
Pevzner, L., and Hearst, M., A Critique and Improvement of
an Evaluation Metric for Text Segmentation,
Computational Linguistics 28, 19-36
2. Generalized Hamming Distance
Bookstein A., Kulyukin V.A., Raita T.
Generalized Hamming Distance
Information Retrieval 5, 2002, pp 353-375
Baseline implementation in C++
http://digital.cs.usu.edu/~vkulyukin/vkweb/software/ghd/ghd.html
Study describing benefits of Generalized Hamming Distance Versus
WindowDiff for evaluating text segmentation tasks
Begsten, Y. Quel indice pour mesurer l'efficacite en segmentation de textes ?
TALN 2009
3. Pk text segmentation metric
Beeferman D., Berger A., Lafferty J. (1999)
Statistical Models for Text Segmentation
Machine Learning, 34, 177-210
"""
try:
import numpy as np
except ImportError:
pass
def windowdiff(seg1, seg2, k, boundary="1", weighted=False):
    """
    Compute the windowdiff score for a pair of segmentations.

    A segmentation is any sequence over a two-item vocabulary (e.g. "0",
    "1"), where ``boundary`` marks the edge of a segment. The score is the
    fraction of length-``k`` windows in which the two segmentations disagree
    on the number of boundaries.

        >>> s1 = "000100000010"
        >>> s2 = "000010000100"
        >>> s3 = "100000010000"
        >>> '%.2f' % windowdiff(s1, s1, 3)
        '0.00'
        >>> '%.2f' % windowdiff(s1, s2, 3)
        '0.30'
        >>> '%.2f' % windowdiff(s2, s3, 3)
        '0.80'

    :param seg1: a segmentation
    :type seg1: str or list
    :param seg2: a segmentation
    :type seg2: str or list
    :param k: window width
    :type k: int
    :param boundary: boundary value
    :type boundary: str or int or bool
    :param weighted: use the weighted variant of windowdiff
    :type weighted: boolean
    :rtype: float
    """
    if len(seg1) != len(seg2):
        raise ValueError("Segmentations have unequal length")
    if k > len(seg1):
        raise ValueError(
            "Window width k should be smaller or equal than segmentation lengths"
        )
    n_windows = len(seg1) - k + 1
    total = 0
    for start in range(n_windows):
        # Count how far apart the boundary counts are within this window.
        ndiff = abs(
            seg1[start : start + k].count(boundary)
            - seg2[start : start + k].count(boundary)
        )
        total += ndiff if weighted else min(1, ndiff)
    return total / float(n_windows)
# Generalized Hamming Distance
def _init_mat(nrows, ncols, ins_cost, del_cost):
mat = np.empty((nrows, ncols))
mat[0, :] = ins_cost * np.arange(ncols)
mat[:, 0] = del_cost * np.arange(nrows)
return mat
def _ghd_aux(mat, rowv, colv, ins_cost, del_cost, shift_cost_coeff):
for i, rowi in enumerate(rowv):
for j, colj in enumerate(colv):
shift_cost = shift_cost_coeff * abs(rowi - colj) + mat[i, j]
if rowi == colj:
# boundaries are at the same location, no transformation required
tcost = mat[i, j]
elif rowi > colj:
# boundary match through a deletion
tcost = del_cost + mat[i, j + 1]
else:
# boundary match through an insertion
tcost = ins_cost + mat[i + 1, j]
mat[i + 1, j + 1] = min(tcost, shift_cost)
def ghd(ref, hyp, ins_cost=2.0, del_cost=2.0, shift_cost_coeff=1.0, boundary="1"):
    """
    Compute the Generalized Hamming Distance for a reference and a
    hypothetical segmentation: the cost of transforming the hypothetical
    segmentation into the reference through boundary insertion, deletion
    and shift operations.

    A segmentation is any sequence over a two-item vocabulary (e.g. "0",
    "1"), where ``boundary`` marks the edge of a segment. Recommended
    parameter values are a ``shift_cost_coeff`` of 2, with ``ins_cost`` and
    ``del_cost`` equal to the mean segment length in the reference.

        >>> # Same examples as Kulyukin C++ implementation
        >>> ghd('1100100000', '1100010000', 1.0, 1.0, 0.5)
        0.5
        >>> ghd('1100100000', '1100000001', 1.0, 1.0, 0.5)
        2.0
        >>> ghd('011', '110', 1.0, 1.0, 0.5)
        1.0
        >>> ghd('1', '0', 1.0, 1.0, 0.5)
        1.0
        >>> ghd('111', '000', 1.0, 1.0, 0.5)
        3.0
        >>> ghd('000', '111', 1.0, 2.0, 0.5)
        6.0

    :param ref: the reference segmentation
    :type ref: str or list
    :param hyp: the hypothetical segmentation
    :type hyp: str or list
    :param ins_cost: insertion cost
    :type ins_cost: float
    :param del_cost: deletion cost
    :type del_cost: float
    :param shift_cost_coeff: constant used to compute the cost of a shift;
        ``shift cost = shift_cost_coeff * |i - j|`` where ``i`` and ``j``
        are the positions indicating the shift
    :type shift_cost_coeff: float
    :param boundary: boundary value
    :type boundary: str or int or bool
    :rtype: float
    """
    ref_bounds = [pos for pos, tok in enumerate(ref) if tok == boundary]
    hyp_bounds = [pos for pos, tok in enumerate(hyp) if tok == boundary]

    # Degenerate cases: one (or both) segmentations carry no boundaries.
    if not ref_bounds and not hyp_bounds:
        return 0.0
    if ref_bounds and not hyp_bounds:
        return len(ref_bounds) * ins_cost
    if not ref_bounds:
        return len(hyp_bounds) * del_cost

    # Edit-distance style dynamic programming over boundary positions
    # (helper logic inlined): first row/column carry pure insert/delete
    # costs, the interior is the cheapest of transform vs. shift.
    table = np.empty((len(hyp_bounds) + 1, len(ref_bounds) + 1))
    table[0, :] = ins_cost * np.arange(len(ref_bounds) + 1)
    table[:, 0] = del_cost * np.arange(len(hyp_bounds) + 1)
    for i, hyp_pos in enumerate(hyp_bounds):
        for j, ref_pos in enumerate(ref_bounds):
            shift_cost = shift_cost_coeff * abs(hyp_pos - ref_pos) + table[i, j]
            if hyp_pos == ref_pos:
                tcost = table[i, j]
            elif hyp_pos > ref_pos:
                tcost = del_cost + table[i, j + 1]
            else:
                tcost = ins_cost + table[i + 1, j]
            table[i + 1, j + 1] = min(tcost, shift_cost)
    return table[-1, -1]
# Beeferman's Pk text segmentation evaluation metric
def pk(ref, hyp, k=None, boundary="1"):
"""
Compute the Pk metric for a pair of segmentations A segmentation
is any sequence over a vocabulary of two items (e.g. "0", "1"),
where the specified boundary value is used to mark the edge of a
segmentation.
>>> '%.2f' % pk('0100'*100, '1'*400, 2)
'0.50'
>>> '%.2f' % pk('0100'*100, '0'*400, 2)
'0.50'
>>> '%.2f' % pk('0100'*100, '0100'*100, 2)
'0.00'
:param ref: the reference segmentation
:type ref: str or list
:param hyp: the segmentation to evaluate
:type hyp: str or list
:param k: window size, if None, set to half of the average reference segment length
:type boundary: str or int or bool
:param boundary: boundary value
:type boundary: str or int or bool
:rtype: float
"""
if k is None:
k = int(round(len(ref) / (ref.count(boundary) * 2.0)))
err = 0
for i in range(len(ref) - k + 1):
r = ref[i : i + k].count(boundary) > 0
h = hyp[i : i + k].count(boundary) > 0
if r != h:
err += 1
return err / (len(ref) - k + 1.0)
| {
"content_hash": "eef8739cd14c1a96f0838bdd67a498ef",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 87,
"avg_line_length": 31.55188679245283,
"alnum_prop": 0.6065181641500972,
"repo_name": "nltk/nltk",
"id": "62e07892f9e054f5e17fe56745bb2f2e9561e0b8",
"size": "6999",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "nltk/metrics/segmentation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "24786"
},
{
"name": "Jupyter Notebook",
"bytes": "55608"
},
{
"name": "Makefile",
"bytes": "7983"
},
{
"name": "Python",
"bytes": "4831858"
},
{
"name": "Shell",
"bytes": "10877"
}
],
"symlink_target": ""
} |
"""
(c) 2017 David Barroso <dbarrosop@dravetech.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals, print_function
from ansible.module_utils.basic import AnsibleModule
try:
import napalm_yang
except ImportError:
napalm_yang = None
DOCUMENTATION = """
---
module: napalm_diff_yang
author: "David Barroso (@dbarrosop)"
version_added: "0.0"
short_description: "Return diff of two YANG objects"
description:
- "Create two YANG objects from dictionaries and runs mehtod"
- "napalm_yang.utils.diff on them."
requirements:
- napalm-yang
options:
models:
description:
- List of models to parse
required: True
first:
description:
- Dictionary with the data to load into the first YANG object
required: True
second:
description:
- Dictionary with the data to load into the second YANG object
required: True
"""
EXAMPLES = """
- napalm_diff_yang:
first: "{{ candidate.yang_model }}"
second: "{{ running_config.yang_model }}"
models:
- models.openconfig_interfaces
register: diff
"""
RETURN = """
diff:
description: "Same output as the method napalm_yang.utils.diff"
returned: always
type: dict
sample: '{
"interfaces": {
"interface": {
"both": {
"Port-Channel1": {
"config": {
"description": {
"first": "blah",
"second": "Asadasd"
}
}
}
}
}
}'
"""
def get_root_object(models):
    """
    Read a list of model names and return a Root object with those models added.

    Each entry of ``models`` is a dotted path (e.g.
    "models.openconfig_interfaces") resolved attribute-by-attribute against
    the napalm_yang package.
    """
    root = napalm_yang.base.Root()
    for dotted_name in models:
        attribute = napalm_yang
        for part in dotted_name.split("."):
            attribute = getattr(attribute, part)
        root.add_model(attribute)
    return root
def main():
    """Ansible entry point: diff two YANG-modelled configurations."""
    module = AnsibleModule(
        argument_spec=dict(
            models=dict(type="list", required=True),
            first=dict(type="dict", required=True),
            second=dict(type="dict", required=True),
        ),
        supports_check_mode=True,
    )

    # napalm-yang is imported optionally at module load; fail cleanly if absent.
    if not napalm_yang:
        module.fail_json(msg="the python module napalm-yang is required")

    model_names = module.params["models"]

    first_obj = get_root_object(model_names)
    first_obj.load_dict(module.params["first"])

    second_obj = get_root_object(model_names)
    second_obj.load_dict(module.params["second"])

    module.exit_json(yang_diff=napalm_yang.utils.diff(first_obj, second_obj))
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "2eec198e84f56a582f2a456a4a414e27",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 79,
"avg_line_length": 26.96825396825397,
"alnum_prop": 0.5950559152442614,
"repo_name": "napalm-automation/napalm-ansible",
"id": "8fdcb6ae1bea2ced7c08137948417a75b6a1aa98",
"size": "3440",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "napalm_ansible/modules/napalm_diff_yang.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "65389"
},
{
"name": "Roff",
"bytes": "8257"
},
{
"name": "Shell",
"bytes": "2083"
}
],
"symlink_target": ""
} |
'''PipelineGeneset.py - Tasks for processing gene sets
======================================================
Most of this tasks take a geneset (.gtf.gz) from ENSEMBL as input.
As of ENSEMBL release 75 the gtf file contains both transcripts but
also untranscribed features such as pseudo genes, for example::
1 pseudogene gene 11869 14412 . + . gene_id "ENSG00000223972"; gene_name "DDX11L1"; gene_source "ensembl_havana"; gene_biotype "pseudogene";
Reference
---------
'''
import os
import collections
import sqlite3
import pandas as pd
import CGAT.IOTools as IOTools
import CGATPipelines.Pipeline as P
import CGAT.Experiment as E
import CGAT.GTF as GTF
import CGAT.IndexedFasta as IndexedFasta
# When importing this module, set PARAMS to your parameter
# dictionary
PARAMS = {}
ENSEMBL_INFO = collections.namedtuple(
"ENSEMBLINFO", "species gene_prefix transcript_prefix")
# Map of UCSC genome prefixes to ENSEMBL gene sets
MAP_UCSC2ENSEMBL = {
'hg': ENSEMBL_INFO._make(('Homo_sapiens',
'ENSG',
'ENST')),
'mm': ENSEMBL_INFO._make(('Mus_musculus',
'ENSMUSG',
'ENSMUST')),
'rn': ENSEMBL_INFO._make(('Rattus_norvegicus',
'ENSRNOG',
'ENSRNOT')),
}
def mapUCSCToEnsembl(genome):
'''map the name of a UCSC genome (hg19, mm10) to
ENSEMBL URLs.'''
prefix = genome[:2]
return MAP_UCSC2ENSEMBL[prefix]
def annotateGenome(infile, outfile,
                   only_proteincoding=False,
                   job_memory="4G"):
    '''annotate genomic regions with reference gene set.

    The method applies the following filters to an ENSEMBL gene set:

    * Select transcribed features, i.e., those entries that contain a
      ``transcript_id``.
    * Merge overlapping exons from different transcripts within a
      gene.
    * In case of overlapping genes, take the longest gene in genomic
      coordinates.

    The resultant gene set is then converted to genomic annotations
    such as exonic, intronic, intergenic. For more information, see
    documentation for the script :mod:`gtf2gff.py` under the option
    ``--method=genome``.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename in :term:`gff` format.
    only_proteincoding : bool
       If True, only consider protein coding genes.
    job_memory : string
       Memory requested for the job; presumably read by P.run() from
       the caller's locals -- TODO confirm.
    '''
    method = "genome"
    if only_proteincoding:
        # NOTE(review): "% PARAMS" is a no-op here (no placeholders in
        # the string); presumably kept for consistency with other snippets.
        filter_cmd = """cgat gtf2gtf
        --method=filter --filter-method=proteincoding""" % PARAMS
    else:
        # pass-through when no filtering is requested
        filter_cmd = "cat"
    # executed by P.run(); %(...)s placeholders are filled from the
    # caller's locals and the pipeline PARAMS -- project convention
    statement = """
    zcat %(infile)s
    | %(filter_cmd)s
    | grep "transcript_id"
    | cgat gtf2gtf
    --method=sort --sort-order=gene+transcript
    | cgat gtf2gtf
    --method=set-source-to-transcript_biotype
    | cgat gtf2gtf
    --method=merge-exons
    --mark-utr
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=filter --filter-method=longest-gene
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=sort --sort-order=position
    | cgat gtf2gff
    --genome-file=%(genome_dir)s/%(genome)s
    --log=%(outfile)s.log
    --flank-size=%(enrichment_genes_flank)s
    --method=%(method)s
    | gzip
    > %(outfile)s
    """
    P.run()
def annotateGeneStructure(infile, outfile,
                          only_proteincoding=False,
                          job_memory="4G"):
    """annotate genomic regions with gene structure.

    The method applies the following filters to an ENSEMBL gene set:

    * Select transcribed features, i.e., those entries that contain a
      ``transcript_id``.
    * If there are multiple transcripts per gene, take a
      representative transcript. See :mod:`gtf2gtf` for the definition
      of the representative transcript.
    * In case of overlapping genes, take the longest gene in genomic
      coordinates.

    The resultant gene set is then converted to genomic annotations
    such as first_exon, first_intron, .... For more information, see
    documentation for the script :mod:`gtf2gff.py` under the option
    ``--method=genes``.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename in :term:`gff` format.
    only_proteincoding : bool
       If True, only consider protein coding genes.
    job_memory : string
       Memory requested for the job; presumably read by P.run() from
       the caller's locals -- TODO confirm.
    """
    if only_proteincoding:
        # "% PARAMS" is a no-op (no placeholders); see annotateGenome
        filter_cmd = """cgat gtf2gtf
        --method=filter --filter-method=proteincoding""" % PARAMS
    else:
        filter_cmd = "cat"
    method = "genes"
    # keep only exon features of transcribed entries before
    # reducing each gene to its representative transcript
    statement = """
    gunzip
    < %(infile)s
    | %(filter_cmd)s
    | awk '$3 == "exon"'
    | grep "transcript_id"
    | cgat gtf2gtf
    --method=sort --sort-order=gene+transcript
    | cgat gtf2gtf
    --method=filter --filter-method=representative-transcript
    | cgat gtf2gtf
    --method=filter --filter-method=longest-gene
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=sort --sort-order=position
    | cgat gtf2gff
    --genome-file=%(genome_dir)s/%(genome)s
    --log=%(outfile)s.log
    --flank-size=%(enrichment_genestructures_flank)i
    --flank-increment-size=%(enrichment_genestructures_increment)i
    --method=%(method)s
    --gene-detail=exons
    | gzip
    > %(outfile)s
    """
    P.run()
def buildFlatGeneSet(infile, outfile):
    '''build a flattened gene set.

    All transcripts in a gene are merged into a single transcript by
    combining overlapping exons. The merged transcript presumably
    receives the gene_id as its transcript_id
    (``--method=set-transcript-to-gene``) -- TODO confirm.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename in :term:`gtf` format.
    '''
    # sort by contig+gene, as in refseq gene sets, genes on
    # chr_random might contain the same identifier as on chr
    # and hence merging will fail.
    # --permit-duplicates is set so that these cases will be
    # assigned new merged gene ids.
    statement = """gunzip
    < %(infile)s
    | awk '$3 == "exon"'
    | grep "transcript_id"
    | cgat gtf2gtf
    --method=sort
    --sort-order=contig+gene
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=merge-exons
    --permit-duplicates
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=set-transcript-to-gene
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=sort
    --sort-order=position+gene
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s
    """
    P.run()
def buildProteinCodingGenes(infile, outfile):
    '''build a protein coding gene set from an ENSEMBL gene set.

    The method applies the following filters to an ENSEMBL gene set:

    * Select protein coding features.
    * Remove features not on the reference genome that has been chosen.
    * Merge overlapping exons from different transcripts within a
      gene.
    * In case of overlapping genes, take the longest gene in genomic
      coordinates.
    * Keep only features called ``exon`` in the GTF file.
    * Set the ``transcript_id`` to the ``gene_id``

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename in :term:`gtf` format.
    '''
    # sort by contig+gene, as in refseq gene sets, genes on
    # chr_random might contain the same identifier as on chr
    # and hence merging will fail.
    # --permit-duplicates is set so that these cases will be
    # assigned new merged gene ids.
    statement = """zcat %(infile)s
    | cgat gtf2gtf
    --method=filter
    --filter-method=proteincoding
    | grep "transcript_id"
    | cgat gtf2gtf
    --method=sort --sort-order=contig+gene
    | cgat gff2gff
    --method=sanitize
    --sanitize-method=genome
    --skip-missing
    --genome-file=%(genome_dir)s/%(genome)s
    | cgat gtf2gtf
    --method=merge-exons
    --permit-duplicates
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=filter --filter-method=longest-gene
    --log=%(outfile)s.log
    | awk '$3 == "exon"'
    | cgat gtf2gtf
    --method=set-transcript-to-gene
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=sort --sort-order=gene+transcript
    | gzip
    > %(outfile)s
    """
    P.run()
def loadGeneInformation(infile, outfile, only_proteincoding=False):
    '''load gene-related attributes from :term:`gtf` file into database.

    This method takes transcript-associated features from an
    :term:`gtf` file and collects the gene-related attributes in the
    9th column of the gtf file, ignoring exon_id, transcript_id,
    transcript_name, protein_id and exon_number.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename, contains logging information. The
       table name is derived from the filename of outfile.
    only_proteincoding : bool
       If True, only consider protein coding genes.
    '''
    job_memory = "4G"
    table = P.toTable(outfile)
    if only_proteincoding:
        # "% PARAMS" is a no-op (no placeholders); kept for consistency
        filter_cmd = """cgat gtf2gtf
        --method=filter --filter-method=proteincoding""" % PARAMS
    else:
        filter_cmd = "cat"
    # BUGFIX: the adjacent string literals previously concatenated
    # without separating spaces, yielding the single malformed option
    # "--add-index=gene_name--map=gene_name:str". Trailing spaces keep
    # the options separate (same convention as loadGeneStats below).
    load_statement = P.build_load_statement(
        table,
        options="--add-index=gene_id "
        "--add-index=gene_name "
        "--map=gene_name:str")
    statement = '''
    zcat %(infile)s
    | %(filter_cmd)s
    | grep "transcript_id"
    | cgat gtf2gtf
    --method=sort --sort-order=gene+transcript
    | cgat gtf2tsv
    --attributes-as-columns --output-only-attributes -v 0
    | python %(toolsdir)s/csv_cut.py
    --remove exon_id transcript_id transcript_name protein_id exon_number
    | %(pipeline_scriptsdir)s/hsort 1
    | uniq
    | %(load_statement)s
    > %(outfile)s'''
    P.run()
def loadEnsemblTranscriptInformation(ensembl_gtf, geneset_gtf,
                                     outfile,
                                     csvdb,
                                     set_biotype=None,
                                     set_transcript_support=None):
    '''
    Parse and annotate a geneset_gtf using the original Ensembl
    GTF attributes.

    The ensembl GTF structure is not static, so this needs to maintain
    backwards compatibility. For certain versions, attributes may be
    present in later versions which are used downstream. These should
    be set with default/missing values if they are not natively present.
    Therefore, gene_biotype is taken from the "feature" field if it is
    not present, and transcript_support = NA if missing.

    Arguments
    ---------
    ensembl_gtf: string
       PATH to ensembl gtf containing all annotation information and
       attributes
    geneset_gtf: string
       PATH to the geneset GTF to annotate with ensembl attributes
    outfile: string
       PATH to output filtered, annotated and sorted by gene position
    csvdb: string
       PATH to the SQLite database to upload transcript information
       table
    set_biotype: string
       should the gene_ and transcript_biotype columns be set
       to a default value. If false, and not present, default
       value is to use the "feature" attribute
    set_transcript_support: int
       should the transcript_support_level be set to a default value,
       if not it will be set to NA

    Returns
    -------
    int
       Always 1.
    '''
    table = P.toTable(outfile)
    gtf_file = IOTools.openFile(geneset_gtf, "rb")
    gtf_iterator = GTF.transcript_iterator(GTF.iterator(gtf_file))
    ensembl_file = IOTools.openFile(ensembl_gtf, "rb")
    ensembl_iterator = GTF.transcript_iterator(GTF.iterator(ensembl_file))
    # parse the two gtfs, creating keys from the GTF entries
    parse_ensembl = {}
    for ens_gtf in ensembl_iterator:
        for ens_trans in ens_gtf:
            # merge the attribute dictionary with the record's
            # positional fields
            ens_att = ens_trans.asDict()
            ens_vals = dict(zip(ens_trans.keys(),
                                [ens_trans[x] for x in ens_trans.keys()]))
            ens_att.update(ens_vals)
            parse_ensembl[ens_trans.transcript_id] = ens_att
    ensembl_file.close()
    parse_gtf = {}
    for gtf in gtf_iterator:
        for trans in gtf:
            trans_atts = trans.asDict()
            trans_vals = dict(zip(trans.keys(),
                                  [trans[g] for g in trans.keys()]))
            trans_atts.update(trans_vals)
            parse_gtf[trans.transcript_id] = trans_atts
    gtf_file.close()
    # convert to dataframe for easier merging, annotating
    # and ultimately SQL database insertion
    # these are large dictionaries to parse, so might
    # be quite memory and compute heavy
    ensembl_df = pd.DataFrame(parse_ensembl).T
    gtf_df = pd.DataFrame(parse_gtf).T
    # annotate the geneset with the full Ensembl attributes,
    # joining on all columns present in the geneset frame
    merged_df = pd.merge(gtf_df, ensembl_df,
                         left_on=[cx for cx in gtf_df.columns],
                         right_on=[rx for rx in gtf_df.columns],
                         how='left')
    # enforce presence of transcript_support_level and the biotype
    # columns, which vary between Ensembl releases
    if "transcript_support_level" in merged_df.columns:
        E.info("transcript_support_level is present")
    else:
        E.info("transcript_support_level is not present")
        if set_transcript_support:
            merged_df["transcript_support_level"] = set_transcript_support
        else:
            merged_df["transcript_support_level"] = "NA"
    if "gene_biotype" in merged_df.columns:
        E.info("gene biotype is present")
        if "transcript_biotype" in merged_df.columns:
            E.info("transcript biotype is present")
        else:
            E.info("transcript biotype is not present")
            if set_biotype:
                merged_df["transcript_biotype"] = set_biotype
            else:
                merged_df["transcript_biotype"] = "NA"
    else:
        E.info("gene biotype is not present")
        if set_biotype:
            merged_df["gene_biotype"] = set_biotype
            merged_df["transcript_biotype"] = set_biotype
        else:
            merged_df["gene_biotype"] = "NA"
            merged_df["transcript_biotype"] = "NA"
    # remove exon_number and exon_id to maintain compatibility with
    # previous code; errors="ignore" skips whichever of the two
    # columns is absent (replaces three nested try/except blocks)
    merged_df.drop(["exon_id", "exon_number"],
                   axis=1, inplace=True, errors="ignore")
    # sort the output and load into the csvdb
    # add a multindex to use multiple SQL indices
    merged_df.sort_values(by=["gene_id",
                              "transcript_id"],
                          inplace=True)
    merged_df.set_index(["gene_id", "gene_name",
                         "protein_id", "transcript_id"],
                        inplace=True, drop=True)
    merged_df.to_sql(name=table,
                     con=sqlite3.connect(csvdb),
                     if_exists='replace',
                     index_label=["gene_id",
                                  "gene_name",
                                  "protein_id",
                                  "transcript_id"])
    return 1
def loadTranscriptInformation(infile, outfile,
                              only_proteincoding=False):
    '''load transcript-related attributes from :term:`gtf` file into database.

    This method takes transcript-associated features from an
    :term:`gtf` file and collects the gene-related attributes in the
    9th column of the gtf file, ignoring exon_id and exon_number.

    To handle different Ensembl versions, gene_biotype and
    transcript_support are enforced if they are missing.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename, contains logging information. The
       table name is derived from the filename of outfile.
    only_proteincoding : bool
       If True, only consider protein coding genes.
    '''
    table = P.toTable(outfile)
    if only_proteincoding:
        # "% PARAMS" is a no-op (no placeholders); kept for consistency
        filter_cmd = """cgat gtf2gtf
        --method=filter --filter-method=proteincoding""" % PARAMS
    else:
        filter_cmd = "cat"
    # BUGFIX: the adjacent string literals previously concatenated
    # without separating spaces, producing malformed options such as
    # "--add-index=gene_name--add-index=protein_id". Trailing spaces
    # keep the options separate (same convention as loadTranscripts).
    load_statement = P.build_load_statement(
        table,
        options="--add-index=gene_id "
        "--add-index=gene_name "
        "--add-index=protein_id "
        "--add-index=transcript_id "
        "--map=gene_name:str")
    statement = '''zcat < %(infile)s
    | awk '$3 == "CDS"'
    | grep "transcript_id"
    | cgat gtf2gtf
    --method=sort --sort-order=gene+transcript
    | cgat gtf2tsv
    --attributes-as-columns --output-only-attributes -v 0
    | python %(toolsdir)s/csv_cut.py --remove exon_id exon_number
    | %(pipeline_scriptsdir)s/hsort 1 | uniq
    | %(load_statement)s
    > %(outfile)s'''
    P.run()
def buildCDNAFasta(infile, outfile):
    '''index an ENSEMBL cdna FASTA file

    The descriptions in the fasta file are truncated at the
    first space to contain only the sequence identifier.

    Arguments
    ---------
    infile : string
       ENSEMBL ``.cdna.fa.gz`` file in :term:`fasta` format
    outfile : string
       indexed file in :term:`fasta` format
    '''
    dbname = outfile[:-len(".fasta")]
    # BUGFIX: the perl one-liner used 'if ("^>")', which tests the
    # truthiness of the literal string "^>" (always true in Perl) and
    # therefore truncated at the first space on *every* line. The
    # intended header-only match is the pattern match /^>/.
    statement = '''gunzip
    < %(infile)s
    | perl -p -e 'if (/^>/) { s/ .*//};'
    | cgat index_fasta
    --force-output
    %(dbname)s -
    > %(dbname)s.log
    '''
    P.run()
def buildPeptideFasta(infile, outfile):
    '''index an ENSEMBL peptide FASTA file

    The descriptions in the fasta file are truncated at the
    first space to contain only the sequence identifier.

    Arguments
    ---------
    infile : string
       ENSEMBL ``.pep.all.fa.gz`` file in :term:`fasta` format
    outfile : string
       indexed file in :term:`fasta` format
    '''
    dbname = outfile[:-len(".fasta")]
    # BUGFIX: 'if ("^>")' tested a literal (always-true) string, so
    # the truncation applied to every line; /^>/ restricts it to
    # FASTA header lines as intended (see buildCDNAFasta).
    statement = '''gunzip
    < %(infile)s
    | perl -p -e 'if (/^>/) { s/ .*//};'
    | cgat index_fasta
    --force-output
    %(dbname)s -
    > %(dbname)s.log
    '''
    P.run()
def loadPeptideSequences(infile, outfile):
    '''load ENSEMBL peptide file into database

    This method removes empty sequences (see for example
    transcript:ENSMUST00000151316, ENSMUSP00000118372)

    The created table contains the columns ``protein_id``, ``length``
    and ``sequence``.

    Arguments
    ---------
    infile : string
       ENSEMBL ``.pep.all.fa.gz`` file in :term:`fasta` format
    outfile : string
       filename with logging information. The tablename is
       derived from ``outfile``.
    '''
    # BUGFIX: the options previously concatenated to
    # "--add-protein_id--map=protein_id:str" (missing space) and used
    # "--add-protein_id" instead of the "--add-index=protein_id"
    # option used by the sibling loadProteinStats.
    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=protein_id "
        "--map=protein_id:str")
    # BUGFIX: 'if ("^>")' was an always-true literal test; /^>/
    # restricts the truncation to header lines. The id -> protein_id
    # substitution is anchored to the line start, consistent with the
    # sed call in loadProteinStats.
    statement = '''gunzip
    < %(infile)s
    | perl -p -e 'if (/^>/) { s/ .*//};'
    | cgat fasta2fasta --method=filter
    --filter-method=min-length=1
    | cgat fasta2table --section=length
    --section=sequence
    | perl -p -e 's/^id/protein_id/'
    | %(load_statement)s
    > %(outfile)s'''
    P.run()
def buildCDSFasta(infiles, outfile):
    '''output CDS sequences.

    This method works by taking the CDNA and peptide sequence of a
    particular transcript and aligning them in order to remove any
    frameshifts.

    .. note::
        This method is untested.

    Arguments
    ---------
    infiles : list
       Two filenames: an ENSEMBL :term:`gtf` formatted file and the
       associated peptide sequences in :term:`fasta` format.
    outfile : string
       indexed file in :term:`fasta` format with CDS sequences.
    '''
    infile_cdnas, infile_peptides_fasta = infiles
    dbname = outfile[:-len(".fasta")]
    # first pass: extract sequences for the gtf entries and index them
    statement = '''gunzip < %(infile_cdnas)s
    | cgat gff2fasta
        --is-gtf
        --genome=%(genome_dir)s/%(genome)s
    | cgat index_fasta
    %(dbname)s --force-output -
    > %(dbname)s.log
    '''
    P.run()
    # build a protein_id -> transcript_id map from the transcript_info
    # table of the pipeline database
    tmpfile = P.getTempFile(".")
    dbhandle = sqlite3.connect(PARAMS["database_name"])
    cc = dbhandle.cursor()
    tmpfile.write("protein_id\ttranscript_id\n")
    tmpfile.write("\n".join(
        ["%s\t%s" % x for x in
         cc.execute(
             "SELECT DISTINCT protein_id, transcript_id "
             "FROM transcript_info")]))
    tmpfile.write("\n")
    tmpfile.close()
    tmpfilename = tmpfile.name
    # second pass: align peptides against cDNAs to derive the CDS and
    # re-index the result (note: `statement` is intentionally re-bound)
    statement = '''
    cgat peptides2cds
    --peptides-fasta-file=%(infile_peptides_fasta)s
    --cdnas=%(infile_cdnas)s
    --map=%(tmpfilename)s
    --output-format=fasta
    --log=%(outfile)s.log
    | cgat index_fasta
    %(dbname)s --force-output -
    > %(dbname)s.log
    '''
    P.run()
    os.unlink(tmpfilename)
def loadGeneStats(infile, outfile):
    """compute and load gene statistics to database.

    Gene statistics are computed by :doc:`gtf2table` with the
    following counters:

    * length - gene/exon lengths
    * position - gene position
    * composition-na - gene nucleotide composition

    Parameters
    ----------
    infile : string
        A :term:`gtf` file which is output from :meth:`buildGenes`
    outfile : string
        A log file. The table name is derived from `outfile`.
        e.g. bam_stats.load
    """
    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=gene_id "
        "--map=gene_name:str")
    statement = '''
    gunzip < %(infile)s
    | cgat gtf2table
    --log=%(outfile)s.log
    --genome=%(genome_dir)s/%(genome)s
    --counter=position
    --counter=length
    --counter=composition-na
    | %(load_statement)s
    > %(outfile)s'''
    P.run()
def buildExons(infile, outfile):
    '''output exons from ENSEMBL gene set.

    Remove all features from a :term:`gtf` file that are not of
    feature ``exon``. Duplicate gene features are removed as well
    (``--method=remove-duplicates``).

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename in :term:`gtf` format.
    '''
    statement = '''
    gunzip < %(infile)s
    | awk '$3 == "exon"'
    | cgat gtf2gtf
    --method=remove-duplicates --duplicate-feature=gene
    --log=%(outfile)s.log
    | gzip > %(outfile)s
    '''
    P.run()
def buildCodingExons(infile, outfile):
    '''output protein coding exons from ENSEMBL gene set.

    Remove all features from a :term:`gtf` file that are not ``exon``
    and are not protein-coding.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename in :term:`gtf` format.
    '''
    statement = '''
    zcat %(infile)s
    | cgat gtf2gtf
    --method=filter --filter-method=proteincoding
    --log=%(outfile)s.log
    | awk '$3 == "exon"'
    | cgat gtf2gtf
    --method=remove-duplicates --duplicate-feature=gene
    --log=%(outfile)s.log
    | gzip > %(outfile)s
    '''
    P.run()
def buildNonCodingExons(infile, outfile):
    '''output non-coding exons from ENSEMBL gene set.

    Keep only ``exon`` features of genes that are *not*
    protein-coding (the proteincoding filter is applied with
    ``--invert-filter``).

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename in :term:`gtf` format.
    '''
    statement = '''
    gunzip < %(infile)s
    | cgat gtf2gtf
    --method=filter --filter-method=proteincoding --invert-filter
    --log=%(outfile)s.log
    | awk '$3 == "exon"'
    | cgat gtf2gtf
    --method=remove-duplicates --duplicate-feature=gene
    --log=%(outfile)s.log
    | gzip > %(outfile)s
    '''
    P.run()
def buildLincRNAExons(infile, outfile):
    """output LincRNA portion of ENSEMBL geneset.

    Take all features from a :term:`gtf` file that are of feature type
    ``exon`` and that are annotated as a lincrna biotype.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename in :term:`gtf` format.
    """
    statement = '''
    gunzip < %(infile)s
    | cgat gtf2gtf
    --method=filter --filter-method=lincrna
    --log=%(outfile)s.log
    | awk '$3 == "exon"'
    | cgat gtf2gtf
    --method=remove-duplicates --duplicate-feature=gene
    --log=%(outfile)s.log
    | gzip > %(outfile)s
    '''
    P.run()
def buildCDS(infile, outfile):
    '''output CDS features from an ENSEMBL gene set.

    Take all features from a :term:`gtf` file that are of feature type
    ``CDS`` and that are annotated as protein-coding.

    Note that only the coding parts of exons are output - UTR's are
    removed.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename in :term:`gtf` format.
    '''
    statement = '''
    gunzip < %(infile)s
    | cgat gtf2gtf
    --method=filter --filter-method=proteincoding
    --log=%(outfile)s.log
    | awk '$3 == "CDS"'
    | cgat gtf2gtf
    --method=remove-duplicates --duplicate-feature=gene
    --log=%(outfile)s.log
    | gzip > %(outfile)s
    '''
    P.run()
def loadTranscripts(infile, outfile):
    '''load transcripts from a GTF file into the database.

    The table will be indexed on ``gene_id`` and ``transcript_id``

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Logfile. The table name is derived from `outfile`.
    '''
    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=gene_id "
        "--add-index=transcript_id "
        "--allow-empty-file ")
    statement = '''
    gunzip < %(infile)s
    | cgat gtf2tsv
    | %(load_statement)s
    > %(outfile)s'''
    P.run()
def loadGeneCoordinates(infile, outfile):
    '''merge transcripts to generate the genomic coordinates per gene
    and load.

    Transcripts are collapsed with ``--method=merge-transcripts``
    before loading, so the table holds one row per gene.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Logfile. The table name is derived from `outfile`.
    '''
    # TS. remove transcript_id column as this is now meaningless
    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=gene_id "
        "--ignore-column=transcript_id "
        "--allow-empty-file ")
    statement = '''
    gunzip < %(infile)s
    | cgat gtf2gtf
    --method=merge-transcripts
    | cgat gtf2tsv
    | %(load_statement)s
    > %(outfile)s'''
    P.run()
def loadTranscript2Gene(infile, outfile):
    '''build a map of transcript to gene from gtf file and load into database.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Logfile. The table name is derived from `outfile`.
    '''
    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=gene_id "
        "--add-index=transcript_id ")
    statement = '''
    gunzip < %(infile)s
    | cgat gtf2tsv --output-map=transcript2gene -v 0
    | %(load_statement)s
    > %(outfile)s'''
    P.run()
def loadTranscriptStats(infile, outfile):
    '''compute and load transcript properties into database.

    The method calls :doc:`gtf2table` with the following counters:

    * length - gene/exon lengths
    * position - gene position
    * composition-na - gene nucleotide composition

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Logfile. The table name is derived from `outfile`.
    '''
    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=gene_id "
        "--add-index=transcript_id "
        "--map=gene_id:str")
    statement = '''
    gunzip < %(infile)s |\
    cgat gtf2table \
          --log=%(outfile)s.log \
          --genome=%(genome_dir)s/%(genome)s \
          --reporter=transcripts \
          --counter=position \
          --counter=length \
          --counter=composition-na
    | %(load_statement)s
    > %(outfile)s'''
    P.run()
def loadProteinStats(infile, outfile):
    '''compute and load protein sequence properties into database.

    The method computes amino acid composition, length, and hash
    for each peptide sequence.

    The method calls :doc:`fasta2table` with the following counters:

    * length - protein sequence length
    * hid - protein sequence hash identifier
    * aa - protein sequence composition

    Arguments
    ---------
    infile : string
       Filename of ENSEMBL peptide file in :term:`fasta` format.
    outfile : string
       Logfile. The table name is derived from `outfile`.
    '''
    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=protein_id "
        "--map=protein_id:str")
    # NOTE(review): the awk program strips the sequence-version suffix
    # from header identifiers; the three-argument match() is a GNU awk
    # (gawk) extension -- presumably gawk is available on the cluster.
    statement = '''
    gunzip < %(infile)s
    | cgat fasta2fasta
    --method=filter
    --filter-method=min-length=1
    | awk 'match($0, /(>[a-zA-Z]+[0-9]+)(\.[0-9])*(.*)/, a) {print a[1], a[3]}
           !/^>/ {print}'
    | cgat fasta2table
    --log=%(outfile)s
    --sequence-type=aa
    --section=length
    --section=hid
    --section=aa
    --regex-identifier="(\S+)"
    | sed "s/^id/protein_id/"
    | %(load_statement)s
    > %(outfile)s'''
    P.run()
def buildPromotorRegions(infile, outfile, promotor_size=1000):
    '''annotate promotor regions from reference gene set.

    This method builds promotor regions for transcripts
    in an ENSEMBL gene set.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Filename in :term:`gff` format.
    promotor_size : int
       Size of the promotor region (nucleotides upstream
       of TSS).
    '''
    # sanitize first so that entries not on the reference genome are
    # skipped before promotors are computed
    statement = """
    gunzip < %(infile)s
    | cgat gff2gff --method=sanitize
    --sanitize-method=genome
    --skip-missing --genome-file=%(genome_dir)s/%(genome)s
    --log=%(outfile)s.log
    | cgat gtf2gff --method=promotors
    --promotor-size=%(promotor_size)s \
    --genome-file=%(genome_dir)s/%(genome)s
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s
    """
    P.run()
def buildTSSRegions(infile, outfile):
    '''annotate transcription start sites from reference gene set.

    This is a thin wrapper around :func:`buildPromotorRegions` with
    ``promotor_size=1``, i.e. each "promotor" is the single base at
    the TSS.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Filename in :term:`gff` format.
    '''
    buildPromotorRegions(infile, outfile, promotor_size=1)
def buildOverlapWithEnsembl(infile, outfile, filename_bed):
    '''compute overlap of genes with intervals.

    If `filename_bed` has multiple tracks the overlap will
    be computed for each track separately.

    The output is a tab-separated table with pairs of
    overlapping features between `infile` and `filename_bed`.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output file in :term:`tsv` format.
    filename_bed : string
       Filename in :term:`bed` format.
    '''
    # genes are collapsed to merged transcripts before the
    # gtf -> bed -> graph conversion
    statement = '''gunzip
    < %(infile)s
    | cgat gtf2gtf --method=merge-transcripts
    | cgat gff2bed --is-gtf
    | cgat bed2graph
    --output-section=name
    --log=%(outfile)s.log
    - %(filename_bed)s
    > %(outfile)s
    '''
    P.run()
def compareGeneSets(infiles, outfile):
    '''compute overlap of genes, exons and transcripts between two
    genesets.

    This method uses :mod:`scripts/diff_gtf`.

    Arguments
    ---------
    infiles : list
       Filenames of ENSEMBL genesets in :term:`gtf` format.
    outfile : string
       Output file in :term:`tsv` format.
    '''
    # join the filenames into a single command-line argument string
    infiles = " ".join(infiles)
    statement = '''
    cgat diff_gtf
    %(infiles)s
    > %(outfile)s
    '''
    P.run()
def buildPseudogenes(infiles, outfile, dbhandle):
    '''build a set of pseudogenes.

    Transcripts are extracted from the GTF file and designated as
    pseudogenes if:

    * the gene_type or transcript_type contains the phrase
      "pseudo". This is taken from the database.
    * the feature is 'processed_transcript' and has similarity to
      protein coding genes. Similarity is assessed by aligning the
      transcript and peptide set against each other with exonerate_.

    Pseudogenic transcripts can overlap with protein coding
    transcripts.

    Arguments
    ---------
    infiles : list
       Filenames of ENSEMBL geneset in :term:`gtf` format
       and associated peptide sequences in :term:`fasta` format.
    outfile : filename
       Output in :term:`gtf` format with inferred or annotated
       pseudogenes.
    dbhandle : object
       Database handle for extracting transcript biotypes.
    '''
    infile_gtf, infile_peptides_fasta = infiles
    # JJ - there are also 'nontranslated_CDS', but no explanation of these
    if PARAMS["genome"].startswith("dm"):
        E.warn("Ensembl dm genome annotations only contain source"
               " 'pseudogenes' - skipping exonerate step")
        statement = """zcat %(infile_gtf)s
        |awk '$2 ~ /pseudogene/'
        | gzip
        > %(outfile)s"""
        P.run()
        return
    tmpfile1 = P.getTempFilename(shared=True)
    # collect processed transcripts and save as fasta sequences
    statement = '''
    zcat %(infile_gtf)s
    | awk '$2 ~ /processed/'
    | cgat gff2fasta
            --is-gtf
            --genome-file=%(genome_dir)s/%(genome)s
            --log=%(outfile)s.log
    > %(tmpfile1)s
    '''
    P.run()
    if IOTools.isEmpty(tmpfile1):
        E.warn("no pseudogenes found")
        os.unlink(tmpfile1)
        P.touch(outfile)
        return
    model = "protein2dna"
    # map processed transcripts against peptide sequences
    statement = '''
    cat %(tmpfile1)s
    | %(cmd-farm)s --split-at-regex=\"^>(\S+)\" --chunk-size=100
    --log=%(outfile)s.log
    "exonerate --target %%STDIN%%
    --query %(infile_peptides_fasta)s
    --model %(model)s
    --bestn 1
    --score 200
    --ryo \\"%%qi\\\\t%%ti\\\\t%%s\\\\n\\"
    --showalignment no --showsugar no --showcigar no --showvulgar no
    "
    | grep -v -e "exonerate" -e "Hostname"
    | gzip > %(outfile)s.links.gz
    '''
    P.run()
    os.unlink(tmpfile1)
    # keep, for each transcript, only the highest-scoring peptide match
    inf = IOTools.openFile("%s.links.gz" % outfile)
    best_matches = {}
    for line in inf:
        peptide_id, transcript_id, score = line[:-1].split("\t")
        score = int(score)
        if transcript_id in best_matches and \
           best_matches[transcript_id][0] > score:
            continue
        best_matches[transcript_id] = (score, peptide_id)
    inf.close()
    E.info("found %i best links" % len(best_matches))
    new_pseudos = set(best_matches.keys())
    # pseudogenes already annotated as such in the database
    cc = dbhandle.cursor()
    known_pseudos = set([x[0] for x in cc.execute(
        """SELECT DISTINCT transcript_id
        FROM transcript_info
        WHERE transcript_biotype like '%pseudo%' OR
        gene_biotype like '%pseudo%' """)])
    E.info("pseudogenes from: processed_transcripts=%i, known_pseudos=%i, "
           "intersection=%i" % (
               (len(new_pseudos),
                len(known_pseudos),
                len(new_pseudos.intersection(known_pseudos)))))
    all_pseudos = new_pseudos.union(known_pseudos)
    # write out all gtf entries belonging to a pseudogenic transcript
    c = E.Counter()
    outf = IOTools.openFile(outfile, "w")
    inf = GTF.iterator(IOTools.openFile(infile_gtf))
    for gtf in inf:
        c.input += 1
        if gtf.transcript_id not in all_pseudos:
            continue
        c.output += 1
        outf.write("%s\n" % gtf)
    outf.close()
    E.info("exons: %s" % str(c))
def buildNUMTs(infile, outfile):
    '''output set of potential nuclear mitochondrial genes (NUMTs).

    This function works by aligning the mitochondrial chromosome
    against genome using exonerate_. This can take a while.

    Requires the pipeline parameters ``numts_mitochrom`` (name of the
    mitochondrial contig) and ``numts_score`` (minimum alignment
    score to keep).

    Arguments
    ---------
    infile : string
       Ignored.
    outfile : filename
       Output in :term:`gtf` format with potential NUMTs.
    '''
    if not PARAMS["numts_mitochrom"]:
        E.info("skipping numts creation")
        P.touch(outfile)
        return
    fasta = IndexedFasta.IndexedFasta(
        os.path.join(PARAMS["genome_dir"], PARAMS["genome"]))
    if PARAMS["numts_mitochrom"] not in fasta:
        E.warn("mitochondrial genome %s not found" % PARAMS["numts_mitochrom"])
        P.touch(outfile)
        return
    # extract the mitochondrial contig into a temporary fasta file
    tmpfile_mito = P.getTempFilename(".")
    statement = '''
    cgat index_fasta
    --extract=%(numts_mitochrom)s
    --log=%(outfile)s.log
    %(genome_dir)s/%(genome)s
    > %(tmpfile_mito)s
    '''
    P.run()
    if IOTools.isEmpty(tmpfile_mito):
        E.warn("mitochondrial genome empty.")
        os.unlink(tmpfile_mito)
        P.touch(outfile)
        return
    # exonerate --ryo output fields: query/target ids, strands and
    # coordinates, score, percent identity, cigar
    format = ("qi", "qS", "qab", "qae",
              "ti", "tS", "tab", "tae",
              "s",
              "pi",
              "C")
    format = "\\\\t".join(["%%%s" % x for x in format])
    # collect all results
    min_score = 100
    statement = '''
    cat %(genome_dir)s/%(genome)s.fasta
    | %(cmd-farm)s --split-at-regex=\"^>(\S+)\" --chunk-size=1
    --log=%(outfile)s.log
    "exonerate --target %%STDIN%%
    --query %(tmpfile_mito)s
    --model affine:local
    --score %(min_score)i
    --showalignment no --showsugar no --showcigar no
    --showvulgar no
    --ryo \\"%(format)s\\n\\"
    "
    | grep -v -e "exonerate" -e "Hostname"
    | gzip > %(outfile)s.links.gz
    '''
    P.run()
    # convert to gtf
    inf = IOTools.openFile("%s.links.gz" % outfile)
    outf = IOTools.openFile(outfile, "w")
    # re-filter with the (presumably stricter) score from PARAMS
    min_score = PARAMS["numts_score"]
    c = E.Counter()
    for line in inf:
        (query_contig, query_strand, query_start, query_end,
         target_contig, target_strand, target_start, target_end,
         score, pid, alignment) = line[:-1].split("\t")
        c.input += 1
        score = int(score)
        if score < min_score:
            c.skipped += 1
            continue
        # normalize coordinates so that start < end on the minus strand
        if target_strand == "-":
            target_start, target_end = target_end, target_start
        gff = GTF.Entry()
        gff.contig = target_contig
        gff.start, gff.end = int(target_start), int(target_end)
        assert gff.start < gff.end
        gff.strand = target_strand
        gff.score = int(score)
        gff.feature = "numts"
        gff.gene_id = "%s:%s-%s" % (query_contig, query_start, query_end)
        gff.transcript_id = "%s:%s-%s" % (query_contig, query_start, query_end)
        outf.write("%s\n" % str(gff))
        c.output += 1
    inf.close()
    outf.close()
    E.info("filtering numts: %s" % str(c))
    os.unlink(tmpfile_mito)
def sortGTF(infile, outfile, order="contig+gene"):
    '''sort a gtf file.

    The sorting is delegated to :mod:`scripts/gtf2gtf` and is
    performed on the cluster.

    Arguments
    ---------
    infile : string
       Geneset in :term:`gtf` format; may be plain or gzip compressed.
    outfile : string
       Geneset in :term:`gtf` format; compressed if the name ends
       in ``.gz``.
    order : string
       Sort order. See :mod:`scripts/gtf2gtf` for valid options for
       `order`.
    '''
    # choose (de)compression filters from the file suffixes; "cat"
    # is a wasteful but simple pass-through for uncompressed files
    uncompress = "zcat" if infile.endswith(".gz") else "cat"
    compress = "gzip" if outfile.endswith(".gz") else "cat"
    # picked up by P.run() for the cluster job
    job_memory = "4G"
    statement = '''%(uncompress)s %(infile)s
    | cgat gtf2gtf
    --method=sort --sort-order=%(order)s --log=%(outfile)s.log
    | %(compress)s > %(outfile)s'''
    P.run()
def buildGenomicFunctionalAnnotation(gtffile, dbh, outfiles,
                                     job_memory="4G"):
    '''output a bed file with functional annotations.

    The genomic region a gene covers is taken from the `gtffile`.
    There should only be one entry per gene, i.e. exons should
    have been combined into a gene territory.

    Each entry in the output bed file is a gene territory. Bed entries
    are labeled by functional annotations associated by that gene.

    Ambiguities in territories are resolved by outputting annotations
    for all genes within a territory.

    The output file contains annotations for both GO and GOSlim. These
    are prefixed by ``go:`` and ``goslim:``.

    Arguments
    ---------
    gtffile : string
       ENSEMBL geneset in :term:`gtf` format.
    dbh : object
       Database handle to retrieve GO assignments for each gene
    outfiles : list
       Output filenames. The first is a :term:`bed` formatted file
       of gene territories. The second is a :term:`tsv` formatted
       table mapping GO terms to their description.
    job_memory : string
       Memory request for the cluster sort job (consumed by P.run()).
    '''
    outfile_bed, outfile_tsv = outfiles
    # Map every gene_id component to its territory.  gene_ids are split on
    # ":" - NOTE(review): presumably composite ids from merged territories;
    # confirm against the gtf produced upstream.
    gene2region = {}
    for gtf in GTF.iterator(IOTools.openFile(gtffile, "r")):
        gid = gtf.gene_id.split(":")
        for g in gid:
            gene2region[g] = (gtf.contig, gtf.start, gtf.end, gtf.strand)
    cc = dbh.cursor()
    outf = P.getTempFile(".")
    c = E.Counter()
    term2description = {}
    # Emit one bed line per (gene, GO assignment) for both ontologies;
    # assignments for genes without a territory are counted and skipped.
    for db in ('go', 'goslim'):
        for gene_id, go_id, description in cc.execute(
                "SELECT gene_id, go_id, description FROM %s_assignments" % db):
            try:
                contig, start, end, strand = gene2region[gene_id]
            except KeyError:
                c.notfound += 1
                continue
            outf.write(
                "\t".join(map(str, (
                    contig, start, end,
                    "%s:%s" % (db, go_id), 1, strand))) + "\n")
            term2description["%s:%s" % (db, go_id)] = description
    outf.close()
    tmpfname = outf.name
    # Sort by position, drop exact duplicates, and compress.  P.run()
    # interpolates %(name)s from this function's locals.
    statement = '''sort -k1,1 -k2,2n < %(tmpfname)s | uniq
    | gzip > %(outfile_bed)s'''
    P.run()
    # Write the GO term -> description lookup table.
    outf = IOTools.openFile(outfile_tsv, "w")
    outf.write("term\tdescription\n")
    for term, description in term2description.items():
        outf.write("%s\t%s\n" % (term, description))
    outf.close()
    os.unlink(tmpfname)
def buildGenomicContext(infiles, outfile, distance=10):
    '''build a :term:`bed` formatted file with genomic context.

    The output is a bed formatted file, annotating genomic segments
    according to whether they are any of the ENSEMBL annotations.

    The function also adds the RNA and repeats annotations from the UCSC.
    The annotations can be partially or fully overlapping. Adjacent
    features (less than `distance` bp apart) of the same type are merged.

    Arguments
    ---------
    infiles : list
       A list of six input files to generate annotations from:
       1. ``repeats``, a :term:`gff` formatted file with repeat annotations
       2. ``rna``, a :term:`gff` formatted file with small, repetitive
          RNA annotations
       3. ``annotations``, a :term:`gtf` formatted file with genomic
          annotations, see :func:`annotateGenome`.
       4. ``geneset_flat``, a flattened gene set in :term:`gtf` format, see
          :func:`buildFlatGeneSet`.
       5. ``cpgisland_bed``, a :term:`bed` formatted file of CpG islands
       6. ``go_tsv``, a :term:`tsv` formatted table of GO assignments
    outfile : string
       Output filename in :term:`bed` format.
    distance : int
       Merge adjacent features of the same type within this distance.
    '''
    repeats_gff, rna_gff, annotations_gtf, geneset_flat_gff, \
        cpgisland_bed, go_tsv = infiles
    # One temporary output per annotation source; they are merged at the end.
    tmpfile = P.getTempFilename(shared=True)
    tmpfiles = ["%s_%i" % (tmpfile, x) for x in range(6)]
    # add ENSEMBL annotations, one bed interval per merged gene, named by
    # gene_biotype.  All P.run() statements below interpolate %(name)s
    # placeholders from this function's local variables.
    statement = """
    zcat %(annotations_gtf)s
    | cgat gtf2gtf
    --method=sort --sort-order=gene
    | cgat gtf2gtf
    --method=merge-exons --log=%(outfile)s.log
    | cgat gff2bed
    --set-name=gene_biotype --is-gtf
    --log=%(outfile)s.log
    | sort -k 1,1 -k2,2n
    | cgat bed2bed --method=merge --merge-by-name
    --merge-distance=%(distance)i --log=%(outfile)s.log
    > %(tmpfile)s_0
    """
    P.run()
    # rna: individual repeat/RNA features named by their family
    statement = '''
    zcat %(repeats_gff)s %(rna_gff)s
    | cgat gff2bed --set-name=family --is-gtf -v 0
    | sort -k1,1 -k2,2n
    | cgat bed2bed --method=merge --merge-by-name
    --merge-distance=%(distance)i --log=%(outfile)s.log
    > %(tmpfile)s_1'''
    P.run()
    # add aggregate intervals for repeats (all renamed to "repeats")
    statement = '''
    zcat %(repeats_gff)s
    | cgat gff2bed --set-name=family --is-gtf -v 0
    | awk -v OFS="\\t" '{$4 = "repeats"; print}'
    | sort -k1,1 -k2,2n
    | cgat bed2bed --method=merge --merge-by-name
    --merge-distance=%(distance)i --log=%(outfile)s.log
    > %(tmpfile)s_2'''
    P.run()
    # add aggregate intervals for rna (all renamed to "repetetive_rna")
    statement = '''
    zcat %(rna_gff)s
    | cgat gff2bed --set-name=family --is-gtf -v 0
    | awk -v OFS="\\t" '{$4 = "repetetive_rna"; print}'
    | sort -k1,1 -k2,2n
    | cgat bed2bed --method=merge --merge-by-name
    --merge-distance=%(distance)i --log=%(outfile)s.log
    > %(tmpfile)s_3 '''
    P.run()
    # add ribosomal protein coding genes, selected by GO id via the
    # go_tsv table (genes with any of `goids` are kept)
    goids = ("GO:0003735", )
    patterns = "-e %s" % ("-e ".join(goids))
    statement = '''
    zcat %(geneset_flat_gff)s
    | cgat gtf2gtf
    --map-tsv-file=<(zcat %(go_tsv)s | grep %(patterns)s | cut -f 2 | sort | uniq)
    --method=filter --filter-method=gene
    --log=%(outfile)s.log
    | cgat gff2bed
    --log=%(outfile)s.log
    | awk -v OFS="\\t" '{$4 = "ribosomal_coding"; print}'
    | sort -k1,1 -k2,2n
    | cgat bed2bed --method=merge --merge-by-name
    --merge-distance=%(distance)i --log=%(outfile)s.log
    > %(tmpfile)s_4
    '''
    P.run()
    # CpG islands, reduced to 4-column bed named "cpgisland"
    statement = '''
    zcat %(cpgisland_bed)s
    | awk '{printf("%%s\\t%%i\\t%%i\\tcpgisland\\n", $1,$2,$3 )}'
    > %(tmpfile)s_5
    '''
    P.run()
    # sort and merge; `sort --merge` assumes each input is already
    # position-sorted (each pipeline above sorts its output).
    # remove strand information as bedtools
    # complains if there are annotations with
    # different number of field
    files = " ".join(tmpfiles)
    statement = '''
    sort --merge -k1,1 -k2,2n %(files)s
    | cut -f 1-4
    | gzip
    > %(outfile)s
    '''
    P.run()
    for x in tmpfiles:
        os.unlink(x)
| {
"content_hash": "c68d78106f18b566bd58c05c33fce7d4",
"timestamp": "",
"source": "github",
"line_count": 1647,
"max_line_length": 179,
"avg_line_length": 27.97571341833637,
"alnum_prop": 0.5948650056428509,
"repo_name": "CGATOxford/CGATPipelines",
"id": "6bc8452c96db04b6be7d85a6bee37a4cac5f3346",
"size": "46076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CGATPipelines/PipelineGeneset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4360"
},
{
"name": "HTML",
"bytes": "40732"
},
{
"name": "JavaScript",
"bytes": "302029"
},
{
"name": "Jupyter Notebook",
"bytes": "4393775"
},
{
"name": "Makefile",
"bytes": "45084"
},
{
"name": "Python",
"bytes": "5357820"
},
{
"name": "R",
"bytes": "62312"
},
{
"name": "Shell",
"bytes": "67312"
}
],
"symlink_target": ""
} |
import pytest
from airflow.providers.google.cloud.example_dags.example_speech_to_text import BUCKET_NAME
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_GCS_KEY)
class GCPSpeechToTextExampleDagSystemTest(GoogleSystemTest):
    """System test running the example Speech-to-Text DAG against real GCP.

    Requires GCS credentials (GCP_GCS_KEY) and a MySQL or Postgres backend;
    each test method runs inside a provided GCP auth context.
    """

    @provide_gcp_context(GCP_GCS_KEY)
    def setUp(self):
        """Create the GCS bucket used by the example DAG."""
        super().setUp()
        self.create_gcs_bucket(BUCKET_NAME)

    @provide_gcp_context(GCP_GCS_KEY)
    def tearDown(self):
        """Delete the bucket created in setUp, then run base teardown."""
        self.delete_gcs_bucket(BUCKET_NAME)
        super().tearDown()

    @provide_gcp_context(GCP_GCS_KEY)
    def test_run_example_dag_gcp_speech_to_text(self):
        """Execute the example DAG end to end from the cloud DAG folder."""
        self.run_dag("example_gcp_speech_to_text", CLOUD_DAG_FOLDER)
| {
"content_hash": "5aebc3a02b316cd534cdf3bb09c4453e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 103,
"avg_line_length": 37.30434782608695,
"alnum_prop": 0.7354312354312355,
"repo_name": "bolkedebruin/airflow",
"id": "af4a4fd0a750fa35c270c2fdf222164986f3d099",
"size": "1646",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/providers/google/cloud/operators/test_speech_to_text_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
} |
"""
:copyright: Alistair Muldal
:license: Unknown, shared on StackOverflow and Pastebin
Reference:
P. Perona and J. Malik.
Scale-space and edge detection using anisotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
<http://www.cs.berkeley.edu/~malik/papers/MP-aniso.pdf>
Original MATLAB code by Peter Kovesi
School of Computer Science & Software Engineering
The University of Western Australia
pk @ csse uwa edu au
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal
Department of Pharmacology
University of Oxford
<alistair.muldal@pharm.ox.ac.uk>
June 2000 original version.
March 2002 corrected diffusion eqn No 2.
July 2012 translated to Python
"""
import numpy as np
import warnings
def anisodiff(img, niter=1, kappa=50, gamma=0.1, step=(1., 1.), option=1):
    """
    Anisotropic (Perona-Malik) diffusion of a 2-D image.

    Usage:
        imgout = anisodiff(im, niter, kappa, gamma, option)

    Arguments:
        img    - input image (2-D array; 3-D input is averaged over the
                 last axis with a warning)
        niter  - number of iterations
        kappa  - conduction coefficient 20-100 ?
        gamma  - max value of .25 for stability
        step   - tuple, the distance between adjacent pixels in (y,x)
        option - 1 Perona Malik diffusion equation No 1
                 2 Perona Malik diffusion equation No 2

    Returns:
        imgout - diffused image (float32).

    kappa controls conduction as a function of gradient: a low kappa lets
    small intensity gradients block conduction across step edges; a large
    value reduces the influence of gradients.  gamma controls the speed of
    diffusion (keep it at or below 0.25).  Equation 1 favours high-contrast
    edges over low-contrast ones; equation 2 favours wide regions over
    smaller ones.
    """
    # Colour input is not supported: average the channels into one plane.
    if img.ndim == 3:
        warnings.warn("Only grayscale images allowed, converting to 2D matrix")
        img = img.mean(2)

    # Work on a float32 copy so integer input does not truncate updates.
    img = img.astype('float32')
    imgout = img.copy()

    # Scratch arrays.  The last row/column of each delta array is never
    # assigned and stays zero, which yields zero-flux boundary conditions.
    deltaS = np.zeros_like(imgout)
    deltaE = deltaS.copy()
    NS = deltaS.copy()
    EW = deltaS.copy()
    gS = np.ones_like(imgout)
    gE = gS.copy()

    # bug fix: was `xrange`, which only exists on Python 2.
    for _ in range(niter):
        # forward differences along each axis
        deltaS[:-1, :] = np.diff(imgout, axis=0)
        deltaE[:, :-1] = np.diff(imgout, axis=1)

        # conduction coefficients (only one needed per dimension)
        if option == 1:
            # eqn 1: favours high-contrast edges over low-contrast ones
            gS = np.exp(-(deltaS/kappa)**2.)/step[0]
            gE = np.exp(-(deltaE/kappa)**2.)/step[1]
        elif option == 2:
            # eqn 2: favours wide regions over smaller ones
            gS = 1./(1.+(deltaS/kappa)**2.)/step[0]
            gE = 1./(1.+(deltaE/kappa)**2.)/step[1]

        # flux in each direction
        S = gS*deltaS
        E = gE*deltaE

        # divergence: subtract a copy shifted one pixel 'North/West'
        NS[:] = S
        EW[:] = E
        NS[1:, :] -= S[:-1, :]
        EW[:, 1:] -= E[:, :-1]

        # explicit Euler update
        imgout += gamma*(NS+EW)

    return imgout
def anisodiff3(stack, niter=1, kappa=50, gamma=0.1, step=(1., 1., 1.), option=1):
    """
    3-D anisotropic (Perona-Malik) diffusion of an image stack.

    Usage:
        stackout = anisodiff(stack, niter, kappa, gamma, option)

    Arguments:
        stack  - input stack (3-D; 4-D input is averaged over the last
                 axis with a warning)
        niter  - number of iterations
        kappa  - conduction coefficient 20-100 ?
        gamma  - max value of .25 for stability
        step   - tuple, the distance between adjacent pixels in (z,y,x)
        option - 1 Perona Malik diffusion equation No 1
                 2 Perona Malik diffusion equation No 2

    Returns:
        stackout - diffused stack (float32).

    kappa controls conduction as a function of gradient; gamma controls the
    speed of diffusion (keep it at or below 0.25).  Equation 1 favours
    high-contrast edges, equation 2 favours wide regions.
    """
    # Multichannel stacks are collapsed to grayscale by channel averaging.
    if stack.ndim == 4:
        warnings.warn("Only grayscale stacks allowed, converting to 3D matrix")
        stack = stack.mean(3)

    # float32 working copy of the input
    out = stack.astype('float32')

    # Scratch arrays; the trailing plane/row/column of each delta array
    # stays zero, giving zero-flux boundaries.
    deltaD = np.zeros_like(out)
    deltaS = np.zeros_like(out)
    deltaE = np.zeros_like(out)
    UD = np.zeros_like(out)
    NS = np.zeros_like(out)
    EW = np.zeros_like(out)
    gD = np.ones_like(out)
    gS = np.ones_like(out)
    gE = np.ones_like(out)

    for _ in range(niter):
        # forward differences along each of the three axes
        deltaD[:-1, :, :] = np.diff(out, axis=0)
        deltaS[:, :-1, :] = np.diff(out, axis=1)
        deltaE[:, :, :-1] = np.diff(out, axis=2)

        # conduction coefficients, one per axis
        if option == 1:
            gD = np.exp(-(deltaD/kappa)**2.)/step[0]
            gS = np.exp(-(deltaS/kappa)**2.)/step[1]
            gE = np.exp(-(deltaE/kappa)**2.)/step[2]
        elif option == 2:
            gD = 1./(1.+(deltaD/kappa)**2.)/step[0]
            gS = 1./(1.+(deltaS/kappa)**2.)/step[1]
            gE = 1./(1.+(deltaE/kappa)**2.)/step[2]

        # flux along each axis
        fluxD = gD*deltaD
        fluxS = gS*deltaS
        fluxE = gE*deltaE

        # divergence: subtract the flux shifted one voxel Up/North/West
        UD[:] = fluxD
        NS[:] = fluxS
        EW[:] = fluxE
        UD[1:, :, :] -= fluxD[:-1, :, :]
        NS[:, 1:, :] -= fluxS[:, :-1, :]
        EW[:, :, 1:] -= fluxE[:, :, :-1]

        # explicit Euler update
        out += gamma*(UD+NS+EW)

    return out
| {
"content_hash": "3b8409c9d305a66f53227dc85d6ab939",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 81,
"avg_line_length": 30.96135265700483,
"alnum_prop": 0.6036823217350601,
"repo_name": "agile-geoscience/agilegeo",
"id": "e9ae9764ef9d72646c779372d3b268a4c54e8afb",
"size": "6433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bruges/filters/anisodiff.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "113166"
}
],
"symlink_target": ""
} |
import os
import lxml.etree
def constructURL(command, params=None):
    """Construct a request URL for the e-stat REST API.

    Parameters
    ----------
    command : str
        One of "getStatsList", "getMetaInfo", "getStatsData".
    params : dict, optional
        Query parameters; every key must be valid for `command`.
        May be None/omitted for no extra parameters.

    Returns
    -------
    str
        Full request URL with appId taken from the PYESTAT_KEY
        environment variable.

    Raises
    ------
    ValueError
        If the command or a parameter name is not recognised.
    KeyError
        If PYESTAT_KEY is not set in the environment.
    """
    base = "http://api.e-stat.go.jp/rest/1.0/app"
    # renamed from `key` so the URL loop below cannot shadow it
    app_key = os.environ["PYESTAT_KEY"]
    commandList = {
        "getStatsList": ["surveyYears", "openYears", "statsField", "statsCode",
                         "searchWord", "searchKind", "statsNameList"],
        "getMetaInfo": ["statsDataId"],
        "getStatsData": ["dataSetId", "statsDataId", "limit"]
    }
    # command check
    if command not in commandList:
        raise ValueError("command must one of " + ", ".join(commandList))
    # argument check.  Bug fix: the old code called params.keys()
    # unconditionally, raising AttributeError whenever params was None.
    if params:
        for param in params:
            if param not in commandList[command]:
                raise ValueError(
                    "argument of %s must one of " % (command)
                    + ", ".join(commandList[command]))
    # construct url.  NOTE(review): values are not URL-encoded; confirm
    # callers never pass values containing '&', '=' or spaces.
    url = "%s/%s?appId=%s" % (base, command, app_key)
    if params:
        for name in params:
            url += "&" + name + "=" + params[name]
    return url
def xml2obj_sub(xml):
    """Map each direct child's tag (lowercased) to its text content."""
    return {child.tag.lower(): child.text for child in xml.iterchildren()}
def xml2obj_attrib2dict(xml):
    """Collapse an element into a dict of its attributes (keys lowercased).

    Truthy element text is added under the "text" key; if the element has
    no attributes at all, the bare text string is returned instead.
    """
    result = {name.lower(): xml.attrib[name] for name in xml.attrib}
    if not xml.text:
        return result
    if result:
        result["text"] = xml.text
        return result
    return xml.text
def xml2obj(string):
    """XML response to object.

    Parses an e-stat API XML response and dispatches on the root tag
    (GET_STATS_LIST / GET_META_INFO / GET_STATS_DATA), returning a nested
    dict mirroring the XML structure.  Falls off the end (returns None)
    for any other root tag.
    """
    xml = lxml.etree.fromstring(string)
    obj = {}
    if xml.tag == "GET_STATS_LIST":
        # common header sections
        obj["result"] = xml2obj_sub(xml.find("RESULT"))
        obj["parameter"] = xml2obj_sub(xml.find("PARAMETER"))
        # one dict per LIST_INF entry: its attributes plus one key per
        # child element (each collapsed via xml2obj_attrib2dict)
        obj["datalist_inf"] = []
        for tag in xml.find("DATALIST_INF").iterfind("LIST_INF"):
            listinf = xml2obj_attrib2dict(tag)
            for tmp in tag.iterchildren():
                listinf[tmp.tag.lower()] = xml2obj_attrib2dict(tmp)
            obj["datalist_inf"].append(listinf)
        return obj
    if xml.tag == "GET_META_INFO":
        obj["result"] = xml2obj_sub(xml.find("RESULT"))
        obj["parameter"] = xml2obj_sub(xml.find("PARAMETER"))
        obj["metadata_inf"] = {}
        obj["metadata_inf"]["table_inf"] = xml2obj_sub(xml.find("METADATA_INF").find("TABLE_INF"))
        # each CLASS_INF child: its attributes plus a "class" list holding
        # the raw attribute mappings of its children
        obj["metadata_inf"]["class_inf"] = []
        for tag in xml.find("METADATA_INF").find("CLASS_INF").iterchildren():
            classobj = xml2obj_attrib2dict(tag)
            classobj["class"] = [t.attrib for t in tag.iterchildren()]
            obj["metadata_inf"]["class_inf"].append(classobj)
        return obj
    if xml.tag == "GET_STATS_DATA":
        obj["result"] = xml2obj_sub(xml.find("RESULT"))
        obj["parameter"] = xml2obj_sub(xml.find("PARAMETER"))
        obj["statistical_data"] = {
            "table_inf": {},
            "class_inf": [],
            "data_inf": {"note": [], "value": []}
        }
        obj["statistical_data"]["table_inf"] = xml2obj_sub(xml.find("STATISTICAL_DATA").find("TABLE_INF"))
        for tag in xml.find("STATISTICAL_DATA").find("CLASS_INF").iterchildren():
            classobj = xml2obj_attrib2dict(tag)
            classobj["class"] = [t.attrib for t in tag.iterchildren()]
            obj["statistical_data"]["class_inf"].append(classobj)
        # NOTE and VALUE entries are flat attribute/text dicts
        for tag in xml.find("STATISTICAL_DATA").find("DATA_INF").iterfind("NOTE"):
            note = xml2obj_attrib2dict(tag)
            obj["statistical_data"]["data_inf"]["note"].append(note)
        for tag in xml.find("STATISTICAL_DATA").find("DATA_INF").iterfind("VALUE"):
            value = xml2obj_attrib2dict(tag)
            obj["statistical_data"]["data_inf"]["value"].append(value)
        return obj
def find(year=None, month=None, statsCode=None, keyword=None):
    """Build the getStatsList query used to look up statistics ids.

    NOTE(review): the constructed URL is assigned but never returned or
    fetched here, and statsCode/keyword are unused - presumably the
    implementation is unfinished; confirm intended behaviour.
    """
    params = {}

    def span(value, fmt):
        # Format a single value or a (from, to) tuple as a string pair.
        if type(value) is tuple:
            return fmt % value[0], fmt % value[1]
        return fmt % value, fmt % value

    ###
    ## surveyYears
    if type(year) is tuple and month is None:
        raise ValueError("if year is tuple, month must be given")
    if year is not None:
        if type(year) is tuple and len(year) != 2:
            raise ValueError("year must be integer or 2-length tuple")
        begin, end = span(year, "%4d")
        if month is not None:
            if type(month) is tuple and len(month) != 2:
                raise ValueError("month must be integer or 2-length tuple")
            month_begin, month_end = span(month, "%02d")
            begin += month_begin
            end += month_end
        params["surveyYears"] = begin if begin == end else begin + "-" + end
    url = constructURL("getStatsList", params)
| {
"content_hash": "1aed88a75d6bbaf6ba0417f1a5b2e10f",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 106,
"avg_line_length": 34.042857142857144,
"alnum_prop": 0.5558120016785565,
"repo_name": "mzmttks/pyestat",
"id": "fe7a695851b1a2d4c37863288ec84ac1d002531e",
"size": "4766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyestat/getData.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7286"
}
],
"symlink_target": ""
} |
"""
Handlers to process the responses from the Humble Bundle API
"""
__author__ = "Joel Pedraza"
__copyright__ = "Copyright 2014, Joel Pedraza"
__license__ = "MIT"
import itertools
import requests
from humblebundle.exceptions import *
from humblebundle.models import *
# Helper methods
def parse_data(response):
    """Decode a JSON response body.

    :param response: a requests-style response object.
    :raises HumbleParseException: if the body is not valid JSON.
    """
    try:
        return response.json()
    except ValueError as e:
        # Bug fix: the message was passed as ("Invalid JSON: %s", str(e)),
        # so the placeholder was never substituted; format it explicitly.
        raise HumbleParseException(
            "Invalid JSON: %s" % (str(e),),
            request=response.request, response=response)
def get_errors(data):
    """Extract the 'errors' mapping from *data* and a joined message.

    Returns a (errors, message) pair; message is "Unspecified error"
    when no errors are present.
    """
    errors = data.get('errors')
    if not errors:
        return errors, "Unspecified error"
    messages = []
    for _, values in errors.items():
        messages.extend(values)
    return errors, ", ".join(messages)
def authenticated_response_helper(response, data):
    """Return True for a healthy authenticated response, else raise.

    Successful API calls might not carry a 'success' property, so only an
    explicit True short-circuits; None is acceptable and falls through.
    """
    success = data.get('success', None)
    if success is True:
        return True

    error_id = data.get('error_id', None)
    errors, error_msg = get_errors(data)

    # Calls that require login with a missing/invalid token.
    if error_id == 'login_required':
        raise HumbleAuthenticationException(
            error_msg, request=response.request, response=response)

    # Something went wrong; hopefully error_msg is informative.
    if success is False or errors is not None or error_id is not None:
        raise HumbleResponseException(
            error_msg, request=response.request, response=response)

    # No success or error fields at all - treat as plain data.
    return True
# Response handlers
def login_handler(client, response):
    """Handle the login response (always JSON); True on success, else raise
    the most specific authentication exception available."""
    data = parse_data(response)
    if data.get('success', None) is True:
        return True

    captcha_required = data.get('captcha_required')
    authy_required = data.get('authy_required')
    errors, error_msg = get_errors(data)

    # All login failures carry the same keyword payload.
    details = dict(request=response.request, response=response,
                   captcha_required=captcha_required,
                   authy_required=authy_required)

    if errors:
        if errors.get('captcha'):
            raise HumbleCaptchaException(error_msg, **details)
        if errors.get('username'):
            raise HumbleCredentialException(error_msg, **details)
        if errors.get("authy-token"):
            raise HumbleTwoFactorException(error_msg, **details)

    # Fallback for any other authentication failure.
    raise HumbleAuthenticationException(error_msg, **details)
def gamekeys_handler(client, response):
    """Handle the get_gamekeys response (always JSON): a list of gamekeys."""
    data = parse_data(response)
    if isinstance(data, list):
        keys = []
        for entry in data:
            keys.append(entry['gamekey'])
        return keys
    # Raise any standard error condition first.
    authenticated_response_helper(response, data)
    # Neither a list nor a recognised error - the body shape is unexpected.
    raise HumbleResponseException("Unexpected response body",
                                  request=response.request, response=response)
def order_list_handler(client, response):
    """Handle the order_list response (always JSON): a list of Orders."""
    data = parse_data(response)
    if isinstance(data, list):
        orders = []
        for entry in data:
            orders.append(Order(client, entry))
        return orders
    # Raise any standard error condition first.
    authenticated_response_helper(response, data)
    # Neither a list nor a recognised error - the body shape is unexpected.
    raise HumbleResponseException("Unexpected response body",
                                  request=response.request, response=response)
def order_handler(client, response):
    """Handle a single-order response.

    The endpoint answers 404 with an empty body when the order is missing,
    so that case is checked before attempting to parse JSON.
    """
    if response.status_code == requests.codes.not_found:
        raise HumbleResponseException("Order not found",
                                      request=response.request,
                                      response=response)
    data = parse_data(response)
    # The helper raises on any other error condition.
    if authenticated_response_helper(response, data):
        return Order(client, data)
def claimed_entities_handler(client, response):
    """Handle the claimed_entities response (always JSON).

    Returns the parsed JSON dict; the helper raises on any error condition.
    """
    payload = parse_data(response)
    if authenticated_response_helper(response, payload):
        return payload
def sign_download_url_handler(client, response):
    """Handle the sign_download_url response (always JSON): the signed URL."""
    data = parse_data(response)

    # Unauthorized requests (including invalid machine names) use their own
    # '_errors'/'_message' error shape rather than the standard one.
    api_errors = data.get('_errors', None)
    api_message = data.get('_message', None)
    if api_errors:
        raise HumbleResponseException(
            "%s: %s" % (api_errors, api_message),
            request=response.request, response=response)

    # A signed-out user produces a "typical" error that the helper raises.
    if authenticated_response_helper(response, data):
        return data['signed_url']
def store_products_handler(client, response):
    """Convert a store-search JSON response into StoreProduct objects."""
    payload = parse_data(response)
    products = []
    for entry in payload['results']:
        products.append(StoreProduct(client, entry))
    return products
| {
"content_hash": "da99466bc422de1537514e15ffe4adc0",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 114,
"avg_line_length": 33.14705882352941,
"alnum_prop": 0.6858917480035492,
"repo_name": "saik0/humblebundle-python",
"id": "36fc6e1d6dec94cb42114e9b864b2b57c25dadc4",
"size": "5635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "humblebundle/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30438"
}
],
"symlink_target": ""
} |
#include our libraries & settings
import httplib
import time
import settings
import motion
import shutil
import os
import sys
while True:
try:
#create a connection to the webcam
print "Connectiong to", settings.host
conn = httplib.HTTPConnection(settings.host)
#request the mjpeg stream
print "Requesting stream @", settings.path
conn.request("GET", settings.path)
#get the response
r1 = conn.getresponse()
#display the response status
print "Device returned:", r1.status, r1.reason
#control variables initial states
in_file=False
file_write=False
first_image=True
#read data from the response and write it to a file
print "Streaming", settings.chunk_size, " byte chunks from device"
while True:
new_data=r1.read(settings.chunk_size) #chunk size is in bytes
#default values for every chunk
start_write=0
end_write=settings.chunk_size
for i in range(0, settings.chunk_size - 1):
if in_file:
file_write=True
#look for jpeg end of image (aka EOI) mark (0xFFD9)
if(new_data[i]==chr(255) and new_data[i+1]==chr(217)):
#Record the last byte address of this chunk
end_write=i+2
#Record that we are no longer in a file
in_file=False
#Perform our final write
f.write(new_data[start_write:end_write])
#Record that we are done writing to this file
file_write==False
#Close the file
f.close()
#if not the first image then look for motion
if (first_image==False):
# difference = motion.compare(filename, prior_filename)
# print "Hue Variation : " , int(difference)
motion_level = motion.detect_motion(prior_filename, filename, settings.rgb_threshold, settings.rgb_blocksize, settings.rgb_outline, settings.rgb_outline_color)
os.remove(prior_filename)
print "Motion Level :", int(motion_level)
if motion_level > settings.threshold:
shutil.copy(filename, "motion/")
if settings.rgb_outline:
shutil.copy(filename + ".outlined.jpg", "motion.outlined/")
os.remove(filename + ".outlined.jpg")
#record the filename of the image we just created as the prior image
prior_filename=filename
#Record we are no longer after the first iamge
first_image=False
else:
#look for jpeg start of image (aka SOI) mark (0xFFD8)
if(new_data[i]==chr(255) and new_data[i+1]==chr(216)):
#We want to write data to the file this pass
file_write=True
#We are now inside a file
in_file=True
#Record the offset to start writing at
start_write=i
#lets assume this frame ends at the end of this for safety sake
end_write=settings.chunk_size
#open a file to write to
filename="tmp/" + str(time.time()) + ".jpg"
f=open(filename, 'wb')
else:
file_write=False
#Check if we need to perform a write
if(file_write==True):
f.write(new_data[start_write:end_write])
except:
print "Unexpected error:", sys.exc_info()[0], sys.exc_info()[1]
print "Attempting to recover."
| {
"content_hash": "dc4cca6373fe8a1926994d372b0a541f",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 187,
"avg_line_length": 42.81818181818182,
"alnum_prop": 0.485727765982543,
"repo_name": "jared0x90/pyipcam",
"id": "e121f485f6e0853bea1c0c5172a110d1c6799df2",
"size": "5372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipcam.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12210"
}
],
"symlink_target": ""
} |
"""
S-expression-based persistence of python objects.
It does something very much like L{Pickle<pickle>}; however, pickle's main goal
seems to be efficiency (both in space and time); jelly's main goals are
security, human readability, and portability to other environments.
This is how Jelly converts various objects to s-expressions.
Boolean::
True --> ['boolean', 'true']
Integer::
1 --> 1
List::
[1, 2] --> ['list', 1, 2]
String::
\"hello\" --> \"hello\"
Float::
2.3 --> 2.3
Dictionary::
{'a': 1, 'b': 'c'} --> ['dictionary', ['b', 'c'], ['a', 1]]
Module::
UserString --> ['module', 'UserString']
Class::
UserString.UserString --> ['class', ['module', 'UserString'], 'UserString']
Function::
string.join --> ['function', 'join', ['module', 'string']]
Instance: s is an instance of UserString.UserString, with a __dict__
{'data': 'hello'}::
[\"UserString.UserString\", ['dictionary', ['data', 'hello']]]
Class Method: UserString.UserString.center::
['method', 'center', ['None'], ['class', ['module', 'UserString'],
'UserString']]
Instance Method: s.center, where s is an instance of UserString.UserString::
['method', 'center', ['instance', ['reference', 1, ['class',
['module', 'UserString'], 'UserString']], ['dictionary', ['data', 'd']]],
['dereference', 1]]
The C{set} builtin and the C{sets.Set} class are serialized to the same
thing, and unserialized to C{set} if available, else to C{sets.Set}. It means
that there's a possibility of type switching in the serialization process. The
solution is to always use C{set}.
The same rule applies for C{frozenset} and C{sets.ImmutableSet}.
@author: Glyph Lefkowitz
"""
# System Imports
import types
import warnings
import decimal
from functools import reduce
import copy
import datetime
try:
from types import (ClassType as _OldStyleClass,
InstanceType as _OldStyleInstance)
except ImportError:
# On Python 3 and higher, ClassType and InstanceType
# are gone. Use an empty tuple to pass to isinstance()
# tests without throwing an exception.
_OldStyleClass = ()
_OldStyleInstance = ()
_SetTypes = [set]
_ImmutableSetTypes = [frozenset]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
try:
import sets as _sets
except ImportError:
# sets module is deprecated in Python 2.6, and gone in
# Python 3
_sets = None
else:
_SetTypes.append(_sets.Set)
_ImmutableSetTypes.append(_sets.ImmutableSet)
from zope.interface import implementer
# Twisted Imports
from twisted.python.compat import unicode, long, nativeString
from twisted.python.reflect import namedObject, qual, namedAny
from twisted.persisted.crefutil import NotKnown, _Tuple, _InstanceMethod
from twisted.persisted.crefutil import _DictKeyAndValue, _Dereference
from twisted.persisted.crefutil import _Container
from twisted.spread.interfaces import IJellyable, IUnjellyable
from twisted.python.compat import _PY3
from twisted.python.deprecate import deprecatedModuleAttribute
from incremental import Version
DictTypes = (dict,)
None_atom = b"None" # N
# code
class_atom = b"class" # c
module_atom = b"module" # m
function_atom = b"function" # f
# references
dereference_atom = b'dereference' # D
persistent_atom = b'persistent' # p
reference_atom = b'reference' # r
# mutable collections
dictionary_atom = b"dictionary" # d
list_atom = b'list' # l
set_atom = b'set'
# immutable collections
# (assignment to __dict__ and __class__ still might go away!)
tuple_atom = b"tuple" # t
instance_atom = b'instance' # i
frozenset_atom = b'frozenset'
deprecatedModuleAttribute(
Version("Twisted", 15, 0, 0),
"instance_atom is unused within Twisted.",
"twisted.spread.jelly", "instance_atom")
# errors
unpersistable_atom = b"unpersistable"# u
unjellyableRegistry = {}
unjellyableFactoryRegistry = {}
def _createBlank(cls):
"""
Given an object, if that object is a type (or a legacy old-style class),
return a new, blank instance of that type which has not had C{__init__}
called on it. If the object is not a type, return C{None}.
@param cls: The type (or class) to create an instance of.
@type cls: L{_OldStyleClass}, L{type}, or something else that cannot be
instantiated.
@return: a new blank instance or L{None} if C{cls} is not a class or type.
"""
if isinstance(cls, type):
return cls.__new__(cls)
if not _PY3 and isinstance(cls, _OldStyleClass):
return _OldStyleInstance(cls)
def _newInstance(cls, state):
    """
    Make a new instance of a class without calling its __init__ method,
    then install C{state} - via the instance's C{__setstate__} when one is
    present, else by assigning C{__dict__} directly.

    @param state: A C{dict} of instance state.

    @return: A new instance of C{cls}.
    """
    inst = _createBlank(cls)
    marker = object()
    setstate = getattr(inst, "__setstate__", marker)
    if setstate is marker:
        # No custom state hook: install the state dict wholesale.
        inst.__dict__ = state
    else:
        setstate(state)
    return inst
def _maybeClass(classnamep):
    """
    Normalize a class object or class name into a byte-string name.

    @param classnamep: either a class (new-style, or old-style on Python 2)
        or a text/byte string naming one.

    @return: the fully-qualified name as C{bytes}.
    """
    isClassLike = isinstance(classnamep, type) or (
        (not _PY3) and isinstance(classnamep, _OldStyleClass))
    if isClassLike:
        classnamep = qual(classnamep)
    if isinstance(classnamep, bytes):
        return classnamep
    return classnamep.encode('utf-8')
def setUnjellyableForClass(classname, unjellyable):
    """
    Set which local class will represent a remote type.

    If you have written a Copyable class that you expect your client to be
    receiving, write a local "copy" class to represent it, then call::

        jellier.setUnjellyableForClass('module.package.Class', MyCopier).

    Call this at the module level immediately after its class
    definition. MyCopier should be a subclass of RemoteCopy.

    The classname may be a special tag returned by
    'Copyable.getTypeToCopyFor' rather than an actual classname.

    This call is also for cached classes, since there will be no
    overlap. The rules are the same.
    """
    global unjellyableRegistry
    key = _maybeClass(classname)
    # Register the stand-in and whitelist its tag with the global taster.
    unjellyableRegistry[key] = unjellyable
    globalSecurity.allowTypes(key)
def setUnjellyableFactoryForClass(classname, copyFactory):
    """
    Set the factory to construct a remote instance of a type::

        jellier.setUnjellyableFactoryForClass('module.package.Class', MyFactory)

    Call this at the module level immediately after its class definition.
    C{copyFactory} should return an instance or subclass of
    L{RemoteCopy<pb.RemoteCopy>}.

    Similar to L{setUnjellyableForClass} except it uses a factory instead
    of creating an instance.
    """
    global unjellyableFactoryRegistry
    key = _maybeClass(classname)
    # Register the factory and whitelist its tag with the global taster.
    unjellyableFactoryRegistry[key] = copyFactory
    globalSecurity.allowTypes(key)
def setUnjellyableForClassTree(module, baseClass, prefix=None):
    """
    Set all classes in a module derived from C{baseClass} as copiers for
    a corresponding remote class.

    When you have a hierarchy of Copyable (or Cacheable) classes on one
    side, and a mirror structure of Copied (or RemoteCache) classes on the
    other, use this to setUnjellyableForClass all your Copieds for the
    Copyables.

    Each copyTag (the "classname" argument to getTypeToCopyFor, and
    what the Copyable's getTypeToCopyFor returns) is formed from
    adding a prefix to the Copied's class name.  The prefix defaults
    to module.__name__.  If you wish the copy tag to consist of solely
    the classname, pass the empty string ''.

    @param module: a module object from which to pull the Copied classes.
        (passing sys.modules[__name__] might be useful)

    @param baseClass: the base class from which all your Copied classes derive.

    @param prefix: the string prefixed to classnames to form the
        unjellyableRegistry.
    """
    if prefix is None:
        prefix = module.__name__

    if prefix:
        prefix = "%s." % prefix

    for name in dir(module):
        loaded = getattr(module, name)
        try:
            yes = issubclass(loaded, baseClass)
        except TypeError:
            # Not a class at all: issubclass() raises TypeError for
            # non-class objects.  (This branch was previously a no-op bare
            # string statement; an explicit continue makes the intent clear.)
            continue
        else:
            if yes:
                setUnjellyableForClass('%s%s' % (prefix, name), loaded)
def getInstanceState(inst, jellier):
    """
    Utility method to default to 'normal' state rules in serialization.

    The instance's state is obtained from its C{__getstate__} hook when it
    has one, and from its C{__dict__} otherwise; the qualified class name
    and the jellied state are then appended to the prepared expression.
    """
    state = inst.__getstate__() if hasattr(inst, "__getstate__") else inst.__dict__
    sexp = jellier.prepare(inst)
    sexp.extend([qual(inst.__class__).encode('utf-8'), jellier.jelly(state)])
    return jellier.preserve(inst, sexp)
def setInstanceState(inst, unjellier, jellyList):
    """
    Utility method to default to 'normal' state rules in unserialization.

    The second element of C{jellyList} is unjellied into the instance's
    state, honouring a C{__setstate__} hook when the instance has one.

    @return: C{inst}, now carrying the deserialized state.
    """
    unjellied = unjellier.unjelly(jellyList[1])
    if not hasattr(inst, "__setstate__"):
        inst.__dict__ = unjellied
    else:
        inst.__setstate__(unjellied)
    return inst
class Unpersistable:
    """
    An instance of this class is returned in place of anything that could
    not be unpersisted.
    """

    def __init__(self, reason):
        """
        Record a human-readable C{reason} string explaining the failure.
        """
        self.reason = reason

    def __repr__(self):
        return "Unpersistable(%r)" % (self.reason,)
@implementer(IJellyable)
class Jellyable:
    """
    Inherit from me to Jelly yourself directly with the `getStateFor'
    convenience method.
    """

    def getStateFor(self, jellier):
        # Default state is simply the instance dictionary.
        return self.__dict__

    def jellyFor(self, jellier):
        """
        @see: L{twisted.spread.interfaces.IJellyable.jellyFor}
        """
        sexp = jellier.prepare(self)
        className = qual(self.__class__).encode('utf-8')
        sexp.extend([className, jellier.jelly(self.getStateFor(jellier))])
        return jellier.preserve(self, sexp)
@implementer(IUnjellyable)
class Unjellyable:
    """
    Inherit from me to Unjelly yourself directly with the
    C{setStateFor} convenience method.
    """

    def setStateFor(self, unjellier, state):
        # Default behaviour: replace the instance dictionary wholesale.
        self.__dict__ = state

    def unjellyFor(self, unjellier, jellyList):
        """
        Perform the inverse operation of L{Jellyable.jellyFor}.

        @see: L{twisted.spread.interfaces.IUnjellyable.unjellyFor}
        """
        decodedState = unjellier.unjelly(jellyList[1])
        self.setStateFor(unjellier, decodedState)
        return self
class _Jellier:
    """
    (Internal) This class manages state for a call to jelly()
    """

    def __init__(self, taster, persistentStore, invoker):
        """
        Initialize.
        """
        # Security policy consulted before serializing any type or class.
        self.taster = taster
        # `preserved' is a dict of previously seen instances.
        self.preserved = {}
        # `cooked' is a dict of previously backreferenced instances to their
        # `ref' lists.
        self.cooked = {}
        # Strong references to every prepared object, keyed by id(), so the
        # id() values remain valid for the duration of this serialization.
        self.cooker = {}
        # Next reference ID to hand out when an object gets backreferenced.
        self._ref_id = 1
        # Optional callable producing opaque `persistent' tokens for objects.
        self.persistentStore = persistentStore
        # Opaque context object made available to jellyFor implementations.
        self.invoker = invoker

    def _cook(self, object):
        """
        (internal) Backreference an object.

        Notes on this method for the hapless future maintainer: If I've already
        gone through the prepare/preserve cycle on the specified object (it is
        being referenced after the serializer is \"done with\" it, e.g. this
        reference is NOT circular), the copy-in-place of aList is relevant,
        since the list being modified is the actual, pre-existing jelly
        expression that was returned for that object. If not, it's technically
        superfluous, since the value in self.preserved didn't need to be set,
        but the invariant that self.preserved[id(object)] is a list is
        convenient because that means we don't have to test and create it or
        not create it here, creating fewer code-paths. that's why
        self.preserved is always set to a list.

        Sorry that this code is so hard to follow, but Python objects are
        tricky to persist correctly. -glyph
        """
        aList = self.preserved[id(object)]
        newList = copy.copy(aList)
        # make a new reference ID
        refid = self._ref_id
        self._ref_id = self._ref_id + 1
        # replace the old list in-place, so that we don't have to track the
        # previous reference to it.
        aList[:] = [reference_atom, refid, newList]
        self.cooked[id(object)] = [dereference_atom, refid]
        return aList

    def prepare(self, object):
        """
        (internal) Create a list for persisting an object to.  This will allow
        backreferences to be made internal to the object. (circular
        references).

        The reason this needs to happen is that we don't generate an ID for
        every object, so we won't necessarily know which ID the object will
        have in the future.  When it is 'cooked' ( see _cook ), it will be
        assigned an ID, and the temporary placeholder list created here will be
        modified in-place to create an expression that gives this object an ID:
        [reference id# [object-jelly]].
        """
        # create a placeholder list to be preserved
        self.preserved[id(object)] = []
        # keep a reference to this object around, so it doesn't disappear!
        # (This isn't always necessary, but for cases where the objects are
        # dynamically generated by __getstate__ or getStateToCopyFor calls, it
        # is; id() will return the same value for a different object if it gets
        # garbage collected.  This may be optimized later.)
        self.cooker[id(object)] = object
        return []

    def preserve(self, object, sexp):
        """
        (internal) Mark an object's persistent list for later referral.
        """
        # if I've been cooked in the meanwhile,
        if id(object) in self.cooked:
            # replace the placeholder empty list with the real one
            self.preserved[id(object)][2] = sexp
            # but give this one back.
            sexp = self.preserved[id(object)]
        else:
            self.preserved[id(object)] = sexp
        return sexp

    # NOTE(review): this mapping appears unused within the visible module;
    # presumably kept for backwards compatibility -- confirm before removing.
    constantTypes = {bytes: 1, unicode: 1, int: 1, float: 1, long: 1}

    def _checkMutable(self,obj):
        """
        Return the backreference expression for C{obj} if it has already been
        seen during this serialization (cooking it on first re-encounter);
        otherwise return L{None}.
        """
        objId = id(obj)
        if objId in self.cooked:
            return self.cooked[objId]
        if objId in self.preserved:
            self._cook(obj)
            return self.cooked[objId]

    def jelly(self, obj):
        """
        Serialize C{obj} to a jelly s-expression, consulting the taster for
        every type and class encountered and emitting backreferences for
        already-seen mutable objects.
        """
        if isinstance(obj, Jellyable):
            # A Jellyable knows how to serialize itself; still check for a
            # pre-existing backreference first.
            preRef = self._checkMutable(obj)
            if preRef:
                return preRef
            return obj.jellyFor(self)
        objType = type(obj)
        if self.taster.isTypeAllowed(qual(objType).encode('utf-8')):
            # "Immutable" Types
            if ((objType is bytes) or
                (objType is int) or
                (objType is long) or
                (objType is float)):
                # Scalars are emitted as-is, with no wrapping atom.
                return obj
            elif objType is types.MethodType:
                # Bound method: serialize name, receiver, and class.
                aSelf = obj.__self__ if _PY3 else obj.im_self
                aFunc = obj.__func__ if _PY3 else obj.im_func
                aClass = aSelf.__class__ if _PY3 else obj.im_class
                return [b"method", aFunc.__name__, self.jelly(aSelf),
                        self.jelly(aClass)]
            elif objType is unicode:
                return [b'unicode', obj.encode('UTF-8')]
            elif objType is type(None):
                return [b'None']
            elif objType is types.FunctionType:
                # Functions travel by fully-qualified name only.
                return [b'function', obj.__module__ + '.' +
                        (obj.__qualname__ if _PY3 else obj.__name__)]
            elif objType is types.ModuleType:
                return [b'module', obj.__name__]
            elif objType is bool:
                return [b'boolean', obj and b'true' or b'false']
            elif objType is datetime.datetime:
                if obj.tzinfo:
                    raise NotImplementedError(
                        "Currently can't jelly datetime objects with tzinfo")
                return [b'datetime', ' '.join([unicode(x) for x in (
                    obj.year, obj.month, obj.day, obj.hour,
                    obj.minute, obj.second, obj.microsecond)]
                ).encode('utf-8')]
            elif objType is datetime.time:
                if obj.tzinfo:
                    raise NotImplementedError(
                        "Currently can't jelly datetime objects with tzinfo")
                return [b'time', '%s %s %s %s' % (obj.hour, obj.minute,
                                                  obj.second, obj.microsecond)]
            elif objType is datetime.date:
                return [b'date', '%s %s %s' % (obj.year, obj.month, obj.day)]
            elif objType is datetime.timedelta:
                return [b'timedelta', '%s %s %s' % (obj.days, obj.seconds,
                                                    obj.microseconds)]
            elif issubclass(objType, (type, _OldStyleClass)):
                return [b'class', qual(obj).encode('utf-8')]
            elif objType is decimal.Decimal:
                return self.jelly_decimal(obj)
            else:
                # Mutable (or instance) territory: backreference if we have
                # seen this exact object before.
                preRef = self._checkMutable(obj)
                if preRef:
                    return preRef
                # "Mutable" Types
                sxp = self.prepare(obj)
                if objType is list:
                    sxp.extend(self._jellyIterable(list_atom, obj))
                elif objType is tuple:
                    sxp.extend(self._jellyIterable(tuple_atom, obj))
                elif objType in DictTypes:
                    sxp.append(dictionary_atom)
                    for key, val in obj.items():
                        sxp.append([self.jelly(key), self.jelly(val)])
                elif objType in _SetTypes:
                    sxp.extend(self._jellyIterable(set_atom, obj))
                elif objType in _ImmutableSetTypes:
                    sxp.extend(self._jellyIterable(frozenset_atom, obj))
                else:
                    # Arbitrary instance: try the persistent store first,
                    # then fall back to class-name + state serialization.
                    className = qual(obj.__class__).encode('utf-8')
                    persistent = None
                    if self.persistentStore:
                        persistent = self.persistentStore(obj, self)
                    if persistent is not None:
                        sxp.append(persistent_atom)
                        sxp.append(persistent)
                    elif self.taster.isClassAllowed(obj.__class__):
                        sxp.append(className)
                        if hasattr(obj, "__getstate__"):
                            state = obj.__getstate__()
                        else:
                            state = obj.__dict__
                        sxp.append(self.jelly(state))
                    else:
                        self.unpersistable(
                            "instance of class %s deemed insecure" %
                            qual(obj.__class__), sxp)
                return self.preserve(obj, sxp)
        else:
            if objType is _OldStyleInstance:
                raise InsecureJelly("Class not allowed for instance: %s %s" %
                                    (obj.__class__, obj))
            raise InsecureJelly("Type not allowed for object: %s %s" %
                                (objType, obj))

    def _jellyIterable(self, atom, obj):
        """
        Jelly an iterable object.

        @param atom: the identifier atom of the object.
        @type atom: C{str}

        @param obj: any iterable object.
        @type obj: C{iterable}

        @return: a generator of jellied data.
        @rtype: C{generator}
        """
        yield atom
        for item in obj:
            yield self.jelly(item)

    def jelly_decimal(self, d):
        """
        Jelly a decimal object.

        @param d: a decimal object to serialize.
        @type d: C{decimal.Decimal}

        @return: jelly for the decimal object.
        @rtype: C{list}
        """
        # Fold the digit tuple back into a single integer, then apply sign.
        sign, guts, exponent = d.as_tuple()
        value = reduce(lambda left, right: left * 10 + right, guts)
        if sign:
            value = -value
        return [b'decimal', value, exponent]

    def unpersistable(self, reason, sxp=None):
        """
        (internal) Returns an sexp: (unpersistable "reason").  Utility method
        for making note that a particular object could not be serialized.
        """
        if sxp is None:
            sxp = []
        sxp.append(unpersistable_atom)
        sxp.append(reason)
        return sxp
class _Unjellier:
    """
    (Internal) Manages state for a call to unjelly(): dispatches each jelly
    atom to a C{_unjelly_*} handler, tracks forward/backward references, and
    runs C{postUnjelly} hooks once the whole expression is decoded.
    """

    def __init__(self, taster, persistentLoad, invoker):
        # Security policy consulted for every incoming type/module/class.
        self.taster = taster
        # Optional callable resolving `persistent' tokens back to objects.
        self.persistentLoad = persistentLoad
        # Maps reference IDs to their (possibly placeholder) objects.
        self.references = {}
        # postUnjelly hooks to invoke after the full expression is decoded.
        self.postCallbacks = []
        # Opaque context object, mirrored from the jellying side.
        self.invoker = invoker

    def unjellyFull(self, obj):
        # Decode the expression, then fire all deferred postUnjelly hooks.
        o = self.unjelly(obj)
        for m in self.postCallbacks:
            m()
        return o

    def _maybePostUnjelly(self, unjellied):
        """
        If the given object has support for the C{postUnjelly} hook, set it up
        to be called at the end of deserialization.

        @param unjellied: an object that has already been unjellied.

        @return: C{unjellied}
        """
        if hasattr(unjellied, 'postUnjelly'):
            self.postCallbacks.append(unjellied.postUnjelly)
        return unjellied

    def unjelly(self, obj):
        # Non-lists are scalars that were emitted as-is by the jellier.
        if type(obj) is not list:
            return obj
        jelTypeBytes = obj[0]
        if not self.taster.isTypeAllowed(jelTypeBytes):
            raise InsecureJelly(jelTypeBytes)
        # Registered stand-in class takes priority over everything else.
        regClass = unjellyableRegistry.get(jelTypeBytes)
        if regClass is not None:
            # Call unjellyFor on a blank instance when available, otherwise
            # treat the registered object itself as a callable.
            method = getattr(_createBlank(regClass), "unjellyFor", regClass)
            return self._maybePostUnjelly(method(self, obj))
        regFactory = unjellyableFactoryRegistry.get(jelTypeBytes)
        if regFactory is not None:
            return self._maybePostUnjelly(regFactory(self.unjelly(obj[1])))
        # Built-in atom handlers are discovered by naming convention.
        jelTypeText = nativeString(jelTypeBytes)
        thunk = getattr(self, '_unjelly_%s' % jelTypeText, None)
        if thunk is not None:
            return thunk(obj[1:])
        else:
            # No handler: the atom is taken to be a dotted class name.
            nameSplit = jelTypeText.split('.')
            modName = '.'.join(nameSplit[:-1])
            if not self.taster.isModuleAllowed(modName):
                raise InsecureJelly(
                    "Module %s not allowed (in type %s)." % (modName, jelTypeText))
            clz = namedObject(jelTypeText)
            if not self.taster.isClassAllowed(clz):
                raise InsecureJelly("Class %s not allowed." % jelTypeText)
            return self._genericUnjelly(clz, obj[1])

    def _genericUnjelly(self, cls, state):
        """
        Unjelly a type for which no specific unjellier is registered, but which
        is nonetheless allowed.

        @param cls: the class of the instance we are unjellying.
        @type cls: L{_OldStyleClass} or L{type}

        @param state: The jellied representation of the object's state; its
            C{__dict__} unless it has a C{__setstate__} that takes something
            else.
        @type state: L{list}

        @return: the new, unjellied instance.
        """
        return self._maybePostUnjelly(_newInstance(cls, self.unjelly(state)))

    def _unjelly_None(self, exp):
        return None

    def _unjelly_unicode(self, exp):
        return unicode(exp[0], "UTF-8")

    def _unjelly_decimal(self, exp):
        """
        Unjelly decimal objects.
        """
        # The jellied form is [value, exponent] with the sign folded into
        # value; rebuild the (sign, digits, exponent) tuple Decimal expects.
        value = exp[0]
        exponent = exp[1]
        if value < 0:
            sign = 1
        else:
            sign = 0
        guts = decimal.Decimal(value).as_tuple()[1]
        return decimal.Decimal((sign, guts, exponent))

    def _unjelly_boolean(self, exp):
        # NOTE(review): `if bool:` is vestigial -- the bool builtin is always
        # truthy, so the else branch below is unreachable.
        if bool:
            assert exp[0] in (b'true', b'false')
            return exp[0] == b'true'
        else:
            return Unpersistable("Could not unpersist boolean: %s" % (exp[0],))

    def _unjelly_datetime(self, exp):
        return datetime.datetime(*map(int, exp[0].split()))

    def _unjelly_date(self, exp):
        return datetime.date(*map(int, exp[0].split()))

    def _unjelly_time(self, exp):
        return datetime.time(*map(int, exp[0].split()))

    def _unjelly_timedelta(self, exp):
        days, seconds, microseconds = map(int, exp[0].split())
        return datetime.timedelta(
            days=days, seconds=seconds, microseconds=microseconds)

    def unjellyInto(self, obj, loc, jel):
        # Decode jel into obj[loc]; if the result is a placeholder
        # (NotKnown), register obj/loc to be patched when it resolves.
        o = self.unjelly(jel)
        if isinstance(o, NotKnown):
            o.addDependant(obj, loc)
        obj[loc] = o
        return o

    def _unjelly_dereference(self, lst):
        # Look up an already-seen reference, or create a placeholder that
        # will be resolved when the matching `reference' atom arrives.
        refid = lst[0]
        x = self.references.get(refid)
        if x is not None:
            return x
        der = _Dereference(refid)
        self.references[refid] = der
        return der

    def _unjelly_reference(self, lst):
        refid = lst[0]
        exp = lst[1]
        o = self.unjelly(exp)
        ref = self.references.get(refid)
        if (ref is None):
            self.references[refid] = o
        elif isinstance(ref, NotKnown):
            # A dereference got here first; resolve its dependants now.
            ref.resolveDependants(o)
            self.references[refid] = o
        else:
            assert 0, "Multiple references with same ID!"
        return o

    def _unjelly_tuple(self, lst):
        # Build into a mutable list first; only freeze to a real tuple once
        # every element is fully resolved (no NotKnown placeholders).
        l = list(range(len(lst)))
        finished = 1
        for elem in l:
            if isinstance(self.unjellyInto(l, elem, lst[elem]), NotKnown):
                finished = 0
        if finished:
            return tuple(l)
        else:
            return _Tuple(l)

    def _unjelly_list(self, lst):
        l = list(range(len(lst)))
        for elem in l:
            self.unjellyInto(l, elem, lst[elem])
        return l

    def _unjellySetOrFrozenset(self, lst, containerType):
        """
        Helper method to unjelly set or frozenset.

        @param lst: the content of the set.
        @type lst: C{list}

        @param containerType: the type of C{set} to use.
        """
        l = list(range(len(lst)))
        finished = True
        for elem in l:
            data = self.unjellyInto(l, elem, lst[elem])
            if isinstance(data, NotKnown):
                finished = False
        if not finished:
            # Unresolved placeholders remain: defer container construction.
            return _Container(l, containerType)
        else:
            return containerType(l)

    def _unjelly_set(self, lst):
        """
        Unjelly set using the C{set} builtin.
        """
        return self._unjellySetOrFrozenset(lst, set)

    def _unjelly_frozenset(self, lst):
        """
        Unjelly frozenset using the C{frozenset} builtin.
        """
        return self._unjellySetOrFrozenset(lst, frozenset)

    def _unjelly_dictionary(self, lst):
        d = {}
        for k, v in lst:
            # The proxy inserts the pair into d once both key and value are
            # resolved, which may happen later for NotKnown placeholders.
            kvd = _DictKeyAndValue(d)
            self.unjellyInto(kvd, 0, k)
            self.unjellyInto(kvd, 1, v)
        return d

    def _unjelly_module(self, rest):
        moduleName = nativeString(rest[0])
        if type(moduleName) != str:
            raise InsecureJelly(
                "Attempted to unjelly a module with a non-string name.")
        if not self.taster.isModuleAllowed(moduleName):
            raise InsecureJelly(
                "Attempted to unjelly module named %r" % (moduleName,))
        # The "x" fromlist forces __import__ to return the leaf module
        # rather than the top-level package.
        mod = __import__(moduleName, {}, {},"x")
        return mod

    def _unjelly_class(self, rest):
        cname = nativeString(rest[0])
        clist = cname.split(nativeString('.'))
        modName = nativeString('.').join(clist[:-1])
        if not self.taster.isModuleAllowed(modName):
            raise InsecureJelly("module %s not allowed" % modName)
        klaus = namedObject(cname)
        objType = type(klaus)
        if objType not in (_OldStyleClass, type):
            raise InsecureJelly(
                "class %r unjellied to something that isn't a class: %r" % (
                    cname, klaus))
        if not self.taster.isClassAllowed(klaus):
            raise InsecureJelly("class not allowed: %s" % qual(klaus))
        return klaus

    def _unjelly_function(self, rest):
        fname = nativeString(rest[0])
        modSplit = fname.split(nativeString('.'))
        modName = nativeString('.').join(modSplit[:-1])
        if not self.taster.isModuleAllowed(modName):
            raise InsecureJelly("Module not allowed: %s" % modName)
        # XXX do I need an isFunctionAllowed?
        function = namedAny(fname)
        return function

    def _unjelly_persistent(self, rest):
        if self.persistentLoad:
            pload = self.persistentLoad(rest[0], self)
            return pload
        else:
            return Unpersistable("Persistent callback not found")

    def _unjelly_instance(self, rest):
        """
        (internal) Unjelly an instance.

        Called to handle the deprecated I{instance} token.

        @param rest: The s-expression representing the instance.

        @return: The unjellied instance.
        """
        warnings.warn_explicit(
            "Unjelly support for the instance atom is deprecated since "
            "Twisted 15.0.0.  Upgrade peer for modern instance support.",
            category=DeprecationWarning, filename="", lineno=0)

        clz = self.unjelly(rest[0])
        if not _PY3 and type(clz) is not _OldStyleClass:
            raise InsecureJelly("Legacy 'instance' found with new-style class")

        return self._genericUnjelly(clz, rest[1])

    def _unjelly_unpersistable(self, rest):
        return Unpersistable("Unpersistable data: %s" % (rest[0],))

    def _unjelly_method(self, rest):
        """
        (internal) Unjelly a method.
        """
        im_name = rest[0]
        im_self = self.unjelly(rest[1])
        im_class = self.unjelly(rest[2])
        if not isinstance(im_class, (type, _OldStyleClass)):
            raise InsecureJelly("Method found with non-class class.")
        if im_name in im_class.__dict__:
            if im_self is None:
                im = getattr(im_class, im_name)
            elif isinstance(im_self, NotKnown):
                # Receiver not yet resolved: defer binding via placeholder.
                im = _InstanceMethod(im_name, im_self, im_class)
            else:
                # On Python 2 MethodType takes a third (class) argument.
                im = types.MethodType(im_class.__dict__[im_name], im_self,
                                      *([im_class] * (not _PY3)))
        else:
            raise TypeError('instance method changed')
        return im
#### Published Interface.
class InsecureJelly(Exception):
    """
    Raised whenever a jelly is deemed `insecure'; for example, when it
    contains a type, class, or module disallowed by the specified `taster'.
    """
class DummySecurityOptions:
    """
    DummySecurityOptions() -> insecure security options

    A permissive stand-in for L{SecurityOptions}: every module, class, and
    type is allowed.  Only appropriate for fully trusted peers.
    """

    def isModuleAllowed(self, moduleName):
        """
        DummySecurityOptions.isModuleAllowed(moduleName) -> boolean

        @return: C{1} for any module name, unconditionally.
        """
        return 1

    def isClassAllowed(self, klass):
        """
        DummySecurityOptions.isClassAllowed(class) -> boolean

        The module is assumed to have been vetted already.

        @return: C{1} for any class, unconditionally.
        """
        return 1

    def isTypeAllowed(self, typeName):
        """
        DummySecurityOptions.isTypeAllowed(typeName) -> boolean

        @return: C{1} for any type name, unconditionally.
        """
        return 1
class SecurityOptions:
    """
    This will by default disallow everything, except for 'none'.
    """

    # Atoms that can be enabled in one go via allowBasicTypes().
    basicTypes = ["dictionary", "list", "tuple",
                  "reference", "dereference", "unpersistable",
                  "persistent", "long_int", "long", "dict"]

    def __init__(self):
        """
        SecurityOptions() initialize.
        """
        # I don't believe any of these types can ever pose a security hazard,
        # except perhaps "reference"...
        self.allowedTypes = {
            b"None": 1,
            b"bool": 1,
            b"boolean": 1,
            b"string": 1,
            b"str": 1,
            b"int": 1,
            b"float": 1,
            b"datetime": 1,
            b"time": 1,
            b"date": 1,
            b"timedelta": 1,
            b"NoneType": 1,
            b'unicode': 1,
            b'decimal': 1,
            b'set': 1,
            b'frozenset': 1,
        }
        self.allowedModules = {}
        self.allowedClasses = {}

    def allowBasicTypes(self):
        """
        Allow all `basic' types.  (Dictionary and list.  Int, string, and
        float are implicitly allowed.)
        """
        self.allowTypes(*self.basicTypes)

    def allowTypes(self, *types):
        """
        SecurityOptions.allowTypes(typeString): Allow a particular type, by
        its name.
        """
        for candidate in types:
            if isinstance(candidate, unicode):
                candidate = candidate.encode('utf-8')
            elif not isinstance(candidate, bytes):
                # A class object rather than a name: use its qualified name.
                candidate = qual(candidate)
            self.allowedTypes[candidate] = 1

    def allowInstancesOf(self, *classes):
        """
        SecurityOptions.allowInstances(klass, klass, ...): allow instances
        of the specified classes

        This will also allow the 'instance', 'class' (renamed 'classobj' in
        Python 2.3), and 'module' types, as well as basic types.
        """
        self.allowBasicTypes()
        self.allowTypes("instance", "class", "classobj", "module")
        for cls in classes:
            self.allowTypes(qual(cls))
            self.allowModules(cls.__module__)
            self.allowedClasses[cls] = 1

    def allowModules(self, *modules):
        """
        SecurityOptions.allowModules(module, module, ...): allow modules by
        name.  This will also allow the 'module' type.
        """
        for mod in modules:
            if type(mod) == types.ModuleType:
                mod = mod.__name__
            if not isinstance(mod, bytes):
                mod = mod.encode('utf-8')
            self.allowedModules[mod] = 1

    def isModuleAllowed(self, moduleName):
        """
        SecurityOptions.isModuleAllowed(moduleName) -> boolean
        returns 1 if a module by that name is allowed, 0 otherwise
        """
        if not isinstance(moduleName, bytes):
            moduleName = moduleName.encode('utf-8')
        return moduleName in self.allowedModules

    def isClassAllowed(self, klass):
        """
        SecurityOptions.isClassAllowed(class) -> boolean
        Assumes the module has already been allowed.  Returns 1 if the given
        class is allowed, 0 otherwise.
        """
        return klass in self.allowedClasses

    def isTypeAllowed(self, typeName):
        """
        SecurityOptions.isTypeAllowed(typeName) -> boolean
        Returns 1 if the given type is allowed, 0 otherwise.
        """
        if not isinstance(typeName, bytes):
            typeName = typeName.encode('utf-8')
        # Dotted names are class tags; they are vetted separately by
        # isModuleAllowed/isClassAllowed, so they pass the type check here.
        if b'.' in typeName:
            return True
        return typeName in self.allowedTypes
# Module-level default taster shared by the registration helpers
# (setUnjellyableForClass and friends); pre-seeded with the basic types.
globalSecurity = SecurityOptions()
globalSecurity.allowBasicTypes()
def jelly(object, taster=None, persistentStore=None, invoker=None):
    """
    Serialize to s-expression.

    Returns a list which is the serialized representation of an object.  An
    optional 'taster' argument takes a SecurityOptions and will mark any
    insecure objects as unpersistable rather than serializing them.

    @param object: the object to serialize.
    @param taster: a security-options object consulted for every type,
        module, and class encountered; defaults to an allow-everything
        L{DummySecurityOptions}.
    @param persistentStore: optional callable used to serialize objects as
        opaque persistent references.
    @param invoker: optional context object exposed to C{jellyFor}
        implementations via the jellier.
    """
    # Construct the default per call rather than once at import time,
    # avoiding a module-lifetime shared default-argument instance.
    if taster is None:
        taster = DummySecurityOptions()
    return _Jellier(taster, persistentStore, invoker).jelly(object)
def unjelly(sexp, taster=None, persistentLoad=None, invoker=None):
    """
    Unserialize from s-expression.

    Takes a list that was the result from a call to jelly() and unserializes
    an arbitrary object from it.  The optional 'taster' argument, an instance
    of SecurityOptions, will cause an InsecureJelly exception to be raised if
    a disallowed type, module, or class attempted to unserialize.

    @param sexp: the s-expression (nested list) produced by L{jelly}.
    @param taster: a security-options object consulted for every incoming
        type, module, and class; defaults to an allow-everything
        L{DummySecurityOptions}.
    @param persistentLoad: optional callable that resolves persistent
        references emitted by a matching C{persistentStore}.
    @param invoker: optional context object exposed to C{unjellyFor}
        implementations via the unjellier.
    """
    # Construct the default per call rather than once at import time,
    # avoiding a module-lifetime shared default-argument instance.
    if taster is None:
        taster = DummySecurityOptions()
    return _Unjellier(taster, persistentLoad, invoker).unjellyFull(sexp)
| {
"content_hash": "4e4a5a879d3903f253dd9c5962aaa421",
"timestamp": "",
"source": "github",
"line_count": 1125,
"max_line_length": 83,
"avg_line_length": 32.22577777777778,
"alnum_prop": 0.5940861698019528,
"repo_name": "EricMuller/mywebmarks-backend",
"id": "c92c3898658d51820dc34eebebf8c29f69258fec",
"size": "36384",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "requirements/twisted/Twisted-17.1.0/src/twisted/spread/jelly.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "23736"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "66211"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "1087560"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "23014526"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "15482"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """
        Apply the migration: create the C{crowdataapp_userprofile} table
        with a one-to-one link to C{auth.User}.
        """
        # Adding model 'UserProfile'
        db.create_table(u'crowdataapp_userprofile', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
            # NOTE(review): max_length is the *string* '128' here, not the
            # int 128 -- confirm this was intentional in the original model.
            ('name', self.gf('django.db.models.fields.CharField')(max_length='128')),
            ('country', self.gf('django_countries.fields.CountryField')(max_length=2, null=True)),
            ('show_in_leaderboard', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'crowdataapp', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table(u'crowdataapp_userprofile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'crowdataapp.document': {
'Meta': {'object_name': 'Document'},
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': u"orm['crowdataapp.DocumentSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'stored_validity_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '3', 'decimal_places': '2'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': "'512'"})
},
u'crowdataapp.documentset': {
'Meta': {'object_name': 'DocumentSet'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'entries_threshold': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'head_html': ('django.db.models.fields.TextField', [], {'default': '\'<!-- <script> or <link rel="stylesheet"> tags go here -->\'', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'128'"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'template_function': ('django.db.models.fields.TextField', [], {'default': "'// Javascript function to insert the document into the DOM.\\n// Receives the URL of the document as its only parameter.\\n// Must be called insertDocument\\n// JQuery is available\\n// resulting element should be inserted into div#document-viewer-container\\nfunction insertDocument(document_url) {\\n}\\n'"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'crowdataapp.documentsetfieldentry': {
'Meta': {'object_name': 'DocumentSetFieldEntry'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetFormEntry']"}),
'field_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'})
},
u'crowdataapp.documentsetform': {
'Meta': {'object_name': 'DocumentSetForm'},
'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'form'", 'unique': 'True', 'to': u"orm['crowdataapp.DocumentSet']"}),
'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': u"orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'crowdataapp.documentsetformentry': {
'Meta': {'object_name': 'DocumentSetFormEntry'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'form_entries'", 'null': 'True', 'to': u"orm['crowdataapp.Document']"}),
'entry_time': ('django.db.models.fields.DateTimeField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'crowdataapp.documentsetformfield': {
'Meta': {'object_name': 'DocumentSetFormField'},
'autocomplete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'field_type': ('django.db.models.fields.IntegerField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'placeholder_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'crowdataapp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'128'"}),
'show_in_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['crowdataapp'] | {
"content_hash": "7932469d44b2cbb9da3e2d2d9f6b2a16",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 400,
"avg_line_length": 79.43046357615894,
"alnum_prop": 0.5621143905285977,
"repo_name": "the-engine-room/replication-sprint-02",
"id": "6d7022c5645d4ad9ad6a5a6eb269a24c83903e22",
"size": "12018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crowdataapp/migrations/0010_auto__add_userprofile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "112528"
},
{
"name": "HTML",
"bytes": "71304"
},
{
"name": "JavaScript",
"bytes": "75299"
},
{
"name": "Python",
"bytes": "511170"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.