Search is not available for this dataset
text stringlengths 75 104k |
|---|
def outer_left_join(self, join_streamlet, window_config, join_function):
    """Return a new Streamlet that is the outer-left join of this streamlet
    with ``join_streamlet``, windowed by ``window_config`` and combined
    with ``join_function``.
    """
    from heronpy.streamlet.impl.joinbolt import JoinStreamlet, JoinBolt
    joined = JoinStreamlet(JoinBolt.OUTER_LEFT, window_config,
                           join_function, self, join_streamlet)
    # Both parent streamlets must register the join as a child so tuples
    # from either side flow into it.
    for parent in (self, join_streamlet):
        parent._add_child(joined)
    return joined
def outer_join(self, join_streamlet, window_config, join_function):
    """Return a new Streamlet that is the full outer join of this streamlet
    with ``join_streamlet``, windowed by ``window_config`` and combined
    with ``join_function``.
    """
    from heronpy.streamlet.impl.joinbolt import JoinStreamlet, JoinBolt
    joined = JoinStreamlet(JoinBolt.OUTER, window_config,
                           join_function, self, join_streamlet)
    # Register the join as a child of both parents.
    for parent in (self, join_streamlet):
        parent._add_child(joined)
    return joined
def reduce_by_key_and_window(self, window_config, reduce_function):
    """Return a new Streamlet whose (key, value) pairs are collected per
    ``window_config`` window and folded together with ``reduce_function``.
    """
    from heronpy.streamlet.impl.reducebykeyandwindowbolt import ReduceByKeyAndWindowStreamlet
    reduced = ReduceByKeyAndWindowStreamlet(window_config, reduce_function, self)
    self._add_child(reduced)
    return reduced
def _default_stage_name_calculator(self, prefix, existing_stage_names):
"""This is the method that's implemented by the operators to get the name of the Streamlet
:return: The name of the operator
"""
index = 1
calculated_name = ""
while True:
calculated_name = prefix + "-" + str(index)
if calculated_name not in existing_stage_names:
return calculated_name
index = index + 1
return "Should Never Got Here" |
def get(self):
    """Handle GET: return machine info for the selected topologies.

    Reads optional cluster(s), environ(s) and topology name(s) from the
    query string.  When more than one topology name is supplied, cluster
    and environ become mandatory (otherwise the names are ambiguous).
    Writes a nested dict ``{cluster: {environ: {topology: machines}}}``.
    """
    clusters = self.get_arguments(constants.PARAM_CLUSTER)
    environs = self.get_arguments(constants.PARAM_ENVIRON)
    topology_names = self.get_arguments(constants.PARAM_TOPOLOGY)
    if len(topology_names) > 1:
        if not clusters:
            # Bug fix: the original concatenated without a separator,
            # producing e.g. "Missing argumentcluster".
            self.write_error_response("Missing argument " + constants.PARAM_CLUSTER)
            return
        if not environs:
            self.write_error_response("Missing argument " + constants.PARAM_ENVIRON)
            return
    ret = {}  # the original initialized this twice; once is enough
    for topology in self.tracker.topologies:
        cluster = topology.cluster
        environ = topology.environ
        topology_name = topology.name
        if not cluster or not environ:
            continue
        # Skip topologies filtered out by any of the query parameters.
        if clusters and cluster not in clusters:
            continue
        if environs and environ not in environs:
            continue
        if topology_names and topology_name not in topology_names:
            continue
        ret.setdefault(cluster, {}).setdefault(environ, {})[topology_name] = \
            topology.get_machines()
    self.write_success_response(ret)
def _async_stream_process_output(process, stream_fn, handler):
    """ Stream and handle the output of a process on a background thread.
    :param process: the process to stream the output for
    :param stream_fn: the function that applies handler to process
    :param handler: a function that will be called for each log line
    :return: the started daemon Thread (the original docstring said None,
        but the thread is returned so that callers may join it)
    """
    logging_thread = Thread(target=stream_fn, args=(process, handler, ))
    # Setting the logging thread as a daemon thread will allow it to exit with the program
    # rather than blocking the exit waiting for it to be handled manually.
    logging_thread.daemon = True
    logging_thread.start()
    return logging_thread
def pick(dirname, pattern):
    '''
    Return the name of the first file in ``dirname`` matching the glob
    ``pattern`` (used to locate topology jars), or None when none match.
    :param dirname: directory to scan
    :param pattern: fnmatch-style glob pattern
    :return: matching file name, or None
    '''
    matches = fnmatch.filter(os.listdir(dirname), pattern)
    if matches:
        return matches[0]
    return None
def get(self):
    """Handle GET: return the metrics timeline for one topology component.

    Reads cluster/role/environ/topology/component, required metric names,
    optional instances and a [starttime, endtime] interval from the query
    string, fetches the timeline from the topology's tmaster, and writes a
    JSON success response.  Any failure is reported via write_error_response.

    NOTE(review): the body uses ``yield tornado.gen.Task`` — presumably the
    method is decorated as a tornado coroutine elsewhere; confirm.
    """
    try:
        cluster = self.get_argument_cluster()
        role = self.get_argument_role()
        environ = self.get_argument_environ()
        topology_name = self.get_argument_topology()
        component = self.get_argument_component()
        metric_names = self.get_required_arguments_metricnames()
        start_time = self.get_argument_starttime()
        end_time = self.get_argument_endtime()
        # Raises when the interval is invalid; handled by the except below.
        self.validateInterval(start_time, end_time)
        instances = self.get_arguments(constants.PARAM_INSTANCE)
        topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
            cluster, role, environ, topology_name)
        metrics = yield tornado.gen.Task(metricstimeline.getMetricsTimeline,
                                         topology.tmaster, component, metric_names,
                                         instances, int(start_time), int(end_time))
        self.write_success_response(metrics)
    except Exception as e:
        # Full traceback only at debug level; the client gets the message.
        Log.debug(traceback.format_exc())
        self.write_error_response(e)
def create_parser():
    """Build the top-level heron-explorer argument parser and attach the
    subcommand parsers of every command group.

    :return: the configured argparse.ArgumentParser
    """
    # Typo fix: 'Disply' -> 'Display' in the user-visible epilog.
    help_epilog = '''Getting more help:
  heron-explorer help <command> Display help and options for <command>\n
  For detailed documentation, go to http://heronstreaming.io'''
    parser = argparse.ArgumentParser(
        prog='heron-explorer',
        epilog=help_epilog,
        formatter_class=SubcommandHelpFormatter,
        add_help=False)
    # sub-commands
    subparsers = parser.add_subparsers(
        title="Available commands",
        metavar='<command> <options>')
    # subparser for subcommands related to clusters
    clusters.create_parser(subparsers)
    # subparser for subcommands related to logical plan
    logicalplan.create_parser(subparsers)
    # subparser for subcommands related to physical plan
    physicalplan.create_parser(subparsers)
    # subparser for subcommands related to displaying info
    topologies.create_parser(subparsers)
    # subparser for help subcommand
    help.create_parser(subparsers)
    # subparser for version subcommand
    version.create_parser(subparsers)
    return parser
def run(command, *args):
    """Dispatch ``command`` to its handler function.

    :return: the handler's result, or 1 when the command is unknown
    """
    # Table-driven dispatch instead of an if/elif chain; each entry maps a
    # command name to the module-level handler that implements it.
    handlers = {
        'clusters': clusters.run,                    # show all clusters
        'topologies': topologies.run,                # show topologies
        'containers': physicalplan.run_containers,   # physical plan
        'metrics': physicalplan.run_metrics,
        'components': logicalplan.run_components,    # logical plan
        'spouts': logicalplan.run_spouts,
        'bolts': logicalplan.run_bolts,
        'help': help.run,
        'version': version.run,
    }
    handler = handlers.get(command)
    if handler is None:
        return 1
    return handler(command, *args)
def extract_common_args(command, parser, cl_args):
    """Resolve cluster/role/environ and config_path from the parsed CLI args.

    :param command: the subcommand being executed (used to pick its subparser
        when printing help on error)
    :param parser: the top-level argparse parser
    :param cl_args: dict of parsed command-line arguments
    :return: ``cl_args`` updated with 'cluster', 'role', 'environ' and
        'config_path'; an empty dict on failure (callers treat {} as error)
    """
    try:
        # do not pop like cli because ``topologies`` subcommand still needs it
        cluster_role_env = cl_args['cluster/[role]/[env]']
        config_path = cl_args['config_path']
    except KeyError:
        # if some of the arguments are not found, print error and exit
        subparser = config.get_subparser(parser, command)
        print(subparser.format_help())
        return dict()
    cluster = config.get_heron_cluster(cluster_role_env)
    config_path = config.get_heron_cluster_conf_dir(cluster, config_path)
    new_cl_args = dict()
    try:
        # may raise when the cluster/role/env triple cannot be parsed
        cluster_tuple = config.parse_cluster_role_env(cluster_role_env, config_path)
        new_cl_args['cluster'] = cluster_tuple[0]
        new_cl_args['role'] = cluster_tuple[1]
        new_cl_args['environ'] = cluster_tuple[2]
        new_cl_args['config_path'] = config_path
    except Exception as e:
        Log.error("Unable to get valid topology location: %s", str(e))
        return dict()
    cl_args.update(new_cl_args)
    return cl_args
def main(args):
    """Entry point: parse CLI args, configure the tracker URL and logging,
    dispatch the subcommand via run(), and return a shell exit code
    (0 on success, 1 on failure)."""
    # create the argument parser
    parser = create_parser()
    # if no argument is provided, print help and exit
    if not args:
        parser.print_help()
        return 0
    # insert the boolean values for some of the options
    all_args = parse.insert_bool_values(args)
    # parse the args
    args, unknown_args = parser.parse_known_args(args=all_args)
    command_line_args = vars(args)
    command = command_line_args['subcommand']
    if unknown_args:
        Log.error('Unknown argument: %s', unknown_args[0])
        # show help message for the command the user attempted
        command_line_args['help-command'] = command
        command = 'help'
    if command not in ['help', 'version']:
        opts.set_tracker_url(command_line_args)
        log.set_logging_level(command_line_args)
        if command not in ['topologies', 'clusters']:
            # these commands additionally need cluster/[role]/[env] resolution;
            # extract_common_args returns {} on failure
            command_line_args = extract_common_args(command, parser, command_line_args)
        if not command_line_args:
            return 1
        Log.info("Using tracker URL: %s", command_line_args["tracker_url"])
    # timing command execution
    start = time.time()
    ret = run(command, parser, command_line_args, unknown_args)
    end = time.time()
    if command != 'help':
        sys.stdout.flush()
        Log.info('Elapsed time: %.3fs.', (end - start))
    return 0 if ret else 1
def expand_args(command):
    """Parses command strings and returns a Popen-ready list.

    A string is first split on '|' into pipeline stages (shlex configured
    with '|' as its only whitespace character), then each stage is
    shlex-split into argv tokens, producing a list of argv lists.  A list
    input is assumed to be pre-split stage strings.

    NOTE(review): ``unicode`` and ``encode('utf-8')`` are Python-2 idioms;
    under Python 3 the name ``unicode`` does not exist.
    """
    # Prepare arguments.
    if isinstance(command, (str, unicode)):
        splitter = shlex.shlex(command.encode('utf-8'))
        splitter.whitespace = '|'      # split only on the pipe character
        splitter.whitespace_split = True
        command = []
        while True:
            token = splitter.get_token()
            if token:
                command.append(token)
            else:
                break
    # Split every stage string into an argv list.
    command = list(map(shlex.split, command))
    return command
def run(command, data=None, timeout=None, kill_timeout=None, env=None, cwd=None):
    """Executes a given command and returns Response.
    Blocks until process is complete, or timeout is reached.

    Pipelines ('a | b') are run stage by stage, feeding each stage's stdout
    into the next stage's stdin.  The returned Response is the last stage's,
    with the earlier stages attached as ``r.history``.
    """
    command = expand_args(command)
    history = []
    for c in command:
        if len(history):
            # due to broken pipe problems pass only first 10 KiB
            data = history[-1].std_out[0:10*1024]
        cmd = Command(c)
        try:
            out, err = cmd.run(data, timeout, kill_timeout, env, cwd)
            status_code = cmd.returncode
        except OSError as e:
            # command not found / not executable: emulate the shell's 127
            out, err = '', u"\n".join([e.strerror, traceback.format_exc()])
            status_code = 127
        r = Response(process=cmd)
        r.command = c
        r.std_out = out
        r.std_err = err
        r.status_code = status_code
        history.append(r)
    # the final stage's Response carries the rest as history
    r = history.pop()
    r.history = history
    return r
def connect(command, data=None, env=None, cwd=None):
    """Spawns a new process from the given command."""
    # TODO: support piped commands
    argv = expand_args(command).pop()
    # child environment = current environment overlaid with the caller's env
    merged_env = dict(os.environ)
    merged_env.update(env or {})
    proc = subprocess.Popen(
        argv,
        universal_newlines=True,
        shell=False,
        env=merged_env,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=0,
        cwd=cwd,
    )
    return ConnectedCommand(process=proc)
def send(self, str, end='\n'):
    """Sends a line to std_in.

    NOTE(review): the parameter name ``str`` shadows the builtin; kept as-is
    because renaming would break keyword callers.
    :param str: text to write to the child's stdin
    :param end: terminator appended to the text (newline by default)
    :return: whatever the underlying ``stdin.write`` returns
    """
    return self._process.stdin.write(str+end)
def bracket_split(source, brackets=('()', '{}', '[]'), strip=False):
    """Split ``source`` into alternating runs of plain text and complete
    top-level bracketed groups, yielded in order.

    DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content
    if strip=True).  When ``strip`` is true the delimiters of each group
    are removed.
    """
    openers = [pair[0] for pair in brackets]
    depth = 0            # current bracket nesting level
    chunk_start = 0      # start of the pending plain-text run
    group_start = 0      # index of the current group's opening bracket
    opener = closer = ''
    idx = 0
    while idx < len(source):
        ch = source[idx]
        if not depth and ch in openers:
            depth = 1
            group_start = idx
            opener, closer = brackets[openers.index(ch)]
        elif depth:
            if ch == opener:
                depth += 1
            elif ch == closer:
                depth -= 1
                if not depth:
                    prefix = source[chunk_start:group_start]
                    if prefix:
                        yield prefix
                    chunk_start = idx + 1
                    # bool(strip) is 0 or 1: trims one delimiter per side
                    yield source[group_start + strip:idx + 1 - strip]
        idx += 1
    tail = source[chunk_start:]
    if tail:
        yield tail
def Js(val, Clamped=False):
    '''Converts a Python value to the equivalent PyJs type.

    PyJs values pass through unchanged; None -> undefined, strings ->
    PyJsString, bools -> true/false, numbers -> PyJsNumber, functions ->
    PyJsFunction, dict -> PyJsObject (recursively), list/tuple ->
    PyJsArray, numpy arrays -> the matching PyJs typed array (``Clamped``
    selects the clamped variant for uint8 data).  Anything else is wrapped
    via py_wrap.
    '''
    if isinstance(val, PyJs):
        return val
    elif val is None:
        return undefined
    elif isinstance(val, basestring):
        return PyJsString(val, StringPrototype)
    elif isinstance(val, bool):
        # must be checked before the numeric branch: bool subclasses int
        return true if val else false
    elif isinstance(val, float) or isinstance(val, int) or isinstance(
            val, long) or (NUMPY_AVAILABLE and isinstance(
                val,
                (numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,
                 numpy.int32, numpy.uint32, numpy.float32, numpy.float64))):
        # This is supposed to speed things up. may not be the case
        if val in NUM_BANK:
            return NUM_BANK[val]
        return PyJsNumber(float(val), NumberPrototype)
    elif isinstance(val, FunctionType):
        return PyJsFunction(val, FunctionPrototype)
    elif isinstance(val, dict):  # convert to object
        temp = PyJsObject({}, ObjectPrototype)
        for k, v in six.iteritems(val):
            temp.put(Js(k), Js(v))
        return temp
    elif isinstance(val, (list, tuple)):  # convert to array
        return PyJsArray(val, ArrayPrototype)
    # convert to typedarray
    elif isinstance(val, JsObjectWrapper):
        return val.__dict__['_obj']
    elif NUMPY_AVAILABLE and isinstance(val, numpy.ndarray):
        if val.dtype == numpy.int8:
            return PyJsInt8Array(val, Int8ArrayPrototype)
        elif val.dtype == numpy.uint8 and not Clamped:
            return PyJsUint8Array(val, Uint8ArrayPrototype)
        elif val.dtype == numpy.uint8 and Clamped:
            return PyJsUint8ClampedArray(val, Uint8ClampedArrayPrototype)
        elif val.dtype == numpy.int16:
            return PyJsInt16Array(val, Int16ArrayPrototype)
        elif val.dtype == numpy.uint16:
            return PyJsUint16Array(val, Uint16ArrayPrototype)
        elif val.dtype == numpy.int32:
            return PyJsInt32Array(val, Int32ArrayPrototype)
        elif val.dtype == numpy.uint32:
            # bug fix: the original constructed PyJsUint16Array here,
            # mislabeling uint32 data as a 16-bit array.
            return PyJsUint32Array(val, Uint32ArrayPrototype)
        elif val.dtype == numpy.float32:
            return PyJsFloat32Array(val, Float32ArrayPrototype)
        elif val.dtype == numpy.float64:
            return PyJsFloat64Array(val, Float64ArrayPrototype)
    else:  # try to convert to js object
        return py_wrap(val)
def PyJsStrictEq(a, b):
    '''Implements the JS strict-equality operator ``a === b``.
    Returns a PyJs boolean (``true``/``false``).'''
    tx, ty = Type(a), Type(b)
    # values of different JS types are never strictly equal
    if tx != ty:
        return false
    if tx == 'Undefined' or tx == 'Null':
        return true
    if a.is_primitive():  #string bool and number case
        return Js(a.value == b.value)
    if a.Class == b.Class == 'PyObjectWrapper':
        # wrapped Python objects compare via their underlying objects
        return Js(a.obj == b.obj)
    return Js(a is b)  # objects compare by identity
def put(self, prop, val, op=None):  #external use!
    '''Just like in js: self.prop op= val
    for example when op is '+' it will be self.prop+=val
    op can be either None for simple assignment or one of:
    * / % + - << >> & ^ |'''
    if self.Class == 'Undefined' or self.Class == 'Null':
        raise MakeError('TypeError',
                        'Undefined and null dont have properties!')
    if not isinstance(prop, basestring):
        prop = prop.to_string().value
    # typed arrays: coerce val to the array's element dtype first
    if NUMPY_AVAILABLE and prop.isdigit():
        if self.Class == 'Int8Array':
            val = Js(numpy.int8(val.to_number().value))
        elif self.Class == 'Uint8Array':
            val = Js(numpy.uint8(val.to_number().value))
        elif self.Class == 'Uint8ClampedArray':
            # clamped semantics: saturate to [0, 255] instead of wrapping
            if val < Js(numpy.uint8(0)):
                val = Js(numpy.uint8(0))
            elif val > Js(numpy.uint8(255)):
                val = Js(numpy.uint8(255))
            else:
                val = Js(numpy.uint8(val.to_number().value))
        elif self.Class == 'Int16Array':
            val = Js(numpy.int16(val.to_number().value))
        elif self.Class == 'Uint16Array':
            val = Js(numpy.uint16(val.to_number().value))
        elif self.Class == 'Int32Array':
            val = Js(numpy.int32(val.to_number().value))
        elif self.Class == 'Uint32Array':
            val = Js(numpy.uint32(val.to_number().value))
        elif self.Class == 'Float32Array':
            val = Js(numpy.float32(val.to_number().value))
        elif self.Class == 'Float64Array':
            val = Js(numpy.float64(val.to_number().value))
        if isinstance(self.buff, numpy.ndarray):
            # write straight into the backing numpy buffer
            self.buff[int(prop)] = int(val.to_number().value)
    #we need to set the value to the incremented one
    if op is not None:
        val = getattr(self.get(prop), OP_METHODS[op])(val)
    if not self.can_put(prop):
        return val
    own_desc = self.get_own_property(prop)
    if is_data_descriptor(own_desc):
        # existing own data property: arrays go through define_own_property
        # so length/index bookkeeping is applied
        if self.Class in [
                'Array', 'Int8Array', 'Uint8Array', 'Uint8ClampedArray',
                'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array',
                'Float32Array', 'Float64Array'
        ]:
            self.define_own_property(prop, {'value': val})
        else:
            self.own[prop]['value'] = val
        return val
    desc = self.get_property(prop)
    if is_accessor_descriptor(desc):
        # inherited accessor property: invoke its setter with this=self
        desc['set'].call(self, (val, ))
    else:
        # brand-new own data property with default attributes
        new = {
            'value': val,
            'writable': True,
            'configurable': True,
            'enumerable': True
        }
        if self.Class in [
                'Array', 'Int8Array', 'Uint8Array', 'Uint8ClampedArray',
                'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array',
                'Float32Array', 'Float64Array'
        ]:
            self.define_own_property(prop, new)
        else:
            self.own[prop] = new
    return val
def abstract_relational_comparison(self, other, self_first=True):
    ''' self<other if self_first else other<self.
    Returns the result of the question: is self smaller than other?
    in case self_first is false it returns the answer of:
    is other smaller than self.
    result is PyJs type: bool or undefined'''
    px = self.to_primitive('Number')
    py = other.to_primitive('Number')
    if not self_first:  #reverse order
        px, py = py, px
    # numeric comparison applies unless BOTH primitives are strings
    if not (px.Class == 'String' and py.Class == 'String'):
        px, py = px.to_number(), py.to_number()
        if px.is_nan() or py.is_nan():
            # any NaN operand makes the relational result undefined in JS
            return undefined
        return Js(px.value < py.value)  # same cmp algorithm
    else:
        # I am pretty sure that python has the same
        # string cmp algorithm but I have to confirm it
        return Js(px.value < py.value)
def abstract_equality_comparison(self, other):
    ''' returns the result of JS == compare.
    result is PyJs type: bool'''
    tx, ty = self.TYPE, other.TYPE
    if tx == ty:
        # same JS type: behaves like strict equality
        if tx == 'Undefined' or tx == 'Null':
            return true
        if tx == 'Number' or tx == 'String' or tx == 'Boolean':
            return Js(self.value == other.value)
        return Js(self is other)  # Object
    elif (tx == 'Undefined' and ty == 'Null') or (ty == 'Undefined'
                                                  and tx == 'Null'):
        # undefined == null in JS
        return true
    elif tx == 'Number' and ty == 'String':
        return self.abstract_equality_comparison(other.to_number())
    elif tx == 'String' and ty == 'Number':
        return self.to_number().abstract_equality_comparison(other)
    elif tx == 'Boolean':
        # booleans are compared via their numeric value
        return self.to_number().abstract_equality_comparison(other)
    elif ty == 'Boolean':
        return self.abstract_equality_comparison(other.to_number())
    elif (tx == 'String' or tx == 'Number') and other.is_object():
        # object operands are reduced to primitives and the compare retried
        return self.abstract_equality_comparison(other.to_primitive())
    elif (ty == 'String' or ty == 'Number') and self.is_object():
        return self.to_primitive().abstract_equality_comparison(other)
    else:
        return false
def callprop(self, prop, *args):
    '''Call property ``prop`` of this object as a method, with this object
    bound as ``this``.
    NOTE: dont pass this and arguments here, these will be added
    automatically!'''
    key = prop if isinstance(prop, basestring) else prop.to_string().value
    func = self.get(key)
    if not func.is_callable():
        raise MakeError('TypeError',
                        '%s is not a function' % func.typeof())
    return func.call(self, args)
def _set_name(self, name):
    '''Sets this function's name; ``name`` is a plain Python string.'''
    # only update when a 'name' own-property already exists (and is truthy)
    if self.own.get('name'):
        self.func_name = name
        self.own['name']['value'] = Js(name)
def call(self, this, args=()):
    '''Calls this function and returns a result
    (converted to PyJs type so func can return python types)
    this must be a PyJs object and args must be a python tuple of PyJs objects.
    arguments object is passed automatically and will be equal to Js(args)
    (tuple converted to arguments object).You dont need to worry about number
    of arguments you provide if you supply less then missing ones will be set
    to undefined (but not present in arguments object).
    And if you supply too much then excess will not be passed
    (but they will be present in arguments object).
    '''
    if not hasattr(args, '__iter__'):  #get rid of it later
        args = (args, )
    args = tuple(Js(e) for e in args)  # this wont be needed later
    arguments = PyJsArguments(
        args, self)  # tuple will be converted to arguments object.
    arglen = self.argcount  #function expects this number of args.
    # pad/trim positional args to exactly what the function declares
    if len(args) > arglen:
        args = args[0:arglen]
    elif len(args) < arglen:
        args += (undefined, ) * (arglen - len(args))
    args += this, arguments  #append extra params to the arg list
    try:
        return Js(self.code(*args))
    except NotImplementedError:
        raise
    except RuntimeError as e:  # maximum recursion
        # NOTE(review): ``e.message`` is a Python-2 attribute; under
        # Python 3 this line would itself raise AttributeError.  The
        # isinstance check is also dead: NotImplementedError was re-raised
        # above and never reaches this handler.
        raise MakeError(
            'RangeError', e.message if
            not isinstance(e, NotImplementedError) else 'Not implemented!')
def remove_functions(source, all_inline=False):
    """removes functions and returns new source, and 2 dicts.
    first dict with removed hoisted(global) functions and second with replaced inline functions

    Hoisted function statements are cut out entirely (name -> (block, args));
    function expressions are replaced in the output source by a generated
    inline name (``iname@realname`` -> (block, args)).  With ``all_inline``
    every function is treated as an expression.
    """
    global INLINE_COUNT
    inline = {}
    hoisted = {}
    n = 0
    limit = len(source) - 9  # 8 is length of 'function'
    res = ''
    last = 0
    while n < limit:
        # skip positions inside an identifier (e.g. 'myfunction')
        if n and source[n - 1] in IDENTIFIER_PART:
            n += 1
            continue
        if source[n:n + 8] == 'function' and source[n +
                                                    8] not in IDENTIFIER_PART:
            if source[:n].rstrip().endswith(
                    '.'):  # allow function as a property name :)
                n += 1
                continue
            if source[n + 8:].lstrip().startswith(
                    ':'):  # allow functions inside objects...
                n += 1
                continue
            entered = n  # position of the 'function' keyword
            res += source[last:n]
            name = ''
            n = pass_white(source, n + 8)
            if source[n] in IDENTIFIER_START:  # hoisted function
                name, n = parse_identifier(source, n)
            args, n = pass_bracket(source, n, '()')
            if not args:
                raise SyntaxError('Function misses bracket with argnames ()')
            args = args.strip('() \n')
            args = tuple(parse_identifier(e, 0)[0]
                         for e in argsplit(args)) if args else ()
            if len(args) - len(set(args)):
                # I know its legal in JS but python does not allow duplicate argnames
                # I will not work around it
                raise SyntaxError(
                    'Function has duplicate argument names. Its not legal in this implementation. Sorry.'
                )
            block, n = pass_bracket(source, n, '{}')
            if not block:
                raise SyntaxError(
                    'Function does not have any code block to execute')
            mixed = False  # named function expression flag
            if name and not all_inline:
                # Here I will distinguish between named function expression (mixed) and a function statement
                before = source[:entered].rstrip()
                if any(endswith_keyword(before, e) for e in PRE_EXP_STARTS):
                    # preceded by e.g. 'return'/'typeof': it's an expression
                    mixed = True
                elif before and before[-1] not in PRE_ALLOWED and not before[
                        -2:] in INCREMENTS:
                    mixed = True
                else:
                    #its a function statement.
                    # todo remove label if present!
                    hoisted[name] = block, args
            if not name or mixed or all_inline:  # its a function expression (can be both named and not named)
                INLINE_COUNT += 1
                iname = INLINE_NAME % INLINE_COUNT  # inline name
                res += ' ' + iname
                inline['%s@%s' % (
                    iname, name
                )] = block, args  #here added real name at the end because it has to be added to the func scope
            last = n
        else:
            n += 1
    res += source[last:]
    return res, hoisted, inline
def ConstructArray(self, py_arr):
    '''Build a JS array from ``py_arr``.
    note py_arr elems are NOT converted to PyJs types!'''
    result = self.NewArray(len(py_arr))
    result._init(py_arr)
    return result
def ConstructObject(self, py_obj):
    '''Build a JS object from the mapping ``py_obj``.
    note py_obj items are NOT converted to PyJs types! '''
    result = self.NewObject()
    for key, value in py_obj.items():
        # property names must be unicode strings
        result.put(unicode(key), value)
    return result
def emit(self, op_code, *args):
    ''' Adds op_code with specified args to tape '''
    instruction = OP_CODES[op_code](*args)
    self.tape.append(instruction)
def compile(self, start_loc=0):
    ''' Records locations of labels and compiles the code '''
    self.label_locs = {} if self.label_locs is None else self.label_locs
    loc = start_loc
    while loc < len(self.tape):
        # LABEL pseudo-ops are recorded in label_locs and stripped from
        # the tape.  After the del, the same loc index now holds the next
        # op, hence the continue without incrementing.
        if type(self.tape[loc]) == LABEL:
            self.label_locs[self.tape[loc].num] = loc
            del self.tape[loc]
            continue
        loc += 1
    self.compiled = True
def _call(self, func, this, args):
    ''' Calls a bytecode function func
    NOTE: use !ONLY! when calling functions from native methods! '''
    assert not func.is_native
    # fake call - tell the runner to return to the end of the tape
    old_contexts = self.contexts
    old_return_locs = self.return_locs
    old_curr_ctx = self.current_ctx
    self.contexts = [FakeCtx()]
    self.return_locs = [len(self.tape)]  # target line after return
    # prepare my ctx
    my_ctx = func._generate_my_context(this, args)
    self.current_ctx = my_ctx
    # execute function
    ret = self.run(my_ctx, starting_loc=self.label_locs[func.code])
    # bring back old execution
    self.current_ctx = old_curr_ctx
    self.contexts = old_contexts
    self.return_locs = old_return_locs
    return ret
def execute_fragment_under_context(self, ctx, start_label, end_label):
    ''' just like run but returns if moved outside of the specified fragment
    # 4 different execution results
    # 0=normal, 1=return, 2=jump_outside, 3=errors
    # execute_fragment_under_context returns:
    # (return_value, typ, return_value/jump_loc/py_error)
    # IMPORTANT: It is guaranteed that the length of the ctx.stack is unchanged.
    '''
    old_curr_ctx = self.current_ctx
    self.ctx_depth += 1
    # remember sizes so partial work can be rolled back on a JS error
    old_stack_len = len(ctx.stack)
    old_ret_len = len(self.return_locs)
    old_ctx_len = len(self.contexts)
    try:
        self.current_ctx = ctx
        return self._execute_fragment_under_context(
            ctx, start_label, end_label)
    except JsException as err:
        if self.debug_mode:
            self._on_fragment_exit("js errors")
        # undo the things that were put on the stack (if any) to ensure a proper error recovery
        del ctx.stack[old_stack_len:]
        del self.return_locs[old_ret_len:]
        del self.contexts[old_ctx_len :]
        return undefined, 3, err
    finally:
        self.ctx_depth -= 1
        self.current_ctx = old_curr_ctx
        assert old_stack_len == len(ctx.stack)
def pad(num, n=2, sign=False):
    '''returns n digit string representation of the num;
    when ``sign`` is true the result is prefixed with '+' or '-' '''
    digits = unicode(abs(num))
    padded = digits.rjust(n, '0')  # left-pad with zeros up to n chars
    if not sign:
        return padded
    return ('-' if num < 0 else '+') + padded
def replacement_template(rep, source, span, npar):
    """Takes the replacement template and some info about the match and returns filled template

    :param rep: the replacement template (may contain $$, $`, $', $1..$99)
    :param source: the full subject string the match came from
    :param span: (start, end) indices of the match within source
    :param npar: sequence of captured groups (None entries become '')
    """
    n = 0
    res = ''
    # loop to len(rep)-1 because every '$' escape needs one lookahead char;
    # a final lone character is appended after the loop
    while n < len(rep) - 1:
        char = rep[n]
        if char == '$':
            if rep[n + 1] == '$':
                res += '$'
                n += 2
                continue
            elif rep[n + 1] == '`':
                # replace with string that is BEFORE match
                res += source[:span[0]]
                n += 2
                continue
            elif rep[n + 1] == '\'':
                # replace with string that is AFTER match
                res += source[span[1]:]
                n += 2
                continue
            elif rep[n + 1] in DIGS:
                # $d or $dd - numbered capture group reference
                dig = rep[n + 1]
                if n + 2 < len(rep) and rep[n + 2] in DIGS:
                    dig += rep[n + 2]
                num = int(dig)
                # we will not do any replacements if we dont have this npar or dig is 0
                if not num or num > len(npar):
                    res += '$' + dig
                else:
                    # None - undefined has to be replaced with ''
                    res += npar[num - 1] if npar[num - 1] else ''
                n += 1 + len(dig)
                continue
        res += char
        n += 1
    # append the trailing character the lookahead loop could not consume
    if n < len(rep):
        res += rep[-1]
    return res
def fix_js_args(func):
    '''Use this function when unsure whether func takes this and arguments as its last 2 args.
    It will append 2 args if it does not.'''
    fcode = six.get_function_code(func)
    # names of the last two positional parameters
    fargs = fcode.co_varnames[fcode.co_argcount - 2:fcode.co_argcount]
    # NOTE(review): the ('arguments', 'var') pair is also accepted as
    # "already JS-compatible" - presumably a translator-internal signature;
    # confirm against the code generator.
    if fargs == ('this', 'arguments') or fargs == ('arguments', 'var'):
        return func
    # rebuild the code object with ('this', 'arguments') appended
    code = append_arguments(six.get_function_code(func), ('this', 'arguments'))
    return types.FunctionType(
        code,
        six.get_function_globals(func),
        func.__name__,
        closure=six.get_function_closure(func))
def emit(self, what, *args):
    ''' what can be either name of the op, or node, or a list of statements.'''
    if isinstance(what, list):
        # a list is a sequence of statements to translate in order
        self._emit_statement_list(what)
    elif isinstance(what, basestring):
        # op name: forward to the executor
        return self.exe.emit(what, *args)
    else:
        # syntax-tree node: dispatch on its 'type' field
        return getattr(self, what['type'])(**what)
def to_key(literal_or_identifier):
    ''' returns string representation of this object (a JS property key)'''
    if literal_or_identifier['type'] == 'Identifier':
        return literal_or_identifier['name']
    elif literal_or_identifier['type'] == 'Literal':
        k = literal_or_identifier['value']
        if isinstance(k, float):
            # float_repr presumably formats per JS Number rules - confirm
            return unicode(float_repr(k))
        elif 'regex' in literal_or_identifier:
            return compose_regex(k)
        elif isinstance(k, bool):
            # note: bool is checked after float but a Python bool is not a
            # float, so the order is safe
            return 'true' if k else 'false'
        elif k is None:
            return 'null'
        else:
            return unicode(k)
def trans(ele, standard=False):
    """Translates esprima syntax tree to python by delegating to appropriate translating node"""
    try:
        # translator callables live in module globals under the esprima type name
        node = globals().get(ele['type'])
        if not node:
            raise NotImplementedError('%s is not supported!' % ele['type'])
        if standard:
            # prefer the 'standard' (statement-form) variant when defined
            node = node.__dict__[
                'standard'] if 'standard' in node.__dict__ else node
        return node(**ele)
    except:
        #print ele
        raise  # bare re-raise preserves the original traceback
def limited(func):
    '''Decorator limiting resulting line length in order to avoid python parser stack overflow -
    If expression longer than LINE_LEN_LIMIT characters then it will be moved to upper line
    USE ONLY ON EXPRESSIONS!!! '''
    def f(standard=False, **args):
        insert_pos = len(
            inline_stack.names
        )  # in case line is longer than limit we will have to insert the lval at current position
        # this is because calling func will change inline_stack.
        # we cant use inline_stack.require here because we dont know whether line overflows yet
        res = func(**args)
        if len(res) > LINE_LEN_LIMIT:
            # overflow: hoist the expression into a named helper defined above
            name = inline_stack.require('LONG')
            # move the freshly-required name to the position recorded before
            # func ran, so definitions stay in dependency order
            inline_stack.names.pop()
            inline_stack.names.insert(insert_pos, name)
            res = 'def %s(var=var):\n return %s\n' % (name, res)
            inline_stack.define(name, res)
            return name + '()'
        else:
            return res
    # keep the undecorated variant accessible for statement contexts
    f.__dict__['standard'] = func
    return f
def is_lval(t):
    """Return True when ``t`` is shaped like a JS identifier: first char in
    IDENTIFIER_START, remaining chars in IDENTIFIER_PART.

    Does not check whether t is not restricted or internal.
    """
    if not t:
        return False
    i = iter(t)
    # Bug fix: ``i.next()`` is Python-2-only; the builtin next() works on
    # both Python 2.6+ and Python 3.
    if next(i) not in IDENTIFIER_START:
        return False
    return all(e in IDENTIFIER_PART for e in i)
def is_valid_lval(t):
    """Checks whether t is valid JS identifier name (no keyword like var, function, if etc)
    Also returns false on internal"""
    # valid = well-formed identifier that is neither internal nor reserved
    return bool(not is_internal(t) and is_lval(t) and t not in RESERVED_NAMES)
def translate_js(js, HEADER=DEFAULT_HEADER, use_compilation_plan=False):
    """js has to be a javascript source code.
    returns equivalent python code."""
    # the cached-plan fast path is only safe when the source has no comments
    if use_compilation_plan and '//' not in js and '/*' not in js:
        return translate_js_with_compilation_plan(js, HEADER=HEADER)
    parsed = pyjsparser.PyJsParser().parse(js)  # js to esprima syntax tree
    # Another way of doing that would be with my auto esprima translation but its much slower and causes import problems:
    # parsed = esprima.parse(js).to_dict()
    translating_nodes.clean_stacks()
    return HEADER + translating_nodes.trans(parsed)
def translate_js_with_compilation_plan(js, HEADER=DEFAULT_HEADER):
    """js has to be a javascript source code.
    returns equivalent python code.
    compile plans only work with the following restrictions:
    - only enabled for oneliner expressions
    - when there are comments in the js code string substitution is disabled
    - when there nested escaped quotes string substitution is disabled, so
    cacheable:
    Q1 == 1 && name == 'harry'
    not cacheable:
    Q1 == 1 && name == 'harry' // some comment
    not cacheable:
    Q1 == 1 && name == 'o\'Reilly'
    not cacheable:
    Q1 == 1 && name /* some comment */ == 'o\'Reilly'
    """
    match_increaser_str, match_increaser_num, compilation_plan = get_compilation_plan(
        js)
    # the cache key is the md5 of the normalized plan, not of the raw source
    cp_hash = hashlib.md5(compilation_plan.encode('utf-8')).digest()
    try:
        python_code = cache[cp_hash]['proto_python_code']
    except:
        # cache miss (or malformed entry): translate the plan and store it
        parser = pyjsparser.PyJsParser()
        parsed = parser.parse(compilation_plan)  # js to esprima syntax tree
        # Another way of doing that would be with my auto esprima translation but its much slower and causes import problems:
        # parsed = esprima.parse(js).to_dict()
        translating_nodes.clean_stacks()
        python_code = translating_nodes.trans(
            parsed)  # syntax tree to python code
        cache[cp_hash] = {
            'compilation_plan': compilation_plan,
            'proto_python_code': python_code,
        }
    # re-substitute the literals that the plan abstracted away
    python_code = match_increaser_str.wrap_up(python_code)
    python_code = match_increaser_num.wrap_up(python_code)
    return HEADER + python_code
def import_js(path, lib_name, globals):
    """Imports from javascript source file.
    globals is your globals()

    Executes the file in a fresh EvalJs context and binds the JS global
    scope (converted to Python) into ``globals`` under ``lib_name``.
    NOTE(review): the parameter name ``globals`` shadows the builtin; kept
    for interface compatibility.
    """
    with codecs.open(path_as_local(path), "r", "utf-8") as f:
        js = f.read()
    e = EvalJs()
    e.execute(js)
    # 'var' holds the JS global scope object of the executed program
    var = e.context['var']
    globals[lib_name] = var.to_python()
def translate_file(input_path, output_path):
    '''
    Translates the input JS file to python and saves it to the output path.
    It appends some convenience code at the end so that it is easy to import JS objects.
    For example we have a file 'example.js' with: var a = function(x) {return x}
    translate_file('example.js', 'example.py')
    Now example.py can be easily imported and used:
    >>> from example import example
    >>> example.a(30)
    30
    '''
    js = get_file_contents(input_path)
    py_code = translate_js(js)
    # the module alias is derived from the output file name (sans extension)
    lib_name = os.path.basename(output_path).split('.')[0]
    head = '__all__ = [%s]\n\n# Don\'t look below, you will not understand this Python code :) I don\'t.\n\n' % repr(
        lib_name)
    tail = '\n\n# Add lib to the module scope\n%s = var.to_python()' % lib_name
    out = head + py_code + tail
    write_file_contents(output_path, out)
def run_file(path_or_file, context=None):
    ''' Context must be EvalJS object. Runs given path as a JS program. Returns (eval_value, context).
    A fresh EvalJs context is created when none is supplied.
    '''
    if context is None:
        context = EvalJs()
    elif not isinstance(context, EvalJs):
        raise TypeError('context must be the instance of EvalJs')
    source = get_file_contents(path_or_file)
    return context.eval(source), context
def execute(self, js=None, use_compilation_plan=False):
    """Execute the javascript snippet *js* in the current context.

    The translated and byte-compiled form of each snippet is memoised in an
    instance-level dict keyed by the md5 of the source, so re-running the
    same snippet skips the expensive JS=>Python translation step.  The
    cache is a plain dict without expiration or cleanup, so memory usage
    can grow when vast numbers of distinct snippets are executed.
    """
    cache = self.__dict__.setdefault('cache', {})
    key = hashlib.md5(js.encode('utf-8')).digest()
    compiled = cache.get(key)
    if compiled is None:
        python_code = translate_js(
            js, '', use_compilation_plan=use_compilation_plan)
        compiled = cache[key] = compile(python_code, '<EvalJS snippet>',
                                        'exec')
    exec (compiled, self._context)
def eval(self, expression, use_compilation_plan=False):
    """Evaluate *expression* in the current context and return its value."""
    self.execute(
        'PyJsEvalResult = eval(%s)' % json.dumps(expression),
        use_compilation_plan=use_compilation_plan)
    return self['PyJsEvalResult']
def execute_debug(self, js):
    """Execute javascript *js* in the current context via a temp file.

    Unlike the (faster) :meth:`execute`, the generated python code is
    written to a real file first, so a regular debugger can set breakpoints
    in it and inspect it.  The temp file (and its stale bytecode file, if
    any) is removed afterwards.
    """
    code = translate_js(js, '')
    # make sure you have a temp folder:
    filename = 'temp' + os.sep + '_' + hashlib.md5(
        code.encode("utf-8")).hexdigest() + '.py'
    # the redundant `except Exception as err: raise err` was removed; the
    # `finally` clause already guarantees cleanup on any failure
    try:
        with open(filename, mode='w') as f:
            f.write(code)
        with open(filename, "r") as f:
            pyCode = compile(f.read(), filename, 'exec')
        exec(pyCode, self._context)
    finally:
        os.remove(filename)
        try:
            # best effort: the stale bytecode file may not exist; only
            # swallow filesystem errors, not KeyboardInterrupt/SystemExit
            os.remove(filename + 'c')
        except OSError:
            pass
def eval_debug(self, expression):
    """Evaluate *expression* in the current context and return its value.

    Debugger-friendly variant: goes through :meth:`execute_debug`, so you
    can set breakpoints in (and inspect) the generated python code with a
    regular debugger.
    """
    self.execute_debug('PyJsEvalResult = eval(%s)' % json.dumps(expression))
    return self['PyJsEvalResult']
def console(self):
    """Start an interactive console, similar to code.InteractiveConsole.

    Reads lines from stdin, evaluates them in this context and prints the
    result.  Ctrl-C exits the loop; any other exception is reported (full
    traceback when DEBUG is on) and the loop continues.
    """
    while True:
        if six.PY2:
            code = raw_input('>>> ')
        else:
            # fixed: prompt now matches the PY2 branch (was '>>>')
            code = input('>>> ')
        try:
            print(self.eval(code))
        except KeyboardInterrupt:
            break
        except Exception as e:
            import traceback
            if DEBUG:
                sys.stderr.write(traceback.format_exc())
            else:
                sys.stderr.write('EXCEPTION: ' + str(e) + '\n')
            time.sleep(0.01)
def to_key(literal_or_identifier):
    '''Return the string key for an esprima Identifier/Literal node.'''
    node_type = literal_or_identifier['type']
    if node_type == 'Identifier':
        return literal_or_identifier['name']
    if node_type == 'Literal':
        value = literal_or_identifier['value']
        if isinstance(value, float):
            return unicode(float_repr(value))
        if 'regex' in literal_or_identifier:
            return compose_regex(value)
        if isinstance(value, bool):
            return u'true' if value else u'false'
        if value is None:
            return u'null'
        return unicode(value)
def translate_js(js, top=TOP_GLOBAL):
    """Translate JavaScript source *js* into equivalent python source.

    Works in stages: string/regex/number literals, object literals, arrays
    and functions are first replaced with placeholder LVAL identifiers, the
    remaining control flow is translated, and then every removed construct
    is translated separately and injected back just before its first use.
    *top* is boilerplate prepended to the result (defaults to TOP_GLOBAL).
    """
    # Remove constant literals
    no_const, constants = remove_constants(js)
    #print 'const count', len(constants)
    # Remove object literals
    no_obj, objects, obj_count = remove_objects(no_const)
    #print 'obj count', len(objects)
    # Remove arrays
    no_arr, arrays, arr_count = remove_arrays(no_obj)
    #print 'arr count', len(arrays)
    # Here remove and replace functions
    reset_inline_count()
    no_func, hoisted, inline = remove_functions(no_arr)
    #translate flow and expressions
    py_seed, to_register = translate_flow(no_func)
    # register declared variables and hoisted function names
    # (py2 idiom: hoisted.keys() is a list, so + concatenates)
    #top += '# register variables\n'
    top += 'var.registers(%s)\n' % str(to_register + hoisted.keys())
    #Recover functions
    # hoisted functions recovery: each is defined under a temp name and
    # then bound to its real name via var.put
    defs = ''
    #defs += '# define hoisted functions\n'
    #print len(hoisted) , 'HH'*40
    for nested_name, nested_info in hoisted.iteritems():
        nested_block, nested_args = nested_info
        new_code = translate_func('PyJsLvalTempHoisted', nested_block,
                                  nested_args)
        new_code += 'PyJsLvalTempHoisted.func_name = %s\n' % repr(nested_name)
        defs += new_code + '\nvar.put(%s, PyJsLvalTempHoisted)\n' % repr(
            nested_name)
    #defs += '# Everything ready!\n'
    # inline functions recovery: injected just before first use of the LVAL
    for nested_name, nested_info in inline.iteritems():
        nested_block, nested_args = nested_info
        new_code = translate_func(nested_name, nested_block, nested_args)
        py_seed = inject_before_lval(py_seed,
                                     nested_name.split('@')[0], new_code)
    # add hoisted definitions first - they have literals to be recovered
    py_seed = defs + py_seed
    #Recover arrays
    for arr_lval, arr_code in arrays.iteritems():
        translation, obj_count, arr_count = translate_array(
            arr_code, arr_lval, obj_count, arr_count)
        py_seed = inject_before_lval(py_seed, arr_lval, translation)
    #Recover objects
    for obj_lval, obj_code in objects.iteritems():
        translation, obj_count, arr_count = translate_object(
            obj_code, obj_lval, obj_count, arr_count)
        py_seed = inject_before_lval(py_seed, obj_lval, translation)
    #Recover constants (must be last - earlier stages insert no literals)
    py_code = recover_constants(py_seed, constants)
    return top + py_code
def translate_func(name, block, args):
    """Translate a JS function (and all of its nested functions) to Python.

    name  - name of the function (global functions will be available under
            var while inline ones directly under this name).  Inline names
            have the format LVAL_NAME@REAL_NAME.
    block - code of the function (*with* the surrounding brackets {})
    args  - argument names that this function takes

    Returns the python source of an @Js-decorated function definition.
    """
    inline = name.startswith('PyJsLvalInline')
    real_name = ''
    if inline:
        name, real_name = name.split('@')
    arglist = ', '.join(args) + ', ' if args else ''
    code = '@Js\ndef %s(%sthis, arguments, var=var):\n' % (name, arglist)
    # build the local scope: a simple dict seeded with this/arguments,
    # the declared parameters, and (for named inline funcs) the function
    # itself so it can recurse by its real name
    scope = "'this':this, 'arguments':arguments"  #it will be a simple dictionary
    for arg in args:
        scope += ', %s:%s' % (repr(arg), arg)
    if real_name:
        scope += ', %s:%s' % (repr(real_name), name)
    code += indent('var = Scope({%s}, var)\n' % scope)
    block, nested_hoisted, nested_inline = remove_functions(block)
    py_code, to_register = translate_flow(block)
    #register variables declared with var and names of hoisted functions.
    to_register += nested_hoisted.keys()
    if to_register:
        code += indent('var.registers(%s)\n' % str(to_register))
    for nested_name, info in nested_hoisted.iteritems():
        nested_block, nested_args = info
        new_code = translate_func('PyJsLvalTempHoisted', nested_block,
                                  nested_args)
        # Now put definition of hoisted function on the top
        code += indent(new_code)
        code += indent(
            'PyJsLvalTempHoisted.func_name = %s\n' % repr(nested_name))
        code += indent(
            'var.put(%s, PyJsLvalTempHoisted)\n' % repr(nested_name))
    for nested_name, info in nested_inline.iteritems():
        nested_block, nested_args = info
        new_code = translate_func(nested_name, nested_block, nested_args)
        # Inject definitions of inline functions just before usage
        # nested inline names have this format : LVAL_NAME@REAL_NAME
        py_code = inject_before_lval(py_code,
                                     nested_name.split('@')[0], new_code)
    if py_code.strip():
        code += indent(py_code)
    return code
def pass_bracket(source, start, bracket='()'):
    """If source[start] is followed by optional white space and then an
    opening bracket, return (bracket content *including* the brackets,
    first position after the closing bracket).  Otherwise (None, None)."""
    pieces = bracket_split(source[start:], [bracket], False)
    try:
        first = pieces.next()
    except StopIteration:
        return None, None
    if not first.strip():  # only white space before the bracket
        try:
            content = pieces.next()
        except StopIteration:
            return None, None
        return content, start + len(first) + len(content)
    if first[-1] == bracket[1]:
        # the bracket group starts immediately at `start`
        return first, start + len(first)
    return None, None
def except_token(source, start, token, throw=True):
    """Expect a single-char *token* after optional white space.
    Returns the position right after it when found; otherwise raises
    SyntaxError when *throw*, else returns None."""
    pos = pass_white(source, start)
    if pos < len(source) and source[pos] == token:
        return pos + 1
    if not throw:
        return None
    raise SyntaxError('Missing token. Expected %s' % token)
def except_keyword(source, start, keyword):
    """Return the position just past *keyword* when it starts (as a whole
    word) at the first non-white position, else None.
    Note: skips white space."""
    pos = pass_white(source, start)
    end = pos + len(keyword)
    if end > len(source):
        return None
    if source[pos:end] != keyword:
        return None
    # must be a whole word - the next char cannot extend an identifier
    if end < len(source) and source[end] in IDENTIFIER_PART:
        return None
    return end
def parse_identifier(source, start, throw=True):
    """Skip white space from *start* and parse one identifier.
    Returns (identifier, end position); on an invalid identifier raises
    SyntaxError when *throw*, otherwise returns None."""
    pos = pass_white(source, start)
    if pos >= len(source):
        if throw:
            raise SyntaxError('Missing identifier!')
        return None
    if source[pos] not in IDENTIFIER_START:
        if throw:
            raise SyntaxError('Invalid identifier start: "%s"' % source[pos])
        return None
    end = pos + 1
    while end < len(source) and source[end] in IDENTIFIER_PART:
        end += 1
    name = source[pos:end]
    if not is_valid_lval(name):
        if throw:
            raise SyntaxError(
                'Invalid identifier name: "%s"' % name)
        return None
    return name, end
def argsplit(args, sep=','):
    """Split a JS argument list *args* (given *without* the surrounding
    brackets) at top-level occurrences of *sep*; separators nested inside
    (), [] or {} are ignored.  Also used to parse array and object
    elements, and more."""
    consumed = 0
    piece_start = 0
    out = []
    for chunk in bracket_split(args, brackets=['()', '[]', '{}']):
        if chunk[0] not in {'(', '[', '{'}:
            # top-level text: every sep found here is a real separator
            for offset, ch in enumerate(chunk):
                if ch == sep:
                    out.append(args[piece_start:consumed + offset])
                    piece_start = consumed + offset + 1
        consumed += len(chunk)
    out.append(args[piece_start:])
    return out
def split_add_ops(text):
    """Yield *text* split at top-level add/sub operators; operands are
    *not* translated.  Example result: ['op1', '+', 'op2', '-', 'op3'].
    ++/-- are masked first so they are never mistaken for two operators."""
    # ## and @@ never appear in normal input, so they are safe placeholders
    guarded = text.replace('++', '##').replace('--', '@@')
    seen_operand = False  # anything other than +-, */% or white space seen?
    piece_start = 0
    for pos, ch in enumerate(guarded):
        if ch == '+' or ch == '-':
            if seen_operand:
                yield guarded[piece_start:pos].replace('##', '++').replace(
                    '@@', '--')
                yield ch
                piece_start = pos + 1
                seen_operand = False
        elif ch == '/' or ch == '*' or ch == '%':
            # after a binary mul/div/mod operator a +/- must be unary
            seen_operand = False
        elif ch != ' ':
            seen_operand = True
    yield guarded[piece_start:].replace('##', '++').replace('@@', '--')
def split_at_any(text,
                 lis,
                 translate=False,
                 not_before=(),
                 not_after=(),
                 validitate=None):
    """Split *text* at any separator from *lis*; longest separators win
    (so e.g. '==' is preferred over '=').

    Yields alternating fragments and the separators found.  A match is
    rejected when the text before it ends with any of *not_before*, when
    the text after it starts with any of *not_after*, or when the
    *validitate* callback (sep, text_before, text_after) returns falsy.
    When *translate* is a callable it is applied to every fragment.

    Fixed: mutable default arguments replaced with tuples, and *lis* is no
    longer sorted in place (the caller's list is left untouched).
    """
    seps = sorted(lis, key=len, reverse=True)
    last = 0
    n = 0
    text_len = len(text)
    while n < text_len:
        if any(text[:n].endswith(e)
               for e in not_before):  # cant come after a not_before suffix
            n += 1
            continue
        for e in seps:
            s = len(e)
            if s + n > text_len:
                continue
            if validitate and not validitate(e, text[:n], text[n + s:]):
                continue
            if any(text[n + s:].startswith(e)
                   for e in not_after):  # cant come before a not_after prefix
                n += 1
                break
            if e == text[n:n + s]:
                yield text[last:n] if not translate else translate(
                    text[last:n])
                yield e
                n += s
                last = n
                break
        else:
            n += 1
    yield text[last:n] if not translate else translate(text[last:n])
def split_at_single(text, sep, not_before=[], not_after=[]):
"""Works like text.split(sep) but separated fragments
cant end with not_before or start with not_after"""
n = 0
lt, s = len(text), len(sep)
last = 0
while n < lt:
if not s + n > lt:
if sep == text[n:n + s]:
if any(text[last:n].endswith(e) for e in not_before):
pass
elif any(text[n + s:].startswith(e) for e in not_after):
pass
else:
yield text[last:n]
last = n + s
n += s - 1
n += 1
yield text[last:] |
def to_arr(this):
    """Convert a Js array object into a plain Python list."""
    result = []
    for index in xrange(len(this)):
        # Js arrays are indexed with string keys
        result.append(this.get(str(index)))
    return result
def transform_crap(code):  #needs some more tests
    """Convert JS ternary ``cond ? a : b`` expressions into the python
    conditional form ``(a if cond else b)``, working right-to-left."""
    q = code.rfind('?')
    if q == -1:
        return code  # nothing left to convert
    colon = code.find(':', q)
    if colon == -1:
        raise SyntaxError('Invalid ?: syntax (probably missing ":" )')
    # the condition starts after the previous ':' or '?' (whichever is later)
    begin = max(code.rfind(':', 0, q), code.find('?', 0, q)) + 1
    stop = code.find(':', colon + 1)
    if stop == -1:
        stop = len(code)
    converted = ('(' + code[q + 1:colon] + ' if ' + code[begin:q] + ' else ' +
                 code[colon + 1:stop] + ')')
    return transform_crap(code[:begin] + converted + code[stop:])
def rl(self, lis, op):
    """Fold *lis* with the 2-argument *op*, associating from *right to
    left*: a,b,c => op(a, op(b, c)).  Elements are translated with trans."""
    it = reversed(lis)
    acc = trans(it.next())
    for item in it:
        acc = op(trans(item), acc)
    return acc
def lr(self, lis, op):
    """Fold *lis* with the 2-argument *op*, associating from *left to
    right*: a,b,c => op(op(a, b), c).  Elements are translated with trans."""
    it = iter(lis)
    acc = trans(it.next())
    for item in it:
        acc = op(acc, trans(item))
    return acc
def translate(self):
    """Translate the outermost operation of self.code and recurse (via
    trans) into the operands.  Returns fully translated python code.

    Handles, in precedence order: the comma operator, ?: ternaries,
    assignments (including compound ones like += which become var.put with
    an op argument), binary operators, unary operators, brackets, and
    finally function calls / property access chains.
    """
    if not self.code:
        return ''
    new = bracket_replace(self.code)
    #Check comma operator:
    cand = new.split(',')  #every comma in new must be an operator
    if len(cand) > 1:  #LR
        return self.lr(cand, js_comma)
    #Check = operator:
    # dont split at != or !== or == or === or <= or >=
    #note <<=, >>= or this >>> will NOT be supported
    # maybe I will change my mind later
    # Find this crappy ?:
    if '?' in new:
        # find where the ternary expression starts: just after the last
        # assignment operator that precedes the '?'
        cond_ind = new.find('?')
        tenary_start = 0
        for ass in re.finditer(ASSIGNMENT_MATCH, new):
            cand = ass.span()[1]
            if cand < cond_ind:
                tenary_start = cand
            else:
                break
        actual_tenary = new[tenary_start:]
        # translate operands, then convert ?: into python if/else form
        spl = ''.join(split_at_any(new, [':', '?'], translate=trans))
        tenary_translation = transform_crap(spl)
        assignment = new[:tenary_start] + ' PyJsConstantTENARY'
        return trans(assignment).replace('PyJsConstantTENARY',
                                         tenary_translation)
    cand = list(split_at_single(new, '=', ['!', '=', '<', '>'], ['=']))
    if len(cand) > 1:  # RL
        # assignment: translate right-to-left, converting each left-hand
        # .get(...) into a .put(..., value [, op]) call
        it = reversed(cand)
        res = trans(it.next())
        for e in it:
            e = e.strip()
            if not e:
                raise SyntaxError('Missing left-hand in assignment!')
            op = ''
            if e[-2:] in OP_METHODS:
                op = ',' + e[-2:].__repr__()
                e = e[:-2]
            elif e[-1:] in OP_METHODS:
                op = ',' + e[-1].__repr__()
                e = e[:-1]
            e = trans(e)
            #Now replace last get method with put and change args
            c = list(bracket_split(e, ['()']))
            beg, arglist = ''.join(c[:-1]).strip(), c[-1].strip(
            )  #strips just to make sure... I will remove it later
            if beg[-4:] != '.get':
                raise SyntaxError('Invalid left-hand side in assignment')
            beg = beg[0:-3] + 'put'
            arglist = arglist[0:-1] + ', ' + res + op + ')'
            res = beg + arglist
        return res
    #Now check remaining 2 arg operators that are not handled by python
    #They all have Left to Right (LR) associativity
    order = [OR, AND, BOR, BXOR, BAND, EQS, COMPS, BSHIFTS, ADDS, MULTS]
    # actually we dont need OR and AND because they can be handled easier. But just for fun
    dangerous = ['<', '>']
    for typ in order:
        #we have to use special method for ADDS since they can be also unary operation +/++ or -/-- FUCK
        if '+' in typ:
            cand = list(split_add_ops(new))
        else:
            #dont translate. cant start or end on dangerous op.
            cand = list(
                split_at_any(
                    new,
                    typ.keys(),
                    False,
                    dangerous,
                    dangerous,
                    validitate=comb_validitator))
        if not len(cand) > 1:
            continue
        # cand alternates operand, operator, operand, ...
        n = 1
        res = trans(cand[0])
        if not res:
            raise SyntaxError("Missing operand!")
        while n < len(cand):
            e = cand[n]
            if not e:
                raise SyntaxError("Missing operand!")
            if n % 2:
                op = typ[e]
            else:
                res = op(res, trans(e))
            n += 1
        return res
    #Now replace unary operators - only they are left
    cand = list(
        split_at_any(
            new, UNARY.keys(), False, validitate=unary_validitator))
    if len(cand) > 1:  #contains unary operators
        if '++' in cand or '--' in cand:  #it cant contain both ++ and --
            if '--' in cand:
                op = '--'
                meths = js_post_dec, js_pre_dec
            else:
                op = '++'
                meths = js_post_inc, js_pre_inc
            pos = cand.index(op)
            # non-empty fragment before => post-, after => pre- inc/dec
            if cand[pos - 1].strip():  # post increment
                a = cand[pos - 1]
                meth = meths[0]
            elif cand[pos + 1].strip():  #pre increment
                a = cand[pos + 1]
                meth = meths[1]
            else:
                raise SyntaxError('Invalid use of ++ operator')
            if cand[pos + 2:]:
                raise SyntaxError('Too many operands')
            operand = meth(trans(a))
            cand = cand[:pos - 1]
            # now last cand should be operand and every other odd element should be empty
        else:
            operand = trans(cand[-1])
            del cand[-1]
        # apply remaining unary operators inside-out (right to left)
        for i, e in enumerate(reversed(cand)):
            if i % 2:
                if e.strip():
                    raise SyntaxError('Too many operands')
            else:
                operand = UNARY[e](operand)
        return operand
    #Replace brackets
    if new[0] == '@' or new[0] == '#':
        if len(
                list(bracket_split(new, ('#{', '@}')))
        ) == 1:  # we have only one bracket, otherwise pseudobracket like @@....
            assert new in REPL
            if new[0] == '#':
                raise SyntaxError(
                    '[] cant be used as brackets! Use () instead.')
            return '(' + trans(REPL[new][1:-1]) + ')'
    #Replace function calls and prop getters
    # 'now' must be a reference like: a or b.c.d but it can have also calls or getters ( for example a["b"](3))
    #From here @@ means a function call and ## means get operation (note they dont have to present)
    it = bracket_split(new, ('#{', '@}'))
    res = []
    for e in it:
        if e[0] != '#' and e[0] != '@':
            res += [x.strip() for x in e.split('.')]
        else:
            res += [e.strip()]
    # res[0] can be inside @@ (name)...
    res = filter(lambda x: x, res)
    # translate the base reference first
    if is_internal(res[0]):
        out = res[0]
    elif res[0][0] in {'#', '@'}:
        out = '(' + trans(REPL[res[0]][1:-1]) + ')'
    elif is_valid_lval(
            res[0]) or res[0] in {'this', 'false', 'true', 'null'}:
        out = 'var.get(' + res[0].__repr__() + ')'
    else:
        if is_reserved(res[0]):
            raise SyntaxError('Unexpected reserved word: "%s"' % res[0])
        raise SyntaxError('Invalid identifier: "%s"' % res[0])
    if len(res) == 1:
        return out
    # chain the remaining segments as callprop/get/direct calls
    n = 1
    while n < len(res):  #now every func call is a prop call
        e = res[n]
        if e[0] == '@':  # direct call
            out += trans_args(REPL[e])
            n += 1
            continue
        args = False  #assume not prop call
        if n + 1 < len(res) and res[n + 1][0] == '@':  #prop call
            args = trans_args(REPL[res[n + 1]])[1:]
            if args != ')':
                args = ',' + args
        if e[0] == '#':
            prop = trans(REPL[e][1:-1])
        else:
            if not is_lval(e):
                raise SyntaxError('Invalid identifier: "%s"' % e)
            prop = e.__repr__()
        if args:  # prop call
            n += 1
            out += '.callprop(' + prop + args
        else:  #prop get
            out += '.get(' + prop + ')'
        n += 1
    return out
def do_statement(source, start):
    """Parse a single statement starting at (or after) *start*.

    Returns (translated code, position after the statement), or
    (None, position) when nothing can be parsed here (EOF, or a
    case/default label which the caller handles).  Other ``do_`` helpers
    raise on errors; leading white space is skipped.
    """
    start = pass_white(source, start)
    # start is the fist position after initial start that is not a white space or \n
    if not start < len(source):  #if finished parsing return None
        return None, start
    if any(startswith_keyword(source[start:], e) for e in {'case', 'default'}):
        return None, start
    rest = source[start:]
    for key, meth in KEYWORD_METHODS.iteritems(
    ):  # check for statements that are uniquely defined by their keywords
        if rest.startswith(key):
            # has to startwith this keyword and the next letter after keyword must be either EOF or not in IDENTIFIER_PART
            if len(key) == len(rest) or rest[len(key)] not in IDENTIFIER_PART:
                return meth(source, start)
    if rest[0] == '{':  #Block
        return do_block(source, start)
    # Now only label and expression left
    cand = parse_identifier(source, start, False)
    if cand is not None:  # it can mean that its a label
        label, cand_start = cand
        cand_start = pass_white(source, cand_start)
        # fixed: guard against IndexError when the identifier ends exactly
        # at the end of source (previously source[cand_start] could be
        # out of range)
        if cand_start < len(source) and source[cand_start] == ':':
            return do_label(source, start)
    return do_expression(source, start)
def match(self, string, pos):
    '''Run the underlying compiled regex against *string* (a plain python
    string) starting at position *pos*.'''
    start_index = int(pos)
    return self.pat.match(string, start_index)
def call(self, this, args=()):
    '''Invoke this function object with the given *this* and *args*.
    Dont use this method from inside bytecode to call other bytecode.'''
    if not self.is_native:
        # interpreted function: delegate to the executor
        return self.space.exe._call(self, this, args)
    # native function: wrap args so the callee can reach the space
    wrapped = SpaceTuple(args)
    wrapped.space = self.space
    # must return a valid js object - undefined, null, float, unicode,
    # bool, or PyJs
    return self.code(this, wrapped)
def is_empty_object(n, last):
    """Decide whether empty brace content *n* is an empty *object* literal
    rather than an empty code block, judging by the preceding code *last*."""
    if n.strip():
        return False  # not empty at all
    # seems empty, but it can still be an empty code block -
    # decide by what comes right before the braces
    tail = last.strip()
    if not tail or tail[-1] in (')', ';'):
        return False
    return True
def is_object(n, last):
    """Decide whether brace content *n* is an object literal.
    *last* is the code that precedes the braces."""
    if is_empty_object(n, last):
        return True
    if not n.strip():
        return False
    # several statements inside => it is a code block, not an object
    if len(argsplit(n, ';')) > 1:
        return False
    elements = argsplit(n, ',')
    if not elements[-1].strip():
        return True  # {xxxx,} empty after last , it must be an object
    for element in elements:
        element = element.strip()
        # split each candidate element at ':' and validate the pair
        pair = argsplit(element, ':')
        if len(pair) > 2:
            # the value itself may contain ':' (ternary) - re-join it
            pair = pair[0], ':'.join(pair[1:])
        if len(pair) == 2:
            # key value pair, check whether not label or ?:
            key, value = pair
            if not is_lval(key.strip()):
                return False
            value = value.strip()
            if value.startswith('function'):
                continue
            #will fail on label... {xxx: while {}}
            if value[0] == '{':  # value cant be a code block
                return False
            for stmt in KEYWORD_METHODS:
                # a value starting with a statement keyword => code block
                if value.startswith(stmt) and len(stmt) < len(value) and value[
                        len(stmt)] not in IDENTIFIER_PART:
                    return False
        elif not (element.startswith('set ') or element.startswith('get ')):
            return False
    return True
def remove_objects(code, count=1):
    """Replace object literals in *code* with OBJECT_LVAL placeholders.
    Returns (new code, replacement dict, next count); *count* is the number
    used for the LVAL of the first replaced object."""
    replacements = {}  #replacement dict
    out = ''
    previous = ''
    for chunk in bracket_split(code, ['{}', '[]']):
        if chunk[0] == '{':
            # recurse first: the braces may contain nested objects
            inner, inner_rep, inner_count = remove_objects(chunk[1:-1], count)
            # if chunk was not an object then inner should not contain any :
            if is_object(inner, previous):
                # chunk was an object literal - swap it for a placeholder
                out += ' ' + OBJECT_LVAL % count
                replacements[OBJECT_LVAL % count] = chunk
                count += 1
            else:
                # plain code block, but it could contain objects inside
                out += '{%s}' % inner
                count = inner_count
                replacements.update(inner_rep)
        elif chunk[0] == '[':
            if is_array(previous):
                out += chunk  # true array - will be translated later
            else:  # prop get like a[...]
                inner, inner_rep, count = remove_objects(chunk[1:-1], count)
                out += '[%s]' % inner
                replacements.update(inner_rep)
        else:  # chunk does not contain any objects
            out += chunk
        previous = chunk  #needed to recognise the empty-object case
    return out, replacements, count
def remove_arrays(code, count=1):
    """Replace array literals with ARRAY_LVAL placeholders.
    Returns (new code, replacement dict, next count).
    *NOTE* has to be called AFTER remove_objects."""
    out = ''
    previous = ''
    replacements = {}
    for chunk in bracket_split(code, ['[]']):
        if chunk[0] != '[':
            out += chunk
        elif is_array(previous):
            name = ARRAY_LVAL % count
            out += ' ' + name
            replacements[name] = chunk
            count += 1
        else:
            # pseudo array (prop get), but it can still contain a true
            # array: a[['d'][3]] has 2 pseudo and 1 true array
            inner, inner_rep, count = remove_arrays(chunk[1:-1], count)
            out += '[%s]' % inner
            replacements.update(inner_rep)
        previous = chunk
    return out, replacements, count
def translate_array(array, lval, obj_count=1, arr_count=1):
    """Translate a JS array literal (e.g. ``[1,2,3]``) into python code
    that binds it to the name *lval* in the PY scope.  The returned code
    must be inserted before the first use of *lval*.
    Returns (code, obj_count, arr_count)."""
    inner = array[1:-1]
    inner, obj_rep, obj_count = remove_objects(inner, obj_count)
    inner, arr_rep, arr_count = remove_arrays(inner, arr_count)
    # functions can also be defined inside arrays; since python cannot
    # define functions inside a literal, pull them out first (they contain
    # no arrays or objects themselves, so translate easily).
    # hoisted functions are treated like inline ones here.
    inner, hoisted, inline = functions.remove_functions(inner, all_inline=True)
    assert not hoisted
    elements = []
    # separate and translate each element in the array
    for raw in argsplit(inner, ','):
        # expressions are translated; PyJsLvalInline will not be translated!
        translated = exp_translator(raw.replace('\n', ''))
        elements.append(translated if translated else 'None')
    code = '%s = Js([%s])\n' % (lval, ','.join(elements))
    # prepend definitions for everything pulled out above (they must come
    # BEFORE the array definition).  Loop variables are deliberately named
    # so they do not shadow the `lval` parameter.
    for func_name, func_info in inline.iteritems():
        func_block, func_args = func_info
        code = FUNC_TRANSLATOR(func_name, func_block, func_args) + code
    for obj_lval, obj_code in obj_rep.iteritems():
        new_def, obj_count, arr_count = translate_object(
            obj_code, obj_lval, obj_count, arr_count)
        code = new_def + code
    for arr_lval, arr_code in arr_rep.iteritems():
        new_def, obj_count, arr_count = translate_array(
            arr_code, arr_lval, obj_count, arr_count)
        code = new_def + code
    return code, obj_count, arr_count
def _ensure_regexp(source, n): #<- this function has to be improved
'''returns True if regexp starts at n else returns False
checks whether it is not a division '''
markers = '(+~"\'=[%:?!*^|&-,;/\\'
k = 0
while True:
k += 1
if n - k < 0:
return True
char = source[n - k]
if char in markers:
return True
if char != ' ' and char != '\n':
break
return False |
def parse_num(source, start, charset):
    """Return the first index >= *start* whose character is not in
    *charset* (i.e. skip over a run of charset characters)."""
    pos = start
    length = len(source)
    while pos < length and source[pos] in charset:
        pos += 1
    return pos
def parse_exponent(source, start):
    """Return the position after an exponent part (e.g. ``e+10``) starting
    at *start*; raises SyntaxError when the exponent is malformed."""
    if source[start] not in {'e', 'E'}:
        # no exponent here; an identifier char would make the literal bad
        if source[start] in IDENTIFIER_PART:
            raise SyntaxError('Invalid number literal!')
        return start
    start += 1
    if source[start] in {'-', '+'}:
        start += 1
    digits_found = False
    # we need at least one digit after the exponent marker
    while source[start] in NUMS:
        digits_found = True
        start += 1
    if not digits_found or source[start] in IDENTIFIER_PART:
        raise SyntaxError('Invalid number literal!')
    return start
def remove_constants(source):
    '''Replaces Strings and Regexp literals in the source code with
     identifiers and *removes comments*. Identifier is of the format:
     PyJsStringConst(String const number)_ - for Strings
     PyJsRegExpConst(RegExp const number)_ - for RegExps
     Returns dict which relates identifier and replaced constant.
     Removes single line and multiline comments from JavaScript source code
     Pseudo comments (inside strings) will not be removed.
     For example this line:
     var x = "/*PSEUDO COMMENT*/ TEXT //ANOTHER PSEUDO COMMENT"
     will be unaltered

     Implementation note: a single left-to-right scan tracks which literal
     we are currently inside (string, comment, regexp or number).  Each
     found span is recorded as [start, end, type] where type is
     0=string, 1=comment, 2=regexp, 3=number; afterwards the spans are
     spliced out and (except comments) replaced by placeholder names.'''
    # padding guarantees source[n-1]/source[n+1] lookups never go out of range
    source = ' ' + source + '\n'
    comments = []
    inside_comment, single_comment = False, False
    inside_single, inside_double = False, False
    inside_regexp = False
    regexp_class_count = 0
    n = 0
    while n < len(source):
        char = source[n]
        if char == '"' and not (inside_comment or inside_single
                                or inside_regexp):
            # unescaped double quote: opens or closes a "..." string
            if not _is_cancelled(source, n):
                if inside_double:
                    inside_double[1] = n + 1
                    comments.append(inside_double)
                    inside_double = False
                else:
                    inside_double = [n, None, 0]
        elif char == "'" and not (inside_comment or inside_double
                                  or inside_regexp):
            # unescaped single quote: opens or closes a '...' string
            if not _is_cancelled(source, n):
                if inside_single:
                    inside_single[1] = n + 1
                    comments.append(inside_single)
                    inside_single = False
                else:
                    inside_single = [n, None, 0]
        elif (inside_single or inside_double):
            # inside a string only escaped line terminators are legal
            if char in LINE_TERMINATOR:
                if _is_cancelled(source, n):
                    if char == CR and source[n + 1] == LF:
                        n += 1
                    n += 1
                    continue
                else:
                    raise SyntaxError(
                        'Invalid string literal. Line terminators must be escaped!'
                    )
        else:
            if inside_comment:
                if single_comment:
                    # // comment ends at the line terminator
                    if char in LINE_TERMINATOR:
                        inside_comment[1] = n
                        comments.append(inside_comment)
                        inside_comment = False
                        single_comment = False
                else:  # Multiline
                    if char == '/' and source[n - 1] == '*':
                        inside_comment[1] = n + 1
                        comments.append(inside_comment)
                        inside_comment = False
            elif inside_regexp:
                if not quiting_regexp:
                    if char in LINE_TERMINATOR:
                        raise SyntaxError(
                            'Invalid regexp literal. Line terminators cant appear!'
                        )
                    if _is_cancelled(source, n):
                        n += 1
                        continue
                    if char == '[':
                        # inside a [...] class '/' loses its special meaning
                        regexp_class_count += 1
                    elif char == ']':
                        regexp_class_count = max(regexp_class_count - 1, 0)
                    elif char == '/' and not regexp_class_count:
                        quiting_regexp = True
                else:
                    # after the closing '/': consume trailing flags (g, i, ...)
                    if char not in IDENTIFIER_START:
                        inside_regexp[1] = n
                        comments.append(inside_regexp)
                        inside_regexp = False
            elif char == '/' and source[n - 1] == '/':
                single_comment = True
                inside_comment = [n - 1, None, 1]
            elif char == '*' and source[n - 1] == '/':
                inside_comment = [n - 1, None, 1]
            elif char == '/' and source[n + 1] not in ('/', '*'):
                if not _ensure_regexp(source, n):  #<- improve this one
                    n += 1
                    continue  #Probably just a division
                quiting_regexp = False
                inside_regexp = [n, None, 2]
            elif not (inside_comment or inside_regexp):
                # number literal detection (int, float, hex, exponent forms)
                if (char in NUMS and
                        source[n - 1] not in IDENTIFIER_PART) or char == '.':
                    if char == '.':
                        k = parse_num(source, n + 1, NUMS)
                        if k == n + 1:  # just a stupid dot...
                            n += 1
                            continue
                        k = parse_exponent(source, k)
                    elif char == '0' and source[n + 1] in {
                            'x', 'X'
                    }:  #Hex number probably
                        k = parse_num(source, n + 2, HEX)
                        if k == n + 2 or source[k] in IDENTIFIER_PART:
                            raise SyntaxError('Invalid hex literal!')
                    else:  #int or exp or flot or exp flot
                        k = parse_num(source, n + 1, NUMS)
                        if source[k] == '.':
                            k = parse_num(source, k + 1, NUMS)
                        k = parse_exponent(source, k)
                    comments.append((n, k, 3))
                    n = k
                    continue
        n += 1
    # splice out all recorded spans; replace each (except comments) with a
    # numbered placeholder name and remember the original text
    res = ''
    start = 0
    count = 0
    constants = {}
    for end, next_start, typ in comments:
        res += source[start:end]
        start = next_start
        if typ == 0:  # String
            name = StringName
        elif typ == 1:  # comment
            continue
        elif typ == 2:  # regexp
            name = RegExpName
        elif typ == 3:  # number
            name = NumberName
        else:
            raise RuntimeError()
        res += ' ' + name % count + ' '
        constants[name % count] = source[end:next_start]
        count += 1
    res += source[start:]
    # remove this stupid white space
    for e in WHITE:
        res = res.replace(e, ' ')
    res = res.replace(CR + LF, '\n')
    for e in LINE_TERMINATOR:
        res = res.replace(e, '\n')
    return res.strip(), constants
def recover_constants(py_source,
                      replacements):  #now has n^2 complexity. improve to n
    '''Replace the constant placeholder identifiers inserted by
    remove_constants with real PyJs constructor calls - e.g. the
    identifier PyJsNumberConst_1_ holding the value 5 becomes Js(5).'''
    for name, raw in replacements.iteritems():
        if name.startswith('PyJsConstantRegExp'):
            py_source = py_source.replace(name,
                                          'JsRegExp(%s)' % repr(raw))
        elif name.startswith('PyJsConstantString'):
            py_source = py_source.replace(
                name, 'Js(u%s)' % unify_string_literals(raw))
        else:
            py_source = py_source.replace(name, 'Js(%s)' % raw)
    return py_source
def unify_string_literals(js_string):
    """Normalise escape sequences so python reads the literal the same way
    javascript does.  For example the JS literal '\d' means just 'd' (the
    backslash is ignored) while python would keep it as '\\d'; rewriting
    the escapes fixes this mismatch."""
    pos = 0
    out = ''
    size = len(js_string)
    while pos < size:
        ch = js_string[pos]
        if ch == '\\':
            piece, pos = do_escape(js_string, pos)
            out += piece
        else:
            out += ch
            pos += 1
    return out
def do_escape(source, n):
    """Resolve one escape sequence starting at the backslash source[n].

    Returns (replacement text, position after the escape).  Covers line
    continuations, simple escapes, hex/unicode escapes and octal escapes.
    Its actually quite complicated to cover every case :)
    http://www.javascriptkit.com/jsref/escapesequence.shtml
    """
    if not n + 1 < len(source):
        # lone backslash at the very end: emit nothing and consume it.
        # fixed: previously returned a bare '' (not a 2-tuple), which broke
        # callers that unpack the result as (text, position).
        return '', n + 1
    if source[n + 1] in LINE_TERMINATOR:
        # escaped line terminator: keep it verbatim (CR+LF as one unit)
        if source[n + 1] == CR and n + 2 < len(source) and source[n + 2] == LF:
            return source[n:n + 3], n + 3
        return source[n:n + 2], n + 2
    if source[n + 1] in ESCAPE_CHARS:
        return source[n:n + 2], n + 2
    if source[n + 1] in {'x', 'u'}:
        char, length = ('u', 4) if source[n + 1] == 'u' else ('x', 2)
        n += 2
        end = parse_num(source, n, HEX)
        if end - n < length:
            raise SyntaxError('Invalid escape sequence!')
        # keep the escape verbatim: python parses \xNN and \uNNNN the same
        #if length==4:
        #  return unichr(int(source[n:n+4], 16)), n+4 # <- this was a very bad way of solving this problem :)
        return source[n - 2:n + length], n + length
    if source[n + 1] in OCTAL:
        n += 1
        end = parse_num(source, n, OCTAL)
        end = min(end, n + 3)  # cant be longer than 3
        # now the max allowed is 377 ( in octal) and 255 in decimal
        max_num = 255
        num = 0
        len_parsed = 0
        for e in source[n:end]:
            cand = 8 * num + int(e)
            if cand > max_num:
                break
            num = cand
            len_parsed += 1
        # we have to return in a different form because python may want to parse more...
        # for example '\777' will be parsed by python as a whole while js will use only \77
        return '\\' + hex(num)[1:], n + len_parsed
    return source[n + 1], n + 2
def abstract_relational_comparison(self, other,
                                   self_first=True):  # todo speed up!
    ''' self<other if self_first else other<self.
        Returns the result of the question: is self smaller than other?
        in case self_first is false it returns the answer of:
                                               is other smaller than self.
        result is PyJs type: bool or undefined'''
    left = to_primitive(self, 'Number')
    right = to_primitive(other, 'Number')
    if not self_first:  # reverse order
        left, right = right, left
    if Type(left) == 'String' and Type(right) == 'String':
        # I am pretty sure that python has the same
        # string cmp algorithm but I have to confirm it
        return left < right
    left, right = to_number(left), to_number(right)
    if is_nan(left) or is_nan(right):
        return None  # watch out here! (JS 'undefined')
    return left < right  # same cmp algorithm
def abstract_equality_op(self, other):
    ''' returns the result of JS == compare.
       result is PyJs type: bool'''
    tx, ty = Type(self), Type(other)
    # Same type: compare directly (objects by identity).
    if tx == ty:
        if tx in ('Undefined', 'Null'):
            return True
        if tx in ('Number', 'String', 'Boolean'):
            return self == other
        return self is other  # Object
    # undefined == null in JS.
    if {tx, ty} == {'Undefined', 'Null'}:
        return True
    # Mixed types: coerce one side and retry, per the ES coercion rules.
    if tx == 'Number' and ty == 'String':
        return abstract_equality_op(self, to_number(other))
    if tx == 'String' and ty == 'Number':
        return abstract_equality_op(to_number(self), other)
    if tx == 'Boolean':
        return abstract_equality_op(to_number(self), other)
    if ty == 'Boolean':
        return abstract_equality_op(self, to_number(other))
    if tx in ('String', 'Number') and is_object(other):
        return abstract_equality_op(self, to_primitive(other))
    if ty in ('String', 'Number') and is_object(self):
        return abstract_equality_op(to_primitive(self), other)
    return False
def in_op(self, other):
    '''checks if self is in other'''
    # The JS 'in' operator only works on objects; anything else is a TypeError.
    if is_object(other):
        return other.has_property(to_string(self))
    raise MakeError(
        'TypeError',
        "You can\'t use 'in' operator to search in non-objects")
def maybe_download_and_extract():
    """Download and extract processed data and embeddings.

    Fetches the archive at DATA_URL into the current directory (skipped
    if already present, with a progress line on stdout while
    downloading), then extracts it unless the 'trees' directory already
    exists.
    """
    dest_directory = '.'
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # urlretrieve reporthook: overwrite one progress line in place.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    extracted_dir_path = os.path.join(dest_directory, 'trees')
    if not os.path.exists(extracted_dir_path):
        # Context manager closes the zip handle even if extraction raises
        # (the original leaked the handle on error).
        with zipfile.ZipFile(filepath, 'r') as zip_ref:
            zip_ref.extractall(dest_directory)
def make_network_graph(compact, expression_names, lookup_names):
    """
    Make a network graph, represented as of nodes and a set of edges.
    The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string)
    # The edges are represented as dict of children to sets of parents: (child: string) -> [(parent: string, features: string)]
    """
    nodes = set()
    # edges = defaultdict(set) # parent -> (child, extra)
    # Map expression vindex -> user-supplied display name, if any.
    var_name_dict = dict()
    if expression_names:
        for e in graphviz_items:  # e: Expression
            if e in expression_names:
                var_name_dict[e.vindex] = expression_names[e]
    # Lazily assigns 'A', 'B', ... to each distinct RNN builder instance.
    rnn_bldr_name = defaultdict(lambda: chr(len(rnn_bldr_name)+ord('A')))
    def vidx2str(vidx): return '%s%s' % ('N', vidx)
    for e in graphviz_items:  # e: Expression
        vidx = e.vindex
        f_name = e.name
        args = e.args
        output_dim = e.dim
        input_dim = None  # basically just RNNStates use this since everything else has input_dim==output_dim
        children = set()
        node_type = '2_regular'
        # Input/parameter nodes: no argument strings, optionally renamed.
        if f_name == 'vecInput':
            [_dim] = args
            arg_strs = []
        elif f_name == 'inputVector':
            [_v] = args
            arg_strs = []
        elif f_name == 'matInput':
            [_d1, _d2] = args
            arg_strs = []
        elif f_name == 'inputMatrix':
            [_v, _d] = args
            arg_strs = []
        elif f_name == 'parameters':
            [_dim] = args
            arg_strs = []
            if compact:
                if vidx in var_name_dict:
                    f_name = var_name_dict[vidx]
            node_type = '1_param'
        elif f_name == 'lookup_parameters':
            [_dim] = args
            arg_strs = []
            if compact:
                if vidx in var_name_dict:
                    f_name = var_name_dict[vidx]
            node_type = '1_param'
        elif f_name == 'lookup':
            # Lookup node: try to show the looked-up item's surface form.
            [p, idx, update] = args
            [_dim] = p.args
            if vidx in var_name_dict:
                name = var_name_dict[vidx]
            else:
                name = None
            item_name = None
            if lookup_names and p in expression_names:
                param_name = expression_names[p]
                if param_name in lookup_names:
                    item_name = '\\"%s\\"' % (lookup_names[param_name][idx],)
            if compact:
                if item_name is not None:
                    f_name = item_name
                elif name is not None:
                    f_name = '%s[%s]' % (name, idx)
                else:
                    f_name = 'lookup(%s)' % (idx)
                arg_strs = []
            else:
                arg_strs = [var_name_dict.get(p.vindex, 'v%d' % (p.vindex))]
                if item_name is not None:
                    arg_strs.append(item_name)
                vocab_size = _dim[0]
                arg_strs.extend(['%s' % (idx), '%s' % (vocab_size), 'update' if update else 'fixed'])
            #children.add(vidx2str(p.vindex))
            #node_type = '1_param'
        elif f_name == 'RNNState':
            [arg, input_dim, bldr_type, bldr_num, state_idx] = args  # arg==input_e
            rnn_name = rnn_bldr_name[bldr_num]
            if bldr_type.endswith('Builder'):
                # Strip the 'Builder' suffix for display. (The original
                # computed this slice but discarded the result.)
                bldr_type = bldr_type[:-len('Builder')]
            f_name = '%s-%s-%s' % (bldr_type, rnn_name, state_idx)
            if not compact:
                i = arg.vindex
                s = var_name_dict.get(i, 'v%d' % (i))
                arg_strs = [s]
            else:
                arg_strs = []
            children.add(vidx2str(arg.vindex))
            node_type = '3_rnn_state'
        else:
            # Generic expression node: stringify args and track child edges.
            arg_strs = []
            for arg in args:
                if isinstance(arg, Expression):
                    if not compact:
                        i = arg.vindex
                        s = var_name_dict.get(i, 'v%d' % (i))
                        arg_strs.append(s)
                    children.add(vidx2str(arg.vindex))
                elif isinstance(arg, float) and compact:
                    # Trim trailing zeros for compact float display.
                    s = re.sub('0+$', '', '%.3f' % (arg))
                    if s == '0.':
                        s = str(arg)
                    arg_strs.append(s)
                else:
                    arg_strs.append(str(arg))
        # Build the node label from the function name and argument strings.
        if compact:
            f_name = { 'add': '+',
                       'sub': '-',
                       'mul': '*',
                       'div': '/',
                       'cadd': '+',
                       'cmul': '*',
                       'cdiv': '/',
                       'scalarsub': '-',
                       'concatenate': 'cat',
                       'esum': 'sum',
                       'emax': 'max',
                       'emin': 'min',
                     }.get(f_name, f_name)
            if arg_strs:
                str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs))
            else:
                str_repr = f_name
        elif f_name == 'add':
            [a, b] = arg_strs
            str_repr = '%s + %s' % (a, b)
        elif f_name == 'sub':
            [a, b] = arg_strs
            str_repr = '%s - %s' % (a, b)
        elif f_name == 'mul':
            [a, b] = arg_strs
            str_repr = '%s * %s' % (a, b)
        elif f_name == 'div':
            [a, b] = arg_strs
            str_repr = '%s / %s' % (a, b)
        elif f_name == 'neg':
            [a, ] = arg_strs
            str_repr = '-%s' % (a)
        elif f_name == 'affine_transform':
            # b + W1*x1 + W2*x2 + ...
            str_repr = arg_strs[0]
            # range (not xrange) so this works on Python 3 as well.
            for i in range(1, len(arg_strs), 2):
                str_repr += ' + %s*%s' % tuple(arg_strs[i:i+2])
        else:
            if arg_strs is not None:
                str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs))
            else:
                str_repr = f_name
        name = vidx2str(vidx)
        var_name = '%s' % (var_name_dict.get(vidx, 'v%d' % (vidx))) if not compact else ''
        # if show_dims:
        #   str_repr = '%s\\n%s' % (shape_str(e.dim), str_repr)
        label = str_repr
        if not compact:
            label = '%s = %s' % (var_name, label)
        features = ''
        # if output_dim.invalid():
        #   features += " [color=red,style=filled,fillcolor=red]"
        # node_def_lines.append('  %s [label="%s%s"] %s;' % (vidx2str(vidx), label_prefix, str_repr, ''))
        expr_name = expression_names[e] if compact and expression_names and (e in expression_names) and (expression_names[e] != f_name) else None
        nodes.add(GVNode(name, input_dim, label, output_dim, frozenset(children), features, node_type, expr_name))
    return nodes
def add_inputs(self, es):
    """
    returns the list of state pairs (stateF, stateB) obtained by adding
    inputs to both forward (stateF) and backward (stateB) RNNs.
    @param es: a list of Expression
    see also transduce(xs)
    .transduce(xs) is different from .add_inputs(xs) in the following way:
        .add_inputs(xs) returns a list of RNNState pairs. RNNState objects can be
         queried in various ways. In particular, they allow access to the previous
         state, as well as to the state-vectors (h() and s() )
        .transduce(xs) returns a list of Expression. These are just the output
         expressions. For many cases, this suffices.
        transduce is much more memory efficient than add_inputs.
    """
    for expr in es:
        ensure_freshness(expr)
    # All layers but the last only need the output expressions.
    for fwd, bwd in self.builder_layers[:-1]:
        f_out = fwd.initial_state().transduce(es)
        b_out = bwd.initial_state().transduce(reversed(es))
        es = [concatenate([f, b]) for f, b in zip(f_out, reversed(b_out))]
    # The last layer keeps full RNNState objects.
    fwd, bwd = self.builder_layers[-1]
    f_states = fwd.initial_state().add_inputs(es)
    b_states = bwd.initial_state().add_inputs(reversed(es))
    return list(zip(f_states, reversed(b_states)))
def transduce(self, es):
    """
    returns the list of output Expressions obtained by adding the given inputs
    to the current state, one by one, to both the forward and backward RNNs,
    and concatenating.
    @param es: a list of Expression
    see also add_inputs(xs)
    .transduce(xs) is different from .add_inputs(xs) in the following way:
        .add_inputs(xs) returns a list of RNNState pairs. RNNState objects can be
         queried in various ways. In particular, they allow access to the previous
         state, as well as to the state-vectors (h() and s() )
        .transduce(xs) returns a list of Expression. These are just the output
         expressions. For many cases, this suffices.
        transduce is much more memory efficient than add_inputs.
    """
    for expr in es:
        ensure_freshness(expr)
    # Feed each layer's concatenated fwd/bwd outputs into the next layer.
    for fwd, bwd in self.builder_layers:
        f_out = fwd.initial_state().transduce(es)
        b_out = bwd.initial_state().transduce(reversed(es))
        es = [concatenate([f, b]) for f, b in zip(f_out, reversed(b_out))]
    return es
def add_inputs(self, xs):
    """
    returns the list of states obtained by adding the given inputs
    to the current state, one by one.
    """
    out = []
    state = self
    for item in xs:
        state = state.add_input(item)
        out.append(state)
    return out
def load_mnist(dataset, path):
    """Load MNIST images from the idx files under *path*.

    wget -O - http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz | gunzip > train-images-idx3-ubyte
    wget -O - http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz | gunzip > train-labels-idx1-ubyte
    wget -O - http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz | gunzip > t10k-images-idx3-ubyte
    wget -O - http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz | gunzip > t10k-labels-idx1-ubyte

    :param dataset: either "training" or "testing"
    :param path: directory containing the unpacked idx files
    :return: list of images, one (1, rows, cols) float array per image,
             with pixel values scaled to [0, 1]
    :raises ValueError: if dataset is neither "training" nor "testing"
    """
    # NOTE: the original used `is` here, which compares object identity and
    # only matched by accident of CPython string interning; `==` is correct.
    if dataset == "training":
        fname_img = os.path.join(path, "train-images-idx3-ubyte")
        fname_lbl = os.path.join(path, "train-labels-idx1-ubyte")
    elif dataset == "testing":
        fname_img = os.path.join(path, "t10k-images-idx3-ubyte")
        fname_lbl = os.path.join(path, "t10k-labels-idx1-ubyte")
    else:
        raise ValueError("dataset must be 'testing' or 'training'")
    # Load everything in numpy arrays
    with open(fname_lbl, "rb") as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))
        labels = np.fromfile(flbl, dtype=np.int8)
    with open(fname_img, "rb") as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        images = np.multiply(
            np.fromfile(fimg, dtype=np.uint8).reshape(len(labels), rows*cols),
            1.0 / 255.0)
    # Reshape each flat row into a single-channel image, using the image
    # dimensions read from the header (28x28 for real MNIST).
    return [img.reshape(1, rows, cols) for img in images]
def make_grid(tensor, nrow=8, padding=2, pad_value=0):
    """Make a grid of images, via numpy.
    Args:
        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The Final grid size is (B / nrow, nrow). Default is 8.
        padding (int, optional): amount of padding. Default is 2.
        pad_value (float, optional): Value for the padded pixels.
    """
    is_array = isinstance(tensor, np.ndarray)
    is_array_list = (isinstance(tensor, list)
                     and all(isinstance(t, np.ndarray) for t in tensor))
    if not (is_array or is_array_list):
        raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = np.stack(tensor, 0)
    # Normalize every input shape to a (B, 3, H, W) batch.
    if tensor.ndim == 2:  # single image H x W
        tensor = tensor[np.newaxis]
    if tensor.ndim == 3:
        if tensor.shape[0] == 1:  # single-channel, single image -> 3-channel
            tensor = np.concatenate((tensor,) * 3, 0)
        tensor = tensor[np.newaxis]
    if tensor.ndim == 4 and tensor.shape[1] == 1:  # single-channel images
        tensor = np.concatenate((tensor,) * 3, 1)
    # A one-image batch needs no grid: return it as (C, H, W).
    if tensor.shape[0] == 1:
        return np.squeeze(tensor)
    # make the mini-batch of images into a grid
    nmaps = tensor.shape[0]
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    cell_h = int(tensor.shape[2] + padding)
    cell_w = int(tensor.shape[3] + padding)
    grid = np.full((3, cell_h * ymaps + padding, cell_w * xmaps + padding),
                   pad_value, dtype=np.float64)
    idx = 0
    for row in range(ymaps):
        for col in range(xmaps):
            if idx >= nmaps:
                break
            top = row * cell_h + padding
            left = col * cell_w + padding
            grid[:, top:top + tensor.shape[2], left:left + tensor.shape[3]] = tensor[idx]
            idx += 1
    return grid
def save_image(tensor, filename, nrow=8, padding=2, pad_value=0):
    """Save a given Tensor into an image file.
    Args:
        tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
            saves the tensor as a grid of images by calling ``make_grid``.
        filename: destination file path.
        nrow, padding, pad_value: forwarded to ``make_grid``.
    """
    from PIL import Image
    grid_img = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value)
    Image.fromarray(pre_pillow_float_img_process(grid_img)).save(filename)
def pythonize_arguments(arg_str):
    """
    Remove types from function arguments in cython.

    :param arg_str: comma-separated argument list, e.g. "int a, float b=1.0",
                    or None when there are no arguments
    :return: comma-joined argument names with any defaults kept,
             e.g. "a,b=1.0" ('' when arg_str is None)
    """
    # No arguments at all -> empty string. (The original returned the
    # undefined name `out_str` here, raising NameError.)
    if arg_str is None:
        return ''
    out_args = []
    for arg in arg_str.split(','):
        components = arg.split('=')
        name_and_type = components[0].split(' ')
        # There is probably type info: the name is the last non-empty token.
        if name_and_type[-1] == '' and len(name_and_type) > 1:
            name = name_and_type[-2]
        else:
            name = name_and_type[-1]
        # Re-attach any default parameter value.
        if len(components) > 1:
            name += '=' + components[1]
        out_args.append(name)
    return ','.join(out_args)
def correspond(text):
    """Communicate with the child process without closing stdin."""
    # Only write when there is something to send; always collect output.
    if text:
        pipe = subproc.stdin
        pipe.write(text)
        pipe.flush()
    return get_lines()
def _normalize(c):
    """
    Convert a byte-like value into a canonical byte (a value of type 'bytes' of len 1)
    :param c: an int, a single-character str, or an already-bytes value
    :return: the length-1 bytes equivalent (non-int, non-str values pass through)
    """
    if isinstance(c, str):
        return bytes([ord(c)])
    if isinstance(c, int):
        return bytes([c])
    return c
def _set_perms(self, perms):
    """
    Sets the access permissions of the map.
    :param perms: the new permissions (a string of at most 3 chars drawn
        from 'rwx', possibly space-padded).
    """
    allowed = ['', 'r', 'w', 'x', 'rw', 'r x', 'rx', 'rwx', 'wx', ]
    assert isinstance(perms, str) and len(perms) <= 3 and perms.strip() in allowed
    self._perms = perms
def access_ok(self, access):
    """ Check if there is enough permissions for access """
    # Every requested permission character must be granted by this map.
    return all(c in self.perms for c in access)
def _in_range(self, index):
    """ Returns True if index (an int or a slice) is in range """
    if isinstance(index, slice):
        # A slice must be non-empty and fully contained in [start, end].
        return (index.start < index.stop
                and index.start >= self.start
                and index.stop <= self.end)
    return self.start <= index <= self.end
def _get_offset(self, index):
    """
    Translates the index to the internal offsets.
    self.start -> 0
    self.start+1 -> 1
    ...
    self.end -> len(self)
    :raises IndexError: when index falls outside the map's range
    """
    if not self._in_range(index):
        raise IndexError('Map index out of range')
    if isinstance(index, slice):
        return slice(index.start - self.start, index.stop - self.start)
    return index - self.start
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.