INSTRUCTION stringlengths 1 8.43k | RESPONSE stringlengths 75 104k |
|---|---|
connect to peers. peers will be a 3 - tuples of the form: ( location north_addr east_addr ) as produced by | def connect(self, south_peer=None, west_peer=None):
"""connect to peers. `peers` will be a 3-tuples, of the form:
(location, north_addr, east_addr)
as produced by
"""
if south_peer is not None:
location, url, _ = south_peer
self.south.connect(disambiguate_url(url, location))
if west_peer is not None:
location, _, url = west_peer
self.west.connect(disambiguate_url(url, location)) |
convert. pyx extensions to. c | def _convert_pyx_sources_to_c(self):
"convert .pyx extensions to .c"
def pyx_to_c(source):
if source.endswith('.pyx'):
source = source[:-4] + '.c'
return source
self.sources = map(pyx_to_c, self.sources) |
watch iopub channel and print messages | def main(connection_file):
"""watch iopub channel, and print messages"""
ctx = zmq.Context.instance()
with open(connection_file) as f:
cfg = json.loads(f.read())
location = cfg['location']
reg_url = cfg['url']
session = Session(key=str_to_bytes(cfg['exec_key']))
query = ctx.socket(zmq.DEALER)
query.connect(disambiguate_url(cfg['url'], location))
session.send(query, "connection_request")
idents,msg = session.recv(query, mode=0)
c = msg['content']
iopub_url = disambiguate_url(c['iopub'], location)
sub = ctx.socket(zmq.SUB)
# This will subscribe to all messages:
sub.setsockopt(zmq.SUBSCRIBE, b'')
# replace with b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout
# 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes
# to everything from engine 1, but there is no way to subscribe to
# just stdout from everyone.
# multiple calls to subscribe will add subscriptions, e.g. to subscribe to
# engine 1's stderr and engine 2's stdout:
# sub.setsockopt(zmq.SUBSCRIBE, b'engine.1.stderr')
# sub.setsockopt(zmq.SUBSCRIBE, b'engine.2.stdout')
sub.connect(iopub_url)
while True:
try:
idents,msg = session.recv(sub, mode=0)
except KeyboardInterrupt:
return
# ident always length 1 here
topic = idents[0]
if msg['msg_type'] == 'stream':
# stdout/stderr
# stream names are in msg['content']['name'], if you want to handle
# them differently
print("%s: %s" % (topic, msg['content']['data']))
elif msg['msg_type'] == 'pyerr':
# Python traceback
c = msg['content']
print(topic + ':')
for line in c['traceback']:
# indent lines
print(' ' + line) |
decorator ( caller ) converts a caller function into a decorator ; decorator ( caller func ) decorates a function using a caller. | def decorator(caller, func=None):
"""
decorator(caller) converts a caller function into a decorator;
decorator(caller, func) decorates a function using a caller.
"""
if func is not None: # returns a decorated function
evaldict = func.func_globals.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
return FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, undecorated=func, __wrapped__=func)
else: # returns a decorator
if isinstance(caller, partial):
return partial(decorator, caller)
# otherwise assume caller is a function
first = inspect.getargspec(caller)[0][0] # first arg
evaldict = caller.func_globals.copy()
evaldict['_call_'] = caller
evaldict['decorator'] = decorator
return FunctionMaker.create(
'%s(%s)' % (caller.__name__, first),
'return decorator(_call_, %s)' % first,
evaldict, undecorated=caller, __wrapped__=caller,
doc=caller.__doc__, module=caller.__module__) |
Create a package finder appropriate to this install command. This method is meant to be overridden by subclasses not called directly. | def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
trusted_hosts=options.trusted_hosts,
allow_all_prereleases=options.pre,
process_dependency_links=options.process_dependency_links,
session=session,
) |
Method decorator for catching invalid config ( Trait/ ArgumentErrors ) during init. | def catch_config_error(method, app, *args, **kwargs):
"""Method decorator for catching invalid config (Trait/ArgumentErrors) during init.
On a TraitError (generally caused by bad config), this will print the trait's
message, and exit the app.
For use on init methods, to prevent invoking excepthook on invalid input.
"""
try:
return method(app, *args, **kwargs)
except (TraitError, ArgumentError) as e:
app.print_description()
app.print_help()
app.print_examples()
app.log.fatal("Bad config encountered during initialization:")
app.log.fatal(str(e))
app.log.debug("Config at the time: %s", app.config)
app.exit(1) |
Helper for building basic -- trait -- no - trait flags. | def boolean_flag(name, configurable, set_help='', unset_help=''):
"""Helper for building basic --trait, --no-trait flags.
Parameters
----------
name : str
The name of the flag.
configurable : str
The 'Class.trait' string of the trait to be set/unset with the flag
set_help : unicode
help string for --name flag
unset_help : unicode
help string for --no-name flag
Returns
-------
cfg : dict
A dict with two keys: 'name', and 'no-name', for setting and unsetting
the trait, respectively.
"""
# default helpstrings
set_help = set_help or "set %s=True"%configurable
unset_help = unset_help or "set %s=False"%configurable
cls,trait = configurable.split('.')
setter = {cls : {trait : True}}
unsetter = {cls : {trait : False}}
return {name : (setter, set_help), 'no-'+name : (unsetter, unset_help)} |
Adjust the log level when log_level is set. | def _log_level_changed(self, name, old, new):
"""Adjust the log level when log_level is set."""
if isinstance(new, basestring):
new = getattr(logging, new)
self.log_level = new
self.log.setLevel(new) |
Start logging for this application. | def _log_default(self):
"""Start logging for this application.
The default is to log to stdout using a StreaHandler. The log level
starts at loggin.WARN, but this can be adjusted by setting the
``log_level`` attribute.
"""
log = logging.getLogger(self.__class__.__name__)
log.setLevel(self.log_level)
if sys.executable.endswith('pythonw.exe'):
# this should really go to a file, but file-logging is only
# hooked up in parallel applications
_log_handler = logging.StreamHandler(open(os.devnull, 'w'))
else:
_log_handler = logging.StreamHandler()
_log_formatter = logging.Formatter(self.log_format)
_log_handler.setFormatter(_log_formatter)
log.addHandler(_log_handler)
return log |
ensure flags dict is valid | def _flags_changed(self, name, old, new):
"""ensure flags dict is valid"""
for key,value in new.iteritems():
assert len(value) == 2, "Bad flag: %r:%s"%(key,value)
assert isinstance(value[0], (dict, Config)), "Bad flag: %r:%s"%(key,value)
assert isinstance(value[1], basestring), "Bad flag: %r:%s"%(key,value) |
Print the alias part of the help. | def print_alias_help(self):
"""Print the alias part of the help."""
if not self.aliases:
return
lines = []
classdict = {}
for cls in self.classes:
# include all parents (up to, but excluding Configurable) in available names
for c in cls.mro()[:-3]:
classdict[c.__name__] = c
for alias, longname in self.aliases.iteritems():
classname, traitname = longname.split('.',1)
cls = classdict[classname]
trait = cls.class_traits(config=True)[traitname]
help = cls.class_get_trait_help(trait).splitlines()
# reformat first line
help[0] = help[0].replace(longname, alias) + ' (%s)'%longname
if len(alias) == 1:
help[0] = help[0].replace('--%s='%alias, '-%s '%alias)
lines.extend(help)
# lines.append('')
print os.linesep.join(lines) |
Print the flag part of the help. | def print_flag_help(self):
"""Print the flag part of the help."""
if not self.flags:
return
lines = []
for m, (cfg,help) in self.flags.iteritems():
prefix = '--' if len(m) > 1 else '-'
lines.append(prefix+m)
lines.append(indent(dedent(help.strip())))
# lines.append('')
print os.linesep.join(lines) |
Print the subcommand part of the help. | def print_subcommands(self):
"""Print the subcommand part of the help."""
if not self.subcommands:
return
lines = ["Subcommands"]
lines.append('-'*len(lines[0]))
lines.append('')
for p in wrap_paragraphs(self.subcommand_description):
lines.append(p)
lines.append('')
for subc, (cls, help) in self.subcommands.iteritems():
lines.append(subc)
if help:
lines.append(indent(dedent(help.strip())))
lines.append('')
print os.linesep.join(lines) |
Print the help for each Configurable class in self. classes. | def print_help(self, classes=False):
"""Print the help for each Configurable class in self.classes.
If classes=False (the default), only flags and aliases are printed.
"""
self.print_subcommands()
self.print_options()
if classes:
if self.classes:
print "Class parameters"
print "----------------"
print
for p in wrap_paragraphs(self.keyvalue_description):
print p
print
for cls in self.classes:
cls.class_print_help()
print
else:
print "To see all available configurables, use `--help-all`"
print |
Print usage and examples. | def print_examples(self):
"""Print usage and examples.
This usage string goes at the end of the command line help string
and should contain examples of the application's usage.
"""
if self.examples:
print "Examples"
print "--------"
print
print indent(dedent(self.examples.strip()))
print |
Fire the traits events when the config is updated. | def update_config(self, config):
"""Fire the traits events when the config is updated."""
# Save a copy of the current config.
newconfig = deepcopy(self.config)
# Merge the new config into the current one.
newconfig._merge(config)
# Save the combined config as self.config, which triggers the traits
# events.
self.config = newconfig |
Initialize a subcommand with argv. | def initialize_subcommand(self, subc, argv=None):
"""Initialize a subcommand with argv."""
subapp,help = self.subcommands.get(subc)
if isinstance(subapp, basestring):
subapp = import_item(subapp)
# clear existing instances
self.__class__.clear_instance()
# instantiate
self.subapp = subapp.instance()
# and initialize subapp
self.subapp.initialize(argv) |
flatten flags and aliases so cl - args override as expected. This prevents issues such as an alias pointing to InteractiveShell but a config file setting the same trait in TerminalInteraciveShell getting inappropriate priority over the command - line arg. | def flatten_flags(self):
"""flatten flags and aliases, so cl-args override as expected.
This prevents issues such as an alias pointing to InteractiveShell,
but a config file setting the same trait in TerminalInteraciveShell
getting inappropriate priority over the command-line arg.
Only aliases with exactly one descendent in the class list
will be promoted.
"""
# build a tree of classes in our list that inherit from a particular
# it will be a dict by parent classname of classes in our list
# that are descendents
mro_tree = defaultdict(list)
for cls in self.classes:
clsname = cls.__name__
for parent in cls.mro()[1:-3]:
# exclude cls itself and Configurable,HasTraits,object
mro_tree[parent.__name__].append(clsname)
# flatten aliases, which have the form:
# { 'alias' : 'Class.trait' }
aliases = {}
for alias, cls_trait in self.aliases.iteritems():
cls,trait = cls_trait.split('.',1)
children = mro_tree[cls]
if len(children) == 1:
# exactly one descendent, promote alias
cls = children[0]
aliases[alias] = '.'.join([cls,trait])
# flatten flags, which are of the form:
# { 'key' : ({'Cls' : {'trait' : value}}, 'help')}
flags = {}
for key, (flagdict, help) in self.flags.iteritems():
newflag = {}
for cls, subdict in flagdict.iteritems():
children = mro_tree[cls]
# exactly one descendent, promote flag section
if len(children) == 1:
cls = children[0]
newflag[cls] = subdict
flags[key] = (newflag, help)
return flags, aliases |
Parse the command line arguments. | def parse_command_line(self, argv=None):
"""Parse the command line arguments."""
argv = sys.argv[1:] if argv is None else argv
if argv and argv[0] == 'help':
# turn `ipython help notebook` into `ipython notebook -h`
argv = argv[1:] + ['-h']
if self.subcommands and len(argv) > 0:
# we have subcommands, and one may have been specified
subc, subargv = argv[0], argv[1:]
if re.match(r'^\w(\-?\w)*$', subc) and subc in self.subcommands:
# it's a subcommand, and *not* a flag or class parameter
return self.initialize_subcommand(subc, subargv)
if '-h' in argv or '--help' in argv or '--help-all' in argv:
self.print_description()
self.print_help('--help-all' in argv)
self.print_examples()
self.exit(0)
if '--version' in argv or '-V' in argv:
self.print_version()
self.exit(0)
# flatten flags&aliases, so cl-args get appropriate priority:
flags,aliases = self.flatten_flags()
loader = KVArgParseConfigLoader(argv=argv, aliases=aliases,
flags=flags)
config = loader.load_config()
self.update_config(config)
# store unparsed args in extra_args
self.extra_args = loader.extra_args |
Load a. py based config file by filename and path. | def load_config_file(self, filename, path=None):
"""Load a .py based config file by filename and path."""
loader = PyFileConfigLoader(filename, path=path)
try:
config = loader.load_config()
except ConfigFileNotFound:
# problem finding the file, raise
raise
except Exception:
# try to get the full filename, but it will be empty in the
# unlikely event that the error raised before filefind finished
filename = loader.full_filename or filename
# problem while running the file
self.log.error("Exception while loading config file %s",
filename, exc_info=True)
else:
self.log.debug("Loaded config file: %s", loader.full_filename)
self.update_config(config) |
generate default config file from Configurables | def generate_config_file(self):
"""generate default config file from Configurables"""
lines = ["# Configuration file for %s."%self.name]
lines.append('')
lines.append('c = get_config()')
lines.append('')
for cls in self.classes:
lines.append(cls.class_config_section())
return '\n'.join(lines) |
Choose k random elements of array. | def downsample(array, k):
"""Choose k random elements of array."""
length = array.shape[0]
indices = random.sample(xrange(length), k)
return array[indices] |
Produce a sequence of formatted lines from info. | def info_formatter(info):
"""Produce a sequence of formatted lines from info.
`info` is a sequence of pairs (label, data). The produced lines are
nicely formatted, ready to print.
"""
label_len = max([len(l) for l, _d in info])
for label, data in info:
if data == []:
data = "-none-"
if isinstance(data, (list, tuple)):
prefix = "%*s:" % (label_len, label)
for e in data:
yield "%*s %s" % (label_len+1, prefix, e)
prefix = ""
else:
yield "%*s: %s" % (label_len, label, data) |
Write a line of debug output. | def write(self, msg):
"""Write a line of debug output."""
if self.should('pid'):
msg = "pid %5d: %s" % (os.getpid(), msg)
self.output.write(msg+"\n")
self.output.flush() |
Update all the class traits having config = True as metadata. | def _config_changed(self, name, old, new):
"""Update all the class traits having ``config=True`` as metadata.
For any class trait with a ``config`` metadata attribute that is
``True``, we update the trait with the value of the corresponding
config entry.
"""
# Get all traits with a config metadata entry that is True
traits = self.traits(config=True)
# We auto-load config section for this class as well as any parent
# classes that are Configurable subclasses. This starts with Configurable
# and works down the mro loading the config for each section.
section_names = [cls.__name__ for cls in \
reversed(self.__class__.__mro__) if
issubclass(cls, Configurable) and issubclass(self.__class__, cls)]
for sname in section_names:
# Don't do a blind getattr as that would cause the config to
# dynamically create the section with name self.__class__.__name__.
if new._has_section(sname):
my_config = new[sname]
for k, v in traits.iteritems():
# Don't allow traitlets with config=True to start with
# uppercase. Otherwise, they are confused with Config
# subsections. But, developers shouldn't have uppercase
# attributes anyways! (PEP 6)
if k[0].upper()==k[0] and not k.startswith('_'):
raise ConfigurableError('Configurable traitlets with '
'config=True must start with a lowercase so they are '
'not confused with Config subsections: %s.%s' % \
(self.__class__.__name__, k))
try:
# Here we grab the value from the config
# If k has the naming convention of a config
# section, it will be auto created.
config_value = my_config[k]
except KeyError:
pass
else:
# print "Setting %s.%s from %s.%s=%r" % \
# (self.__class__.__name__,k,sname,k,config_value)
# We have to do a deepcopy here if we don't deepcopy the entire
# config object. If we don't, a mutable config_value will be
# shared by all instances, effectively making it a class attribute.
setattr(self, k, deepcopy(config_value)) |
Get the help string for this class in ReST format. If inst is given it s current trait values will be used in place of class defaults. | def class_get_help(cls, inst=None):
"""Get the help string for this class in ReST format.
If `inst` is given, it's current trait values will be used in place of
class defaults.
"""
assert inst is None or isinstance(inst, cls)
cls_traits = cls.class_traits(config=True)
final_help = []
final_help.append(u'%s options' % cls.__name__)
final_help.append(len(final_help[0])*u'-')
for k,v in sorted(cls.class_traits(config=True).iteritems()):
help = cls.class_get_trait_help(v, inst)
final_help.append(help)
return '\n'.join(final_help) |
Get the help string for a single trait. If inst is given it s current trait values will be used in place of the class default. | def class_get_trait_help(cls, trait, inst=None):
"""Get the help string for a single trait.
If `inst` is given, it's current trait values will be used in place of
the class default.
"""
assert inst is None or isinstance(inst, cls)
lines = []
header = "--%s.%s=<%s>" % (cls.__name__, trait.name, trait.__class__.__name__)
lines.append(header)
if inst is not None:
lines.append(indent('Current: %r' % getattr(inst, trait.name), 4))
else:
try:
dvr = repr(trait.get_default_value())
except Exception:
dvr = None # ignore defaults we can't construct
if dvr is not None:
if len(dvr) > 64:
dvr = dvr[:61]+'...'
lines.append(indent('Default: %s' % dvr, 4))
if 'Enum' in trait.__class__.__name__:
# include Enum choices
lines.append(indent('Choices: %r' % (trait.values,)))
help = trait.get_metadata('help')
if help is not None:
help = '\n'.join(wrap_paragraphs(help, 76))
lines.append(indent(help, 4))
return '\n'.join(lines) |
Get the config class config section | def class_config_section(cls):
"""Get the config class config section"""
def c(s):
"""return a commented, wrapped block."""
s = '\n\n'.join(wrap_paragraphs(s, 78))
return '# ' + s.replace('\n', '\n# ')
# section header
breaker = '#' + '-'*78
s = "# %s configuration"%cls.__name__
lines = [breaker, s, breaker, '']
# get the description trait
desc = cls.class_traits().get('description')
if desc:
desc = desc.default_value
else:
# no description trait, use __doc__
desc = getattr(cls, '__doc__', '')
if desc:
lines.append(c(desc))
lines.append('')
parents = []
for parent in cls.mro():
# only include parents that are not base classes
# and are not the class itself
# and have some configurable traits to inherit
if parent is not cls and issubclass(parent, Configurable) and \
parent.class_traits(config=True):
parents.append(parent)
if parents:
pstr = ', '.join([ p.__name__ for p in parents ])
lines.append(c('%s will inherit config from: %s'%(cls.__name__, pstr)))
lines.append('')
for name,trait in cls.class_traits(config=True).iteritems():
help = trait.get_metadata('help') or ''
lines.append(c(help))
lines.append('# c.%s.%s = %r'%(cls.__name__, name, trait.get_default_value()))
lines.append('')
return '\n'.join(lines) |
Walk the cls. mro () for parent classes that are also singletons | def _walk_mro(cls):
"""Walk the cls.mro() for parent classes that are also singletons
For use in instance()
"""
for subclass in cls.mro():
if issubclass(cls, subclass) and \
issubclass(subclass, SingletonConfigurable) and \
subclass != SingletonConfigurable:
yield subclass |
unset _instance for this class and singleton parents. | def clear_instance(cls):
"""unset _instance for this class and singleton parents.
"""
if not cls.initialized():
return
for subclass in cls._walk_mro():
if isinstance(subclass._instance, cls):
# only clear instances that are instances
# of the calling class
subclass._instance = None |
Returns a global instance of this class. | def instance(cls, *args, **kwargs):
"""Returns a global instance of this class.
This method create a new instance if none have previously been created
and returns a previously created instance is one already exists.
The arguments and keyword arguments passed to this method are passed
on to the :meth:`__init__` method of the class upon instantiation.
Examples
--------
Create a singleton class using instance, and retrieve it::
>>> from IPython.config.configurable import SingletonConfigurable
>>> class Foo(SingletonConfigurable): pass
>>> foo = Foo.instance()
>>> foo == Foo.instance()
True
Create a subclass that is retrived using the base class instance::
>>> class Bar(SingletonConfigurable): pass
>>> class Bam(Bar): pass
>>> bam = Bam.instance()
>>> bam == Bar.instance()
True
"""
# Create and save the instance
if cls._instance is None:
inst = cls(*args, **kwargs)
# Now make sure that the instance will also be returned by
# parent classes' _instance attribute.
for subclass in cls._walk_mro():
subclass._instance = inst
if isinstance(cls._instance, cls):
return cls._instance
else:
raise MultipleInstanceError(
'Multiple incompatible subclass instances of '
'%s are being created.' % cls.__name__
) |
Configure plugin. | def configure(self, options, conf):
"""Configure plugin.
"""
if not self.can_configure:
return
self.enabled = options.detailedErrors
self.conf = conf |
Add detail from traceback inspection to error message of a failure. | def formatFailure(self, test, err):
"""Add detail from traceback inspection to error message of a failure.
"""
ec, ev, tb = err
tbinfo = inspect_traceback(tb)
test.tbinfo = tbinfo
return (ec, '\n'.join([str(ev), tbinfo]), tb) |
a light excepthook adding a small message to the usual traceback | def crash_handler_lite(etype, evalue, tb):
"""a light excepthook, adding a small message to the usual traceback"""
traceback.print_exception(etype, evalue, tb)
from IPython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
# we are in a Shell environment, give %magic example
config = "%config "
else:
# we are not in a shell, show generic config
config = "c."
print >> sys.stderr, _lite_message_template.format(email=author_email, config=config) |
Return a string containing a crash report. | def make_report(self,traceback):
"""Return a string containing a crash report."""
sec_sep = self.section_sep
report = ['*'*75+'\n\n'+'IPython post-mortem report\n\n']
rpt_add = report.append
rpt_add(sys_info())
try:
config = pformat(self.app.config)
rpt_add(sec_sep)
rpt_add('Application name: %s\n\n' % self.app_name)
rpt_add('Current user configuration structure:\n\n')
rpt_add(config)
except:
pass
rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
return ''.join(report) |
{ % setvar <var_name > to <var_value > % } | def setvar(parser, token):
""" {% setvar <var_name> to <var_value> %} """
try:
setvar, var_name, to_, var_value = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('Invalid arguments for %r' % token.split_contents()[0])
return SetVarNode(var_name, var_value) |
Reimplemented to emit signals instead of making callbacks. | def call_handlers(self, msg):
""" Reimplemented to emit signals instead of making callbacks.
"""
# Emit the generic signal.
self.message_received.emit(msg)
# Emit signals for specialized message types.
msg_type = msg['header']['msg_type']
signal = getattr(self, msg_type, None)
if signal:
signal.emit(msg)
if not self._handlers_called:
self.first_reply.emit()
self._handlers_called = True |
Reimplemented to emit signals instead of making callbacks. | def call_handlers(self, msg):
""" Reimplemented to emit signals instead of making callbacks.
"""
# Emit the generic signal.
self.message_received.emit(msg)
# Emit signals for specialized message types.
msg_type = msg['header']['msg_type']
signal = getattr(self, msg_type + '_received', None)
if signal:
signal.emit(msg)
elif msg_type in ('stdout', 'stderr'):
self.stream_received.emit(msg) |
Reimplemented to ensure that signals are dispatched immediately. | def flush(self):
""" Reimplemented to ensure that signals are dispatched immediately.
"""
super(QtSubSocketChannel, self).flush()
QtCore.QCoreApplication.instance().processEvents() |
Reimplemented to emit signals instead of making callbacks. | def call_handlers(self, msg):
""" Reimplemented to emit signals instead of making callbacks.
"""
# Emit the generic signal.
self.message_received.emit(msg)
# Emit signals for specialized message types.
msg_type = msg['header']['msg_type']
if msg_type == 'input_request':
self.input_requested.emit(msg) |
Reimplemented for proper heartbeat management. | def start_kernel(self, *args, **kw):
""" Reimplemented for proper heartbeat management.
"""
if self._shell_channel is not None:
self._shell_channel.reset_first_reply()
super(QtKernelManager, self).start_kernel(*args, **kw)
self.started_kernel.emit() |
Reimplemented to emit signal. | def start_channels(self, *args, **kw):
""" Reimplemented to emit signal.
"""
super(QtKernelManager, self).start_channels(*args, **kw)
self.started_channels.emit() |
Reimplemented for proper heartbeat management. | def shell_channel(self):
""" Reimplemented for proper heartbeat management.
"""
if self._shell_channel is None:
self._shell_channel = super(QtKernelManager, self).shell_channel
self._shell_channel.first_reply.connect(self._first_reply)
return self._shell_channel |
Restore bytes of image data from unicode - only formats. Base64 encoding is handled elsewhere. Bytes objects in the notebook are always b64 - encoded. We DO NOT encode/ decode around file formats. | def restore_bytes(nb):
"""Restore bytes of image data from unicode-only formats.
Base64 encoding is handled elsewhere. Bytes objects in the notebook are
always b64-encoded. We DO NOT encode/decode around file formats.
"""
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type == 'code':
for output in cell.outputs:
if 'png' in output:
output.png = str_to_bytes(output.png, 'ascii')
if 'jpeg' in output:
output.jpeg = str_to_bytes(output.jpeg, 'ascii')
return nb |
join lines that have been written by splitlines () Has logic to protect against splitlines () which should have been splitlines ( True ) | def _join_lines(lines):
"""join lines that have been written by splitlines()
Has logic to protect against `splitlines()`, which
should have been `splitlines(True)`
"""
if lines and lines[0].endswith(('\n', '\r')):
# created by splitlines(True)
return u''.join(lines)
else:
# created by splitlines()
return u'\n'.join(lines) |
rejoin multiline text into strings For reversing effects of split_lines ( nb ). This only rejoins lines that have been split so if text objects were not split they will pass through unchanged. Used when reading JSON files that may have been passed through split_lines. | def rejoin_lines(nb):
"""rejoin multiline text into strings
For reversing effects of ``split_lines(nb)``.
This only rejoins lines that have been split, so if text objects were not split
they will pass through unchanged.
Used when reading JSON files that may have been passed through split_lines.
"""
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type == 'code':
if 'input' in cell and isinstance(cell.input, list):
cell.input = _join_lines(cell.input)
for output in cell.outputs:
for key in _multiline_outputs:
item = output.get(key, None)
if isinstance(item, list):
output[key] = _join_lines(item)
else: # text, heading cell
for key in ['source', 'rendered']:
item = cell.get(key, None)
if isinstance(item, list):
cell[key] = _join_lines(item)
return nb |
Restore all bytes objects in the notebook from base64 - encoded strings. Note: This is never used | def base64_decode(nb):
"""Restore all bytes objects in the notebook from base64-encoded strings.
Note: This is never used
"""
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type == 'code':
for output in cell.outputs:
if 'png' in output:
if isinstance(output.png, unicode):
output.png = output.png.encode('ascii')
output.png = decodestring(output.png)
if 'jpeg' in output:
if isinstance(output.jpeg, unicode):
output.jpeg = output.jpeg.encode('ascii')
output.jpeg = decodestring(output.jpeg)
return nb |
Base64 encode all bytes objects in the notebook. These will be b64 - encoded unicode strings Note: This is never used | def base64_encode(nb):
"""Base64 encode all bytes objects in the notebook.
These will be b64-encoded unicode strings
Note: This is never used
"""
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type == 'code':
for output in cell.outputs:
if 'png' in output:
output.png = encodestring(output.png).decode('ascii')
if 'jpeg' in output:
output.jpeg = encodestring(output.jpeg).decode('ascii')
return nb |
Read a notebook from a file like object | def read(self, fp, **kwargs):
"""Read a notebook from a file like object"""
nbs = fp.read()
if not py3compat.PY3 and not isinstance(nbs, unicode):
nbs = py3compat.str_to_unicode(nbs)
return self.reads(nbs, **kwargs) |
Write a notebook to a file like object | def write(self, nb, fp, **kwargs):
"""Write a notebook to a file like object"""
nbs = self.writes(nb,**kwargs)
if not py3compat.PY3 and not isinstance(nbs, unicode):
# this branch is likely only taken for JSON on Python 2
nbs = py3compat.str_to_unicode(nbs)
return fp.write(nbs) |
Return the list of mirrors from the last record found on the DNS entry:: | def get_mirrors(hostname=None):
"""Return the list of mirrors from the last record found on the DNS
entry::
>>> from pip.index import get_mirrors
>>> get_mirrors()
['a.pypi.python.org', 'b.pypi.python.org', 'c.pypi.python.org',
'd.pypi.python.org']
Originally written for the distutils2 project by Alexis Metaireau.
"""
if hostname is None:
hostname = DEFAULT_MIRROR_HOSTNAME
# return the last mirror registered on PyPI.
last_mirror_hostname = None
try:
last_mirror_hostname = socket.gethostbyname_ex(hostname)[0]
except socket.gaierror:
return []
if not last_mirror_hostname or last_mirror_hostname == DEFAULT_MIRROR_HOSTNAME:
last_mirror_hostname = "z.pypi.python.org"
end_letter = last_mirror_hostname.split(".", 1)
# determine the list from the last one.
return ["%s.%s" % (s, end_letter[1]) for s in string_range(end_letter[0])] |
Read from a pipe ignoring EINTR errors. | def read_no_interrupt(p):
"""Read from a pipe ignoring EINTR errors.
This is necessary because when reading from pipes with GUI event loops
running in the background, often interrupts are raised that stop the
command from completing."""
import errno
try:
return p.read()
except IOError, err:
if err.errno != errno.EINTR:
raise |
Open a command in a shell subprocess and execute a callback. | def process_handler(cmd, callback, stderr=subprocess.PIPE):
"""Open a command in a shell subprocess and execute a callback.
This function provides common scaffolding for creating subprocess.Popen()
calls. It creates a Popen object and then calls the callback with it.
Parameters
----------
cmd : str
A string to be executed with the underlying system shell (by calling
:func:`Popen` with ``shell=True``.
callback : callable
A one-argument function that will be called with the Popen object.
stderr : file descriptor number, optional
By default this is set to ``subprocess.PIPE``, but you can also pass the
value ``subprocess.STDOUT`` to force the subprocess' stderr to go into
the same file descriptor as its stdout. This is useful to read stdout
and stderr combined in the order they are generated.
Returns
-------
The return value of the provided callback is returned.
"""
sys.stdout.flush()
sys.stderr.flush()
# On win32, close_fds can't be true when using pipes for stdin/out/err
close_fds = sys.platform != 'win32'
p = subprocess.Popen(cmd, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=stderr,
close_fds=close_fds)
try:
out = callback(p)
except KeyboardInterrupt:
print('^C')
sys.stdout.flush()
sys.stderr.flush()
out = None
finally:
# Make really sure that we don't leave processes behind, in case the
# call above raises an exception
# We start by assuming the subprocess finished (to avoid NameErrors
# later depending on the path taken)
if p.returncode is None:
try:
p.terminate()
p.poll()
except OSError:
pass
# One last try on our way out
if p.returncode is None:
try:
p.kill()
except OSError:
pass
return out |
Return standard output of executing cmd in a shell. | def getoutput(cmd):
"""Return standard output of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
stdout : str
"""
out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT)
if out is None:
return ''
return py3compat.bytes_to_str(out) |
Return ( standard output standard error ) of executing cmd in a shell. | def getoutputerror(cmd):
"""Return (standard output, standard error) of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
stdout : str
stderr : str
"""
out_err = process_handler(cmd, lambda p: p.communicate())
if out_err is None:
return '', ''
out, err = out_err
return py3compat.bytes_to_str(out), py3compat.bytes_to_str(err) |
Split a command line s arguments in a shell - like manner. | def arg_split(s, posix=False, strict=True):
"""Split a command line's arguments in a shell-like manner.
This is a modified version of the standard library's shlex.split()
function, but with a default of posix=False for splitting, so that quotes
in inputs are respected.
if strict=False, then any errors shlex.split would raise will result in the
unparsed remainder being the last element of the list, rather than raising.
This is because we sometimes use arg_split to parse things other than
command-line args.
"""
# Unfortunately, python's shlex module is buggy with unicode input:
# http://bugs.python.org/issue1170
# At least encoding the input when it's unicode seems to help, but there
# may be more problems lurking. Apparently this is fixed in python3.
is_unicode = False
if (not py3compat.PY3) and isinstance(s, unicode):
is_unicode = True
s = s.encode('utf-8')
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
# Extract tokens, ensuring that things like leaving open quotes
# does not cause this to raise. This is important, because we
# sometimes pass Python source through this (e.g. %timeit f(" ")),
# and it shouldn't raise an exception.
# It may be a bad idea to parse things that are not command-line args
# through this function, but we do, so let's be safe about it.
lex.commenters='' #fix for GH-1269
tokens = []
while True:
try:
tokens.append(lex.next())
except StopIteration:
break
except ValueError:
if strict:
raise
# couldn't parse, get remaining blob as last token
tokens.append(lex.token)
break
if is_unicode:
# Convert the tokens back to unicode.
tokens = [x.decode('utf-8') for x in tokens]
return tokens |
TestSuite replacement entry point. Use anywhere you might use a unittest. TestSuite. The collector will by default load options from all config files and execute loader. loadTestsFromNames () on the configured testNames or. if no testNames are configured. | def collector():
"""TestSuite replacement entry point. Use anywhere you might use a
unittest.TestSuite. The collector will, by default, load options from
all config files and execute loader.loadTestsFromNames() on the
configured testNames, or '.' if no testNames are configured.
"""
# plugins that implement any of these methods are disabled, since
# we don't control the test runner and won't be able to run them
# finalize() is also not called, but plugins that use it aren't disabled,
# because capture needs it.
setuptools_incompat = ('report', 'prepareTest',
'prepareTestLoader', 'prepareTestRunner',
'setOutputStream')
plugins = RestrictedPluginManager(exclude=setuptools_incompat)
conf = Config(files=all_config_files(),
plugins=plugins)
conf.configure(argv=['collector'])
loader = defaultTestLoader(conf)
if conf.testNames:
suite = loader.loadTestsFromNames(conf.testNames)
else:
suite = loader.loadTestsFromNames(('.',))
return FinalizingSuiteWrapper(suite, plugins.finalize) |
Compress a directory history into a new one with at most 20 entries. | def compress_dhist(dh):
"""Compress a directory history into a new one with at most 20 entries.
Return a new list made from the first and last 10 elements of dhist after
removal of duplicates.
"""
head, tail = dh[:-10], dh[-10:]
newhead = []
done = set()
for h in head:
if h in done:
continue
newhead.append(h)
done.add(h)
return newhead + tail |
Class decorator for all subclasses of the main Magics class. | def magics_class(cls):
"""Class decorator for all subclasses of the main Magics class.
Any class that subclasses Magics *must* also apply this decorator, to
ensure that all the methods that have been decorated as line/cell magics
get correctly registered in the class instance. This is necessary because
when method decorators run, the class does not exist yet, so they
temporarily store their information into a module global. Application of
this class decorator copies that global data to the class instance and
clears the global.
Obviously, this mechanism is not thread-safe, which means that the
*creation* of subclasses of Magic should only be done in a single-thread
context. Instantiation of the classes has no restrictions. Given that
these classes are typically created at IPython startup time and before user
application code becomes active, in practice this should not pose any
problems.
"""
cls.registered = True
cls.magics = dict(line = magics['line'],
cell = magics['cell'])
magics['line'] = {}
magics['cell'] = {}
return cls |
Utility function to store a function as a magic of a specific kind. | def record_magic(dct, magic_kind, magic_name, func):
"""Utility function to store a function as a magic of a specific kind.
Parameters
----------
dct : dict
A dictionary with 'line' and 'cell' subdicts.
magic_kind : str
Kind of magic to be stored.
magic_name : str
Key to store the magic as.
func : function
Callable object to store.
"""
if magic_kind == 'line_cell':
dct['line'][magic_name] = dct['cell'][magic_name] = func
else:
dct[magic_kind][magic_name] = func |
Decorator factory for methods in Magics subclasses. | def _method_magic_marker(magic_kind):
"""Decorator factory for methods in Magics subclasses.
"""
validate_type(magic_kind)
# This is a closure to capture the magic_kind. We could also use a class,
# but it's overkill for just that one bit of state.
def magic_deco(arg):
call = lambda f, *a, **k: f(*a, **k)
if callable(arg):
# "Naked" decorator call (just @foo, no args)
func = arg
name = func.func_name
retval = decorator(call, func)
record_magic(magics, magic_kind, name, name)
elif isinstance(arg, basestring):
# Decorator called with arguments (@foo('bar'))
name = arg
def mark(func, *a, **kw):
record_magic(magics, magic_kind, name, func.func_name)
return decorator(call, func)
retval = mark
else:
raise TypeError("Decorator can only be called with "
"string or function")
return retval
# Ensure the resulting decorator has a usable docstring
magic_deco.__doc__ = _docstring_template.format('method', magic_kind)
return magic_deco |
Decorator factory for standalone functions. | def _function_magic_marker(magic_kind):
"""Decorator factory for standalone functions.
"""
validate_type(magic_kind)
# This is a closure to capture the magic_kind. We could also use a class,
# but it's overkill for just that one bit of state.
def magic_deco(arg):
call = lambda f, *a, **k: f(*a, **k)
# Find get_ipython() in the caller's namespace
caller = sys._getframe(1)
for ns in ['f_locals', 'f_globals', 'f_builtins']:
get_ipython = getattr(caller, ns).get('get_ipython')
if get_ipython is not None:
break
else:
raise NameError('Decorator can only run in context where '
'`get_ipython` exists')
ip = get_ipython()
if callable(arg):
# "Naked" decorator call (just @foo, no args)
func = arg
name = func.func_name
ip.register_magic_function(func, magic_kind, name)
retval = decorator(call, func)
elif isinstance(arg, basestring):
# Decorator called with arguments (@foo('bar'))
name = arg
def mark(func, *a, **kw):
ip.register_magic_function(func, magic_kind, name)
return decorator(call, func)
retval = mark
else:
raise TypeError("Decorator can only be called with "
"string or function")
return retval
# Ensure the resulting decorator has a usable docstring
ds = _docstring_template.format('function', magic_kind)
ds += dedent("""
Note: this decorator can only be used in a context where IPython is already
active, so that the `get_ipython()` call succeeds. You can therefore use
it in your startup files loaded after IPython initializes, but *not* in the
IPython configuration file itself, which is executed before IPython is
fully up and running. Any file located in the `startup` subdirectory of
your configuration profile will be OK in this sense.
""")
magic_deco.__doc__ = ds
return magic_deco |
Return dict of documentation of magic functions. | def lsmagic_docs(self, brief=False, missing=''):
"""Return dict of documentation of magic functions.
The return dict has the keys 'line' and 'cell', corresponding to the
two types of magics we support. Each value is a dict keyed by magic
name whose value is the function docstring. If a docstring is
unavailable, the value of `missing` is used instead.
If brief is True, only the first line of each docstring will be returned.
"""
docs = {}
for m_type in self.magics:
m_docs = {}
for m_name, m_func in self.magics[m_type].iteritems():
if m_func.__doc__:
if brief:
m_docs[m_name] = m_func.__doc__.split('\n', 1)[0]
else:
m_docs[m_name] = m_func.__doc__.rstrip()
else:
m_docs[m_name] = missing
docs[m_type] = m_docs
return docs |
Register one or more instances of Magics. | def register(self, *magic_objects):
"""Register one or more instances of Magics.
Take one or more classes or instances of classes that subclass the main
`core.Magic` class, and register them with IPython to use the magic
functions they provide. The registration process will then ensure that
any methods that have decorated to provide line and/or cell magics will
be recognized with the `%x`/`%%x` syntax as a line/cell magic
respectively.
If classes are given, they will be instantiated with the default
constructor. If your classes need a custom constructor, you should
instanitate them first and pass the instance.
The provided arguments can be an arbitrary mix of classes and instances.
Parameters
----------
magic_objects : one or more classes or instances
"""
# Start by validating them to ensure they have all had their magic
# methods registered at the instance level
for m in magic_objects:
if not m.registered:
raise ValueError("Class of magics %r was constructed without "
"the @register_magics class decorator")
if type(m) in (type, MetaHasTraits):
# If we're given an uninstantiated class
m = m(shell=self.shell)
# Now that we have an instance, we can register it and update the
# table of callables
self.registry[m.__class__.__name__] = m
for mtype in magic_kinds:
self.magics[mtype].update(m.magics[mtype]) |
Expose a standalone function as magic function for IPython. | def register_function(self, func, magic_kind='line', magic_name=None):
"""Expose a standalone function as magic function for IPython.
This will create an IPython magic (line, cell or both) from a
standalone function. The functions should have the following
signatures:
* For line magics: `def f(line)`
* For cell magics: `def f(line, cell)`
* For a function that does both: `def f(line, cell=None)`
In the latter case, the function will be called with `cell==None` when
invoked as `%f`, and with cell as a string when invoked as `%%f`.
Parameters
----------
func : callable
Function to be registered as a magic.
magic_kind : str
Kind of magic, one of 'line', 'cell' or 'line_cell'
magic_name : optional str
If given, the name the magic will have in the IPython namespace. By
default, the name of the function itself is used.
"""
# Create the new method in the user_magics and register it in the
# global table
validate_type(magic_kind)
magic_name = func.func_name if magic_name is None else magic_name
setattr(self.user_magics, magic_name, func)
record_magic(self.magics, magic_kind, magic_name, func) |
[ Deprecated ] Expose own function as magic function for IPython. | def define_magic(self, name, func):
"""[Deprecated] Expose own function as magic function for IPython.
Example::
def foo_impl(self, parameter_s=''):
'My very own magic!. (Use docstrings, IPython reads them).'
print 'Magic function. Passed parameter is between < >:'
print '<%s>' % parameter_s
print 'The self object is:', self
ip.define_magic('foo',foo_impl)
"""
meth = types.MethodType(func, self.user_magics)
setattr(self.user_magics, name, meth)
record_magic(self.magics, 'line', name, meth) |
Format a string for latex inclusion. | def format_latex(self, strng):
"""Format a string for latex inclusion."""
# Characters that need to be escaped for latex:
escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
# Magic command names as headers:
cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
re.MULTILINE)
# Magic commands
cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
re.MULTILINE)
# Paragraph continue
par_re = re.compile(r'\\$',re.MULTILINE)
# The "\n" symbol
newline_re = re.compile(r'\\n')
# Now build the string for output:
#strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
strng)
strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
strng = par_re.sub(r'\\\\',strng)
strng = escape_re.sub(r'\\\1',strng)
strng = newline_re.sub(r'\\textbackslash{}n',strng)
return strng |
Parse options passed to an argument string. | def parse_options(self, arg_str, opt_str, *long_opts, **kw):
"""Parse options passed to an argument string.
The interface is similar to that of getopt(), but it returns back a
Struct with the options as keys and the stripped argument string still
as a string.
arg_str is quoted as a true sys.argv vector by using shlex.split.
This allows us to easily expand variables, glob files, quote
arguments, etc.
Options:
-mode: default 'string'. If given as 'list', the argument string is
returned as a list (split on whitespace) instead of a string.
-list_all: put all option values in lists. Normally only options
appearing more than once are put in a list.
-posix (True): whether to split the input line in POSIX mode or not,
as per the conventions outlined in the shlex module from the
standard library."""
# inject default options at the beginning of the input line
caller = sys._getframe(1).f_code.co_name
arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
mode = kw.get('mode','string')
if mode not in ['string','list']:
raise ValueError,'incorrect mode given: %s' % mode
# Get options
list_all = kw.get('list_all',0)
posix = kw.get('posix', os.name == 'posix')
strict = kw.get('strict', True)
# Check if we have more than one argument to warrant extra processing:
odict = {} # Dictionary with options
args = arg_str.split()
if len(args) >= 1:
# If the list of inputs only has 0 or 1 thing in it, there's no
# need to look for options
argv = arg_split(arg_str, posix, strict)
# Do regular option processing
try:
opts,args = getopt(argv, opt_str, long_opts)
except GetoptError,e:
raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
" ".join(long_opts)))
for o,a in opts:
if o.startswith('--'):
o = o[2:]
else:
o = o[1:]
try:
odict[o].append(a)
except AttributeError:
odict[o] = [odict[o],a]
except KeyError:
if list_all:
odict[o] = [a]
else:
odict[o] = a
# Prepare opts,args for return
opts = Struct(odict)
if mode == 'string':
args = ' '.join(args)
return opts,args |
Make an entry in the options_table for fn with value optstr | def default_option(self, fn, optstr):
"""Make an entry in the options_table for fn, with value optstr"""
if fn not in self.lsmagic():
error("%s is not a magic function" % fn)
self.options_table[fn] = optstr |
Show a basic reference about the GUI Console. | def page_guiref(arg_s=None):
"""Show a basic reference about the GUI Console."""
from IPython.core import page
page.page(gui_reference, auto_html=True) |
Get a member from an object by ( string ) name | def get_member(thing_obj, member_string):
"""Get a member from an object by (string) name"""
mems = {x[0]: x[1] for x in inspect.getmembers(thing_obj)}
if member_string in mems:
return mems[member_string] |
Return a live function from a full dotted path. Must be either a plain function directly in a module a class function or a static function. ( No modules classes or instance methods since those can t be called as tasks. ) | def func_from_string(callable_str):
"""Return a live function from a full dotted path. Must be either a plain function
directly in a module, a class function, or a static function. (No modules, classes,
or instance methods, since those can't be called as tasks.)"""
components = callable_str.split('.')
func = None
if len(components) < 2:
raise ValueError("Need full dotted path to task function")
elif len(components) == 2:
mod_name = components[0]
func_name = components[1]
try:
mod = import_module(mod_name)
except ModuleNotFoundError:
raise ValueError(f"Module {mod_name} not found")
func = get_member(mod, func_name)
if func is None:
raise ValueError(f"{func_name} is not a member of {mod_name}")
else:
mod_name = '.'.join(components[:-1])
func_name = components[-1]
try:
mod = import_module(mod_name)
except ModuleNotFoundError:
mod_name = '.'.join(components[:-2])
class_name = components[-2]
try:
mod = import_module(mod_name)
except ModuleNotFoundError:
raise ValueError(f"Module {mod_name} not found")
klass = get_member(mod, class_name)
if klass is None:
raise ValueError(f"Class {class_name} is not a member of {mod_name}")
func = get_member(klass, func_name)
if func is None:
raise ValueError(f"Function {func_name} is not a member of {mod_name}.{class_name}")
if func is None:
func = get_member(mod, func_name)
if func is None:
raise ValueError(f"Function {func_name} is not a member of {mod_name}")
if inspect.ismodule(func):
raise ValueError("Cannot call module directly")
if inspect.isclass(func):
raise ValueError("Cannot call class directly")
try:
sig = [x for x in inspect.signature(func).parameters]
except TypeError:
raise ValueError(f"{callable_str} ({str(type(func))[1:-1]}) is not a callable object")
if len(sig) == 1:
if sig[0] == 'message':
return func
else:
raise ValueError("Task function must have one parameter, named 'message'")
elif len(sig)==2 and sig[0]=='self' and sig[1]=='message':
# We only check for the conventional 'self', but if you're using something else,
# you deserve the pain you'll have trying to debug this.
raise ValueError("Can't call instance method without an instance! (Try sisy.models.task_with_callable)")
else:
raise ValueError("Improper signature for task function (needs only 'message')") |
Factory function to create a properly initialized task. | def task_with_callable(the_callable, label=None, schedule=DEFAULT_SCHEDULE, userdata=None, pk_override=None):
"""Factory function to create a properly initialized task."""
task = Task()
if isinstance(the_callable, str):
if pk_override is not None:
components = the_callable.split('.')
info = dict(
func_type='instancemethod',
module_name='.'.join(components[:-2]),
class_name=components[-2],
class_path='.'.join(components[:-1]),
model_pk=pk_override,
func_name=components[-1],
func_path=the_callable,
)
task.funcinfo = info
else:
task.funcinfo = get_func_info(func_from_string(the_callable))
else:
task.funcinfo = get_func_info(the_callable)
if label is None:
task.label = task.funcinfo['func_path']
else:
task.label = label
task.schedule = schedule
if not croniter.is_valid(task.schedule):
raise ValueError(f"Cron schedule {task.schedule} is not valid")
if userdata is None:
task.userdata = dict()
else:
if isinstance(userdata, dict):
task.userdata = userdata
else:
raise ValueError("Userdata must be a dictionary of JSON-serializable data")
return task |
Return task info dictionary from task label. Internal function pretty much only used in migrations since the model methods aren t there. | def taskinfo_with_label(label):
"""Return task info dictionary from task label. Internal function,
pretty much only used in migrations since the model methods aren't there."""
task = Task.objects.get(label=label)
info = json.loads(task._func_info)
return info |
Find and return a callable object from a task info dictionary | def func_from_info(self):
"""Find and return a callable object from a task info dictionary"""
info = self.funcinfo
functype = info['func_type']
if functype in ['instancemethod', 'classmethod', 'staticmethod']:
the_modelclass = get_module_member_by_dottedpath(info['class_path'])
if functype == 'instancemethod':
the_modelobject = the_modelclass.objects.get(pk=info['model_pk'])
the_callable = get_member(the_modelobject, info['func_name'])
else:
the_callable = get_member(the_modelclass, info['func_name'])
return the_callable
elif functype == 'function':
mod = import_module(info['module_name'])
the_callable = get_member(mod, info['func_name'])
return the_callable
else:
raise ValueError(f"Unknown functype '{functype} in task {self.pk} ({self.label})") |
Internal task - runner class method called by: py: func: sisy. consumers. run_heartbeat | def run_tasks(cls):
"""Internal task-runner class method, called by :py:func:`sisy.consumers.run_heartbeat`"""
now = timezone.now()
tasks = cls.objects.filter(enabled=True)
for task in tasks:
if task.next_run == HAS_NOT_RUN:
task.calc_next_run()
if task.next_run < now:
if (task.start_running < now):
if (task.end_running > now):
task.run_asap()
else:
task.enabled = False
task.save()
Channel(KILL_TASK_CHANNEL).send({'id': task.pk}) |
Calculate next run time of this task | def calc_next_run(self):
"""Calculate next run time of this task"""
base_time = self.last_run
if self.last_run == HAS_NOT_RUN:
if self.wait_for_schedule is False:
self.next_run = timezone.now()
self.wait_for_schedule = False # reset so we don't run on every clock tick
self.save()
return
else:
base_time = timezone.now()
self.next_run = croniter(self.schedule, base_time).get_next(datetime)
self.save() |
Internal instance method to submit this task for running immediately. Does not handle any iteration end - date etc. processing. | def submit(self, timestamp):
"""Internal instance method to submit this task for running immediately.
Does not handle any iteration, end-date, etc., processing."""
Channel(RUN_TASK_CHANNEL).send({'id':self.pk, 'ts': timestamp.timestamp()}) |
Internal instance method run by worker process to actually run the task callable. | def run(self, message):
"""Internal instance method run by worker process to actually run the task callable."""
the_callable = self.func_from_info()
try:
task_message = dict(
task=self,
channel_message=message,
)
the_callable(task_message)
finally:
if self.end_running < self.next_run:
self.enabled=False
Channel(KILL_TASK_CHANNEL).send({'id': self.pk})
return
if self.iterations == 0:
return
else:
self.iterations -= 1
if self.iterations == 0:
self.enabled = False
Channel(KILL_TASK_CHANNEL).send({'id':self.pk})
self.save() |
Instance method to run this task immediately. | def run_asap(self):
"""Instance method to run this task immediately."""
now = timezone.now()
self.last_run = now
self.calc_next_run()
self.save()
self.submit(now) |
Class method to run a callable with a specified number of iterations | def run_iterations(cls, the_callable, iterations=1, label=None, schedule='* * * * * *', userdata = None, run_immediately=False, delay_until=None):
"""Class method to run a callable with a specified number of iterations"""
task = task_with_callable(the_callable, label=label, schedule=schedule, userdata=userdata)
task.iterations = iterations
if delay_until is not None:
if isinstance(delay_until, datetime):
if delay_until > timezone.now():
task.start_running = delay_until
else:
raise ValueError("Task cannot start running in the past")
else:
raise ValueError("delay_until must be a datetime.datetime instance")
if run_immediately:
task.next_run = timezone.now()
else:
task.calc_next_run()
task.save() |
Class method to run a one - shot task immediately. | def run_once(cls, the_callable, userdata=None, delay_until=None):
"""Class method to run a one-shot task, immediately."""
cls.run_iterations(the_callable, userdata=userdata, run_immediately=True, delay_until=delay_until) |
Set the url file. | def find_url_file(self):
"""Set the url file.
Here we don't try to actually see if it exists for is valid as that
is hadled by the connection logic.
"""
config = self.config
# Find the actual controller key file
if not self.url_file:
self.url_file = os.path.join(
self.profile_dir.security_dir,
self.url_file_name
) |
load config from a JSON connector file at a * lower * priority than command - line/ config files. | def load_connector_file(self):
"""load config from a JSON connector file,
at a *lower* priority than command-line/config files.
"""
self.log.info("Loading url_file %r", self.url_file)
config = self.config
with open(self.url_file) as f:
d = json.loads(f.read())
if 'exec_key' in d:
config.Session.key = cast_bytes(d['exec_key'])
try:
config.EngineFactory.location
except AttributeError:
config.EngineFactory.location = d['location']
d['url'] = disambiguate_url(d['url'], config.EngineFactory.location)
try:
config.EngineFactory.url
except AttributeError:
config.EngineFactory.url = d['url']
try:
config.EngineFactory.sshserver
except AttributeError:
config.EngineFactory.sshserver = d['ssh'] |
Promote engine to listening kernel accessible to frontends. | def bind_kernel(self, **kwargs):
"""Promote engine to listening kernel, accessible to frontends."""
if self.kernel_app is not None:
return
self.log.info("Opening ports for direct connections as an IPython kernel")
kernel = self.kernel
kwargs.setdefault('config', self.config)
kwargs.setdefault('log', self.log)
kwargs.setdefault('profile_dir', self.profile_dir)
kwargs.setdefault('session', self.engine.session)
app = self.kernel_app = IPKernelApp(**kwargs)
# allow IPKernelApp.instance():
IPKernelApp._instance = app
app.init_connection_file()
# relevant contents of init_sockets:
app.shell_port = app._bind_socket(kernel.shell_streams[0], app.shell_port)
app.log.debug("shell ROUTER Channel on port: %i", app.shell_port)
app.iopub_port = app._bind_socket(kernel.iopub_socket, app.iopub_port)
app.log.debug("iopub PUB Channel on port: %i", app.iopub_port)
kernel.stdin_socket = self.engine.context.socket(zmq.ROUTER)
app.stdin_port = app._bind_socket(kernel.stdin_socket, app.stdin_port)
app.log.debug("stdin ROUTER Channel on port: %i", app.stdin_port)
# start the heartbeat, and log connection info:
app.init_heartbeat()
app.log_connection_info()
app.write_connection_file() |
Check whether pid exists in the current process table. | def pid_exists(pid):
"""Check whether pid exists in the current process table."""
if not isinstance(pid, int):
raise TypeError('an integer is required')
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError:
e = sys.exc_info()[1]
return e.errno == errno.EPERM
else:
return True |
Return disk usage associated with path. | def get_disk_usage(path):
"""Return disk usage associated with path."""
st = os.statvfs(path)
free = (st.f_bavail * st.f_frsize)
total = (st.f_blocks * st.f_frsize)
used = (st.f_blocks - st.f_bfree) * st.f_frsize
percent = usage_percent(used, total, _round=1)
# NB: the percentage is -5% than what shown by df due to
# reserved blocks that we are currently not considering:
# http://goo.gl/sWGbH
return nt_diskinfo(total, used, free, percent) |
Execute a test described by a YAML file. | def timid(ctxt, test, key=None, check=False, exts=None):
"""
Execute a test described by a YAML file.
:param ctxt: A ``timid.context.Context`` object.
:param test: The name of a YAML file containing the test
description. Note that the current working directory
set up in ``ctxt.environment`` does not affect the
resolution of this file.
:param key: An optional key into the test description file. If
not ``None``, the file named by ``test`` must be a
YAML dictionary of lists of steps; otherwise, it must
be a simple list of steps.
:param check: If ``True``, only performs a syntax check of the
test steps indicated by ``test`` and ``key``; the
test itself is not run.
:param exts: An instance of ``timid.extensions.ExtensionSet``
describing the extensions to be called while
processing the test steps.
"""
# Normalize the extension set
if exts is None:
exts = extensions.ExtensionSet()
# Begin by reading the steps and adding them to the list in the
# context (which may already have elements thanks to the
# extensions)
ctxt.emit('Reading test steps from %s%s...' %
(test, '[%s]' % key if key else ''), debug=True)
ctxt.steps += exts.read_steps(ctxt, steps.Step.parse_file(ctxt, test, key))
# If all we were supposed to do was check, well, we've
# accomplished that...
if check:
return None
# Now we execute each step in turn
for idx, step in enumerate(ctxt.steps):
# Emit information about what we're doing
ctxt.emit('[Step %d]: %s . . .' % (idx, step.name))
# Run through extension hooks
if exts.pre_step(ctxt, step, idx):
ctxt.emit('[Step %d]: `- Step %s' %
(idx, steps.states[steps.SKIPPED]))
continue
# Now execute the step
result = step(ctxt)
# Let the extensions process the result of the step
exts.post_step(ctxt, step, idx, result)
# Emit the result
ctxt.emit('[Step %d]: `- Step %s%s' %
(idx, steps.states[result.state],
' (ignored)' if result.ignore else ''))
# Was the step a success?
if not result:
msg = 'Test step failure'
if result.msg:
msg += ': %s' % result.msg
return msg
# All done! And a success, to boot...
return None |
A cli_tools processor function that interfaces between the command line and the timid () function. This function is responsible for allocating a timid. context. Context object and initializing the activated extensions and for calling those extensions finalize () method. | def _processor(args):
"""
A ``cli_tools`` processor function that interfaces between the
command line and the ``timid()`` function. This function is
responsible for allocating a ``timid.context.Context`` object and
initializing the activated extensions, and for calling those
extensions' ``finalize()`` method.
:param args: The ``argparse.Namespace`` object containing the
results of argument processing.
"""
# Begin by initializing a context
args.ctxt = context.Context(args.verbose, args.debug, args.directory)
# Now set up the extension set
args.exts = extensions.ExtensionSet.activate(args.ctxt, args)
# Update the environment and the variables
args.ctxt.environment.update(args.environment)
args.ctxt.variables.update(args.variables)
# Call the actual timid() function
try:
result = yield
# If an exception occurred, give the extensions an opportunity to
# handle it
except Exception as exc:
if args.debug:
# Make sure we emit a proper traceback
traceback.print_exc(file=sys.stderr)
# The exception is the result, from the point of view of the
# extensions
result = exc
# Allow the extensions to handle the result
result = args.exts.finalize(args.ctxt, result)
# If the final result is an exception, convert it to a string for
# yielding back to cli_tools
if isinstance(result, Exception):
result = str(result)
# This line is covered, but coverage appears to be missing it for
# some reason
yield result |
>>> lang_instance = create_lang_instance () >>> lang_instance. aml_evaluate ( lang_instance. aml_compile ( 1 = 1 )) True >>> li = create_lang_instance () >>> c = li. aml_compile >>> e = li. aml_evaluate >>> p = li. aml_translate_python >>> s = li. aml_translate_sql >>> u = li. aml_suggest >>> e ( c ( 1 = 0 )) False >>> e ( c ( 1 = 1 )) True >>> e ( c ( ( 1 = 1 ) )) True >>> e ( c ( 1 > 1 )) False >>> e ( c ( not 1 > 1 )) True >>> e ( c ( 1 ! = 1 )) False >>> e ( c ( - 2 = - 2 )) True >>> eval ( p ( c ( - 2 = - 2 ))) True >>> eval ( p ( c ( - 2 > = - 1 ))) False >>> eval ( p ( c ( - 2 < = - 1 ))) True >>> eval ( p ( c ( 2 > = 1 ))) True >>> eval ( p ( c ( 2 < = 1 ))) False >>> eval ( p ( c ( null = null ))) True >>> eval ( p ( c ( 1 = null ))) False >>> e ( c ( foo = foo )) True >>> e ( c ( foo = foo )) True >>> e ( c ( foo = \\ foo \\ )) True >>> e ( c ( fo \\ o = fo \\ o )) True >>> e ( c ( foo + = + foo )) True >>> li = create_lang_instance ( { foo: 1 } ) ; >>> c = li. aml_compile >>> e = li. aml_evaluate >>> e ( c ( foo = 1 )) True >>> li = create_lang_instance ( { foo: 1. 00 } ) >>> c = li. aml_compile >>> e = li. aml_evaluate >>> e ( c ( foo = 1 )) True >>> li = create_lang_instance ( { foo: 2. 24 } ) >>> c = li. aml_compile >>> e = li. aml_evaluate >>> e ( c ( foo = 2. 24 )) True >>> e ( c ( foo = 2. 2399 or foo = 2. 24 )) True >>> e ( c ( foo = 2. 2399 or foo = 2. 2401 )) False >>> e ( c ( foo in ( 2. 2399 2. 24 null ) )) True >>> e ( c ( foo in ( 2. 2399 2. 2401 null ) )) False >>> e ( c ( null in ( 2. 2399 2. 2401 null ) )) True >>> e ( c ( null in ( 2. 2399 2. 2401 null ) )) False >>> e ( c ( null in ( 2. 2399 null null ) )) True >>> li = create_lang_instance ( { foo: foo } ) >>> c = li. aml_compile >>> e = li. aml_evaluate >>> e ( c ( foo = foo )) True >>> li = create_lang_instance () >>> c = li. aml_compile >>> p = li. aml_translate_python >>> s = li. aml_translate_sql >>> s ( c ( null = null )) u null is null >>> p ( c ( null = null )) u None == None >>> s ( c ( null ! = null )) u null is not null >>> p ( c ( null ! = null )) u None ! = None >>> s ( c ( 5 ! = 3 )) u 5 < > 3 >>> p ( c ( 5 ! = 3 )) u 5 ! = 3 >>> p ( c ( 5 in ( 3 4 5 ) )) u 5 in ( 3 4 5 ) >>> p ( s ( 5 in ( 3 4 5 ) )) u 5 in ( 3 4 5 ) >>> li = create_lang_instance ( { foo: bar fo2: ba2 } ) >>> c = li. aml_compile >>> p = li. aml_translate_python >>> e = li. aml_evaluate >>> u = li. aml_suggest >>> u ( 1 = fo ) [ u fo2 u foo ] >>> u ( 1 = FO ) [ u fo2 u foo ] >>> p ( c ( null = null )) u None == None >>> e ( c ( foo = bar )) True >>> e ( c ( fo2 = ba2 )) True | def create_lang_instance(var_map = None):
"""
>>> lang_instance = create_lang_instance()
>>> lang_instance.aml_evaluate(lang_instance.aml_compile('1 = 1'))
True
>>> li = create_lang_instance()
>>> c = li.aml_compile
>>> e = li.aml_evaluate
>>> p = li.aml_translate_python
>>> s = li.aml_translate_sql
>>> u = li.aml_suggest
>>> e(c('1 = 0'))
False
>>> e(c('"1" = "1"'))
True
>>> e(c('(1=1)'))
True
>>> e(c('1 > 1'))
False
>>> e(c('not 1 > 1'))
True
>>> e(c('1 != 1'))
False
>>> e(c('-2 = -2'))
True
>>> eval(p(c('-2 = -2')))
True
>>> eval(p(c('-2 >= -1')))
False
>>> eval(p(c('-2 <= -1')))
True
>>> eval(p(c('2 >= 1')))
True
>>> eval(p(c('2 <= 1')))
False
>>> eval(p(c('null = null')))
True
>>> eval(p(c('1 = null')))
False
>>> e(c('"foo" = "foo"'))
True
>>> e(c('"foo"' '=' "'foo'"))
True
>>> e(c('"foo" = \\'foo\\''))
True
>>> e(c('"fo\\'o" = "fo\\'o"'))
True
>>> e(c("'foo'" + '=' + '"foo"'))
True
>>> li = create_lang_instance({'foo' : 1});
>>> c = li.aml_compile
>>> e = li.aml_evaluate
>>> e(c('foo = 1'))
True
>>> li = create_lang_instance({'foo' : 1.00})
>>> c = li.aml_compile
>>> e = li.aml_evaluate
>>> e(c('foo = 1'))
True
>>> li = create_lang_instance({'foo' : 2.24})
>>> c = li.aml_compile
>>> e = li.aml_evaluate
>>> e(c('foo = 2.24'))
True
>>> e(c('foo = 2.2399 or foo = 2.24'))
True
>>> e(c('foo = 2.2399 or foo = 2.2401'))
False
>>> e(c('foo in (2.2399, 2.24, null,)'))
True
>>> e(c('foo in (2.2399, 2.2401, null,)'))
False
>>> e(c('null in (2.2399, 2.2401, null)'))
True
>>> e(c('"null" in (2.2399, 2.2401, null)'))
False
>>> e(c('"null"' 'in' "(2.2399, 'null', null)"))
True
>>> li = create_lang_instance({'foo' : 'foo'})
>>> c = li.aml_compile
>>> e = li.aml_evaluate
>>> e(c('foo = "foo"'))
True
>>> li = create_lang_instance()
>>> c = li.aml_compile
>>> p = li.aml_translate_python
>>> s = li.aml_translate_sql
>>> s(c('null = null'))
u'null is null'
>>> p(c('null = null'))
u'None == None'
>>> s(c('null != null'))
u'null is not null'
>>> p(c('null != null'))
u'None != None'
>>> s(c('5 != 3'))
u'5 <> 3'
>>> p(c('5 != 3'))
u'5 != 3'
>>> p(c('5 in (3, 4, 5)'))
u'5 in (3, 4, 5,)'
>>> p(s('5 in (3, 4, 5)'))
u'5 in (3, 4, 5)'
>>> li = create_lang_instance({'foo' : 'bar', 'fo2' : 'ba2'})
>>> c = li.aml_compile
>>> p = li.aml_translate_python
>>> e = li.aml_evaluate
>>> u = li.aml_suggest
>>> u('1 = fo')
[u'fo2', u'foo']
>>> u('1 = FO')
[u'fo2', u'foo']
>>> p(c('null = null'))
u'None == None'
>>> e(c('foo = "bar"'))
True
>>> e(c('fo2 = "ba2"'))
True
"""
def py_bool_to_lit(py_bool):
return parse( 'true' if py_bool else 'false', BooleanLiteral)
if not var_map:
class Identifier(str):
grammar = re.compile(r'$a') # This will match nothing.
else:
class Identifier(Keyword):
grammar = Enum(*[K(v) for v in var_map.iterkeys()])
class StringLiteral(str):
def __new__(cls, s):
return super(StringLiteral, cls).__new__(cls, ast.literal_eval(s))
grammar = [re.compile(r'"[^\\\n\r]+?"'), re.compile(r"'[^\\\n\r]+?'")]
class IntegerLiteral(int):
grammar = re.compile(r'-?\d+')
class FloatLiteral(float):
grammar = re.compile(r'-?\d+.\d+')
class BooleanLiteral(Keyword):
grammar = Enum(K('true'), K('false'))
class NullLiteral(Keyword):
grammar = Enum(K('null'))
Comparable = [NullLiteral, FloatLiteral, IntegerLiteral,
StringLiteral, Identifier]
class ListOfComparables(List):
pass
ListOfComparables.grammar = (
'(',
Comparable,
maybe_some(
',',
blank,
Comparable,
),
optional(','),
')'
)
class ComparisonOperator(str):
grammar = re.compile(r'=|>=|<=|>|<|!=|in')
class BooleanFunctionName(Keyword):
grammar = Enum(K('and'), K('or'))
class ComparisonOperation(List):
pass
ComparisonOperation.grammar = (
Comparable,
blank,
attr('comp_op', ComparisonOperator),
blank,
[Comparable, ListOfComparables],
)
class BooleanOperationSimple(List):
# The flag() pypeg2 function works great when parsing but does not work when
# composing (the flag gets output whether it was in the source text or not). So
# a workaround is this:
grammar = (
attr('negated', optional(RE_NOT)),
ComparisonOperation,
)
class BooleanOperation(List):
pass
BooleanOperation.grammar = (
BooleanOperationSimple,
maybe_some(
blank,
BooleanFunctionName,
blank,
BooleanOperationSimple,
),
)
class Expression(List):
pass
Expression.grammar = (
[BooleanOperationSimple, ('(', Expression, ')')],
maybe_some(
blank,
BooleanFunctionName,
blank,
[BooleanOperationSimple, ('(', Expression, ')')],
),
)
def eval_node(node):
en = lambda n: eval_node(n)
if isinstance(node, Identifier):
return var_map[node]
elif isinstance(node, StringLiteral):
return node
elif isinstance(node, IntegerLiteral):
return node
elif isinstance(node, FloatLiteral):
return node
elif isinstance(node, BooleanLiteral):
if node == 'true':
return True
elif node == 'false':
return False
elif isinstance(node, NullLiteral):
return None
elif isinstance(node, ListOfComparables):
return node
elif isinstance(node, ComparisonOperation):
opa, opb = node[0:2]
if node.comp_op == '=':
return en(opa) == en(opb)
elif node.comp_op == '>':
return en(opa) > en(opb)
elif node.comp_op == '<':
return en(opa) < en(opb)
elif node.comp_op == '!=':
return en(opa) != en(opb)
elif node.comp_op == '>=':
return en(opa) >= en(opb)
elif node.comp_op == '<=':
return en(opa) <= en(opb)
elif node.comp_op == 'in':
enopa = en(opa)
enopb = en(opb)
for other_node in list(enopb):
virtual_node = ComparisonOperation([opa, other_node])
virtual_node.comp_op = '='
if en(virtual_node):
return True
return False
elif isinstance(node, BooleanOperationSimple):
a = en(node[0])
if node.negated:
a = not a
return a
elif isinstance(node, BooleanOperation):
if len(node) == 1:
return en(node[0])
fn_map = {
'and': lambda a,b: a and b,
'or': lambda a,b: a or b,
}
def simple_eval(tr):
return py_bool_to_lit(fn_map[tr[1]]( en(tr[0]), en(tr[2])))
for fname in ['and', 'or']:
for i in xrange(1, len(node), 2):
if node[i] == fname:
new_self = (
node[:i-1]
+ [simple_eval(node[i-1:i+2])]
+ node[i+2:]
)
return en(BooleanOperation(new_self))
elif isinstance(node, Expression):
def iter_over_relevant():
for eli, el in enumerate(node):
if eli % 2 == 0:
yield eli, el
if all(
isinstance(el, BooleanOperationSimple)
or
isinstance(el, BooleanLiteral)
for eli, el in iter_over_relevant()
):
res = en(BooleanOperation(node))
return res
else:
for eli, el in iter_over_relevant():
if isinstance(el, Expression):
new_self = (
node[:eli]
+ [py_bool_to_lit(en(el))]
+ node[eli+1:]
)
return en(Expression(new_self))
def compose_node_to_python(node):
return compose(node)
def compose_node_to_sql(node):
return compose(node)
def aml_compile(source):
return parse(source, Expression)
def aml_evaluate(aml_c):
result = eval_node(aml_c)
return result
def aml_translate_python(aml_c):
def comp_op_compose(self, *args, **kwargs):
if self == '=':
return '=='
else:
return self
def null_compose(self, *args, **kwargs):
return 'None'
def string_compose(self, *args, **kwargs):
return '"' + self.replace('"', r'\"') + '"'
ComparisonOperator.compose = comp_op_compose
NullLiteral.compose = null_compose
StringLiteral.compose = string_compose
result = compose_node_to_python(aml_c)
delattr(ComparisonOperator, 'compose')
delattr(NullLiteral, 'compose')
delattr(StringLiteral, 'compose')
return result
def aml_translate_sql(aml_c):
def comp_op_compose(self, *args, **kwargs):
if self == '!=':
return '<>'
else:
return self
def null_compose(self, *args, **kwargs):
return 'null'
def string_compose(self, *args, **kwargs):
return "'" + self.replace("'", "''") + "'"
def comp_operation_compose(self, *args, **kwargs):
if (
(
isinstance(self[0], NullLiteral)
or
isinstance(self[1], NullLiteral)
)
and
(
self.comp_op in ('=', '!=')
)
):
if self.comp_op == '=':
middle = 'is'
else:
middle = 'is not'
else:
middle = compose(self.comp_op)
return ' '.join([
compose(self[0]),
middle,
compose(self[1]),
])
ComparisonOperator.compose = comp_op_compose
NullLiteral.compose = null_compose
ComparisonOperation.compose = comp_operation_compose
StringLiteral.compose = string_compose
result = compose_node_to_sql(aml_c)
delattr(ComparisonOperator, 'compose')
delattr(NullLiteral, 'compose')
delattr(ComparisonOperation, 'compose')
delattr(StringLiteral, 'compose')
return result
def aml_suggest(source):
suggestions = [ ]
if var_map:
if not source:
suggestions = list(var_map.iterkeys())
else:
split = [el for el in re.split(r'(?m)\s+', source) if el]
if split:
for candidate in var_map.iterkeys():
if candidate.lower().startswith(split[-1].lower()):
suggestions.append(candidate)
suggestions.sort()
return suggestions
lang_instance = LangInstance()
lang_instance.aml_compile = aml_compile
lang_instance.aml_evaluate = aml_evaluate
lang_instance.aml_translate_python = aml_translate_python
lang_instance.aml_translate_sql = aml_translate_sql
lang_instance.aml_suggest = aml_suggest
return lang_instance |
Create an interrupt event handle. | def create_interrupt_event():
""" Create an interrupt event handle.
The parent process should use this static method for creating the
interrupt event that is passed to the child process. It should store
this handle and use it with ``send_interrupt`` to interrupt the child
process.
"""
# Create a security attributes struct that permits inheritance of the
# handle by new processes.
# FIXME: We can clean up this mess by requiring pywin32 for IPython.
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [ ("nLength", ctypes.c_int),
("lpSecurityDescriptor", ctypes.c_void_p),
("bInheritHandle", ctypes.c_int) ]
sa = SECURITY_ATTRIBUTES()
sa_p = ctypes.pointer(sa)
sa.nLength = ctypes.sizeof(SECURITY_ATTRIBUTES)
sa.lpSecurityDescriptor = 0
sa.bInheritHandle = 1
return ctypes.windll.kernel32.CreateEventA(
sa_p, # lpEventAttributes
False, # bManualReset
False, # bInitialState
'') |
Run the poll loop. This method never returns. | def run(self):
""" Run the poll loop. This method never returns.
"""
try:
from _winapi import WAIT_OBJECT_0, INFINITE
except ImportError:
from _subprocess import WAIT_OBJECT_0, INFINITE
# Build the list of handle to listen on.
handles = []
if self.interrupt_handle:
handles.append(self.interrupt_handle)
if self.parent_handle:
handles.append(self.parent_handle)
arch = platform.architecture()[0]
c_int = ctypes.c_int64 if arch.startswith('64') else ctypes.c_int
# Listen forever.
while True:
result = ctypes.windll.kernel32.WaitForMultipleObjects(
len(handles), # nCount
(c_int * len(handles))(*handles), # lpHandles
False, # bWaitAll
INFINITE) # dwMilliseconds
if WAIT_OBJECT_0 <= result < len(handles):
handle = handles[result - WAIT_OBJECT_0]
if handle == self.interrupt_handle:
interrupt_main()
elif handle == self.parent_handle:
os._exit(1)
elif result < 0:
# wait failed, just give up and stop polling.
warn("""Parent poll failed. If the frontend dies,
the kernel may be left running. Please let us know
about your system (bitness, Python, etc.) at
ipython-dev@scipy.org""")
return |
Function to initialize settings from command line and/ or custom settings file: return: Returns str with operation type | def initialize():
"""
Function to initialize settings from command line and/or custom settings file
:return: Returns str with operation type
"""
if len(sys.argv) == 1:
usage()
sys.exit()
command = _get_command(sys.argv[1])
try:
opts, args = getopt.getopt(sys.argv[2:], 'h:e:p:u:l:P:s:m:',
['help', 'email=', 'password=', 'url=', 'locale=',
'po-path=', 'settings=', 'message='])
except getopt.GetoptError:
usage()
sys.exit()
params = _get_params_from_options(opts)
_set_settings_file(settings, params)
if command == 'push':
if 'GIT_MESSAGE' in params:
return 'push', params['GIT_MESSAGE']
return 'push', None
return command, None |
Return dictionaries mapping lower case typename ( e. g. tuple ) to type objects from the types package and vice versa. | def create_typestr2type_dicts(dont_include_in_type2typestr=["lambda"]):
"""Return dictionaries mapping lower case typename (e.g. 'tuple') to type
objects from the types package, and vice versa."""
typenamelist = [tname for tname in dir(types) if tname.endswith("Type")]
typestr2type, type2typestr = {}, {}
for tname in typenamelist:
name = tname[:-4].lower() # Cut 'Type' off the end of the name
obj = getattr(types, tname)
typestr2type[name] = obj
if name not in dont_include_in_type2typestr:
type2typestr[obj] = name
return typestr2type, type2typestr |
is_type ( obj typestr_or_type ) verifies if obj is of a certain type. It can take strings or actual python types for the second argument i. e. tuple < - > TupleType. all matches all types. | def is_type(obj, typestr_or_type):
"""is_type(obj, typestr_or_type) verifies if obj is of a certain type. It
can take strings or actual python types for the second argument, i.e.
'tuple'<->TupleType. 'all' matches all types.
TODO: Should be extended for choosing more than one type."""
if typestr_or_type == "all":
return True
if type(typestr_or_type) == types.TypeType:
test_type = typestr_or_type
else:
test_type = typestr2type.get(typestr_or_type, False)
if test_type:
return isinstance(obj, test_type)
return False |
Produce a dictionary of an object s attributes. Builds on dir2 by checking that a getattr () call actually succeeds. | def dict_dir(obj):
"""Produce a dictionary of an object's attributes. Builds on dir2 by
checking that a getattr() call actually succeeds."""
ns = {}
for key in dir2(obj):
# This seemingly unnecessary try/except is actually needed
# because there is code out there with metaclasses that
# create 'write only' attributes, where a getattr() call
# will fail even if the attribute appears listed in the
# object's dictionary. Properties can actually do the same
# thing. In particular, Traits use this pattern
try:
ns[key] = getattr(obj, key)
except AttributeError:
pass
return ns |
Filter a namespace dictionary by name pattern and item type. | def filter_ns(ns, name_pattern="*", type_pattern="all", ignore_case=True,
show_all=True):
"""Filter a namespace dictionary by name pattern and item type."""
pattern = name_pattern.replace("*",".*").replace("?",".")
if ignore_case:
reg = re.compile(pattern+"$", re.I)
else:
reg = re.compile(pattern+"$")
# Check each one matches regex; shouldn't be hidden; of correct type.
return dict((key,obj) for key, obj in ns.iteritems() if reg.match(key) \
and show_hidden(key, show_all) \
and is_type(obj, type_pattern) ) |
Return dictionary of all objects in a namespace dictionary that match type_pattern and filter. | def list_namespace(namespace, type_pattern, filter, ignore_case=False, show_all=False):
"""Return dictionary of all objects in a namespace dictionary that match
type_pattern and filter."""
pattern_list=filter.split(".")
if len(pattern_list) == 1:
return filter_ns(namespace, name_pattern=pattern_list[0],
type_pattern=type_pattern,
ignore_case=ignore_case, show_all=show_all)
else:
# This is where we can change if all objects should be searched or
# only modules. Just change the type_pattern to module to search only
# modules
filtered = filter_ns(namespace, name_pattern=pattern_list[0],
type_pattern="all",
ignore_case=ignore_case, show_all=show_all)
results = {}
for name, obj in filtered.iteritems():
ns = list_namespace(dict_dir(obj), type_pattern,
".".join(pattern_list[1:]),
ignore_case=ignore_case, show_all=show_all)
for inner_name, inner_obj in ns.iteritems():
results["%s.%s"%(name,inner_name)] = inner_obj
return results |
Check for presence of mutually exclusive keys in a dict. | def mutex_opts(dict,ex_op):
"""Check for presence of mutually exclusive keys in a dict.
Call: mutex_opts(dict,[[op1a,op1b],[op2a,op2b]...]"""
for op1,op2 in ex_op:
if op1 in dict and op2 in dict:
raise ValueError,'\n*** ERROR in Arguments *** '\
'Options '+op1+' and '+op2+' are mutually exclusive.' |
map_method ( method object_list * args ** kw ) - > list | def map_method(method,object_list,*argseq,**kw):
"""map_method(method,object_list,*args,**kw) -> list
Return a list of the results of applying the methods to the items of the
argument sequence(s). If more than one sequence is given, the method is
called with an argument list consisting of the corresponding item of each
sequence. All sequences must be of the same length.
Keyword arguments are passed verbatim to all objects called.
This is Python code, so it's not nearly as fast as the builtin map()."""
out_list = []
idx = 0
for object in object_list:
try:
handler = getattr(object, method)
except AttributeError:
out_list.append(None)
else:
if argseq:
args = map(lambda lst:lst[idx],argseq)
#print 'ob',object,'hand',handler,'ar',args # dbg
out_list.append(handler(args,**kw))
else:
out_list.append(handler(**kw))
idx += 1
return out_list |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.