Search is not available for this dataset
text
stringlengths
75
104k
def setup(self, pin, mode):
    """Configure *pin* as an input or an output.

    ``mode`` must be one of the adapter's direction constants (DIR_IN
    or DIR_OUT), translated through ``self._dir_mapping``.
    """
    gpio = self.mraa_gpio.Gpio(pin)
    self.mraa_gpio.Gpio.dir(gpio, self._dir_mapping[mode])
def output(self, pin, value):
    """Drive *pin* to the given level.

    ``value`` may be 1 (ON/HIGH), 0 (OFF/LOW), or a boolean.
    """
    gpio = self.mraa_gpio.Gpio(pin)
    self.mraa_gpio.Gpio.write(gpio, value)
def input(self, pin):
    """Return the level of *pin*: HIGH/true if pulled high, LOW/false
    if pulled low."""
    gpio = self.mraa_gpio.Gpio(pin)
    return self.mraa_gpio.Gpio.read(gpio)
def add_event_detect(self, pin, edge, callback=None, bouncetime=-1):
    """Enable edge-detection events on *pin* (which must be an input).

    :param edge: RISING, FALLING or BOTH (mapped via _edge_mapping).
    :param callback: optional function invoked on the event.
    :param bouncetime: switch-bounce timeout in ms; forwarded only
        when positive.
    """
    extra = {}
    if callback:
        extra['callback'] = callback
    if bouncetime > 0:
        extra['bouncetime'] = bouncetime
    gpio = self.mraa_gpio.Gpio(pin)
    self.mraa_gpio.Gpio.isr(gpio, self._edge_mapping[edge], **extra)
def remove_event_detect(self, pin):
    """Disable edge-detection events on *pin* (which must be an input)."""
    gpio = self.mraa_gpio.Gpio(pin)
    self.mraa_gpio.Gpio.isrExit(gpio)
def wait_for_edge(self, pin, edge):
    """Block until *edge* (RISING, FALLING or BOTH) occurs on input *pin*.

    NOTE(review): this delegates to ``self.bbio_gpio`` while every
    sibling method uses ``self.mraa_gpio`` -- looks like a copy/paste
    slip from a BBIO adapter; confirm the attribute actually exists on
    this adapter before relying on it.
    """
    gpio = self.mraa_gpio.Gpio(pin)
    self.bbio_gpio.wait_for_edge(gpio, self._edge_mapping[edge])
def all_info_files(self):
    """Yield the path of every ``.trashinfo`` file in the info dir.

    Entries without the ``.trashinfo`` suffix trigger
    ``on_non_trashinfo_found``; a missing info directory yields nothing.
    """
    try:
        for entry in list_files_in_dir(self.info_dir):
            if os.path.basename(entry).endswith('.trashinfo'):
                yield entry
            else:
                self.on_non_trashinfo_found()
    except OSError:  # the info directory does not exist
        pass
def describe(path):
    """Return a short textual description of the file at *path*.

    Possible results: 'symbolic link', 'directory', "'.' directory",
    "'..' directory", 'regular file', 'regular empty file',
    'non existent', 'entry'.
    """
    if os.path.islink(path):
        return 'symbolic link'
    if os.path.isdir(path):
        # The literal paths '.' and '..' are reported as plain
        # directories; only a *basename* of '.'/'..' (e.g. 'foo/.')
        # gets the quoted variant -- preserved from the original.
        if path in ('.', '..'):
            return 'directory'
        basename = os.path.basename(path)
        if basename == '.':
            return "'.' directory"
        if basename == '..':
            return "'..' directory"
        return 'directory'
    if os.path.isfile(path):
        if os.path.getsize(path) == 0:
            return 'regular empty file'
        return 'regular file'
    if not os.path.exists(path):
        return 'non existent'
    return 'entry'
def trash(self, file):
    """Trash *file* in the most appropriate trash directory.

    A file on the same volume as the home trash directory is trashed
    there; otherwise the volume trash directories are attempted in
    order ($volume/.Trash/$uid first, then $volume/.Trash-$uid).
    Entries matched by ``_should_skipped_by_specs`` (dot entries) are
    only reported, never trashed.
    """
    if self._should_skipped_by_specs(file):
        self.reporter.unable_to_trash_dot_entries(file)
        return
    volume = self.volume_of_parent(file)
    self.reporter.volume_of_file(volume)
    candidates = self._possible_trash_directories_for(volume)
    self.try_trash_file_using_candidates(file, volume, candidates)
def persist_trash_info(self, basename, content, logger):
    """Create a ``.trashinfo`` file in the $trash/info directory.

    The file is named after *basename*; on a name collision a numeric
    suffix is appended (sequentially for the first 100 attempts, then
    randomly) until the atomic write succeeds.

    :returns: the path of the created file.

    Fixes vs. original: the ``raise IOError()`` after the ``while
    True`` loop was unreachable (every iteration either returns or
    retries) and has been removed, along with the redundant
    ``base_id`` alias.
    """
    self.ensure_dir(self.info_dir, 0o700)

    index = 0
    while True:
        if index == 0:
            suffix = ""
        elif index < 100:
            suffix = "_%d" % index
        else:
            # Sequential probing keeps colliding; fall back to random
            # suffixes to find a free slot quickly.
            import random
            suffix = "_%d" % random.randint(0, 65535)
        trash_info_basename = basename + suffix + ".trashinfo"
        dest = os.path.join(self.info_dir, trash_info_basename)
        try:
            self.atomic_write(dest, content)
            logger.debug(".trashinfo created as %s." % dest)
            return dest
        except OSError:
            logger.debug("Attempt for creating %s failed." % dest)
        index += 1
def get_process_parser(self, process_id_or_name):
    """Return the ProcessParser registered under the given name or ID.

    Name lookup takes precedence; an unknown key raises KeyError from
    the ID map.
    """
    by_name = self.process_parsers_by_name
    if process_id_or_name in by_name:
        return by_name[process_id_or_name]
    return self.process_parsers[process_id_or_name]
def add_bpmn_files(self, filenames):
    """Parse each file in *filenames* and add it to the parser's set.

    Uses a ``with`` block so the file handle is closed even if parsing
    raises (the original used a manual try/finally around open()).
    """
    for filename in filenames:
        with open(filename, 'r') as f:
            self.add_bpmn_xml(ET.parse(f), filename=filename)
def add_bpmn_xml(self, bpmn, svg=None, filename=None):
    """Register the parsed BPMN document *bpmn* with this parser.

    :param svg: optional SVG text for the BPMN diagram.
    :param filename: optional source filename (used in error messages).

    Raises ValidationException on a duplicate process ID or name.
    """
    xpath = xpath_eval(bpmn)
    for process in xpath('.//bpmn:process'):
        parser = self.PROCESS_PARSER_CLASS(
            self, process, svg, filename=filename, doc_xpath=xpath)
        if parser.get_id() in self.process_parsers:
            raise ValidationException(
                'Duplicate process ID', node=process, filename=filename)
        if parser.get_name() in self.process_parsers_by_name:
            raise ValidationException(
                'Duplicate process name', node=process, filename=filename)
        self.process_parsers[parser.get_id()] = parser
        self.process_parsers_by_name[parser.get_name()] = parser
def one(nodes, or_none=False):
    """Return the single element of *nodes*.

    With ``or_none=True`` an empty list yields None; otherwise exactly
    one element is required (asserted).
    """
    if not nodes and or_none:
        return None
    assert len(nodes) == 1, (
        'Expected 1 result. Received %d results.' % len(nodes))
    return nodes[0]
def xpath_eval(node, extra_ns=None):
    """Return a findall-based XPath evaluator for *node*.

    The prefix 'bpmn' is bound to the BPMN 2.0 model namespace; any
    mappings in *extra_ns* are layered on top.
    """
    ns = {'bpmn': BPMN_MODEL_NS}
    if extra_ns:
        ns.update(extra_ns)
    return lambda path: node.findall(path, ns)
def serialize_attrib(self, op):
    """Serialize a :meth:`SpiffWorkflow.operators.Attrib` as
    ``<attribute>name</attribute>``."""
    node = etree.Element('attribute')
    node.text = op.name
    return node
def serialize_pathattrib(self, op):
    """Serialize a :meth:`SpiffWorkflow.operators.PathAttrib` as
    ``<path>foobar</path>``."""
    node = etree.Element('path')
    node.text = op.path
    return node
def serialize_assign(self, op):
    """Serialize a :meth:`SpiffWorkflow.operators.Assign`.

    Produces ``<assign><name/><value/></assign>``, with a
    ``<value-attribute>`` child when a right-hand attribute is set.
    """
    node = etree.Element('assign')
    self.serialize_value(SubElement(node, 'name'), op.left_attribute)
    if op.right:
        self.serialize_value(SubElement(node, 'value'), op.right)
    if op.right_attribute:
        self.serialize_value(
            SubElement(node, 'value-attribute'), op.right_attribute)
    return node
def serialize_value(self, parent_elem, value):
    """Serialize a str/int, None, or Attrib/PathAttrib-like object into
    *parent_elem*.

    Plain values become the element's text; None clears the text;
    anything else is asked to serialize itself and appended as a child.

    The legacy ``type(value).__name__ == 'str'`` clause was redundant
    with the isinstance check (a Python 2 leftover) and was removed.
    """
    if isinstance(value, (str, int)):
        parent_elem.text = str(value)
    elif value is None:
        parent_elem.text = None
    else:
        parent_elem.append(value.serialize(self))
def serialize_value_map(self, map_elem, thedict):
    """Serialize *thedict* as ``<variable><name/><value/></variable>``
    children of *map_elem*, ordered by stringified key.

    Values may be strings, Attrib, or PathAttrib objects.
    Returns *map_elem*.
    """
    pairs = sorted((str(k), v) for k, v in thedict.items())
    for key, value in pairs:
        var_elem = SubElement(map_elem, 'variable')
        SubElement(var_elem, 'name').text = str(key)
        self.serialize_value(SubElement(var_elem, 'value'), value)
    return map_elem
def serialize_value_list(self, list_elem, thelist):
    """Append one ``<value>`` child to *list_elem* per entry of
    *thelist* (str, Attrib, or PathAttrib) and return *list_elem*."""
    for value in thelist:
        self.serialize_value(SubElement(list_elem, 'value'), value)
    return list_elem
def serialize_operator_equal(self, op):
    """Serialize :meth:`SpiffWorkflow.operators.Equal` as an
    ``<equals>`` element containing the operator's argument values."""
    return self.serialize_value_list(etree.Element('equals'), op.args)
def serialize_operator_not_equal(self, op):
    """Serialize :meth:`SpiffWorkflow.operators.NotEqual` as a
    ``<not-equals>`` element containing the operator's argument
    values."""
    return self.serialize_value_list(etree.Element('not-equals'), op.args)
def serialize_operator_greater_than(self, op):
    """Serialize :meth:`SpiffWorkflow.operators.GreaterThan` as a
    ``<greater-than>`` element containing the operator's argument
    values.  (The original docstring mis-referenced NotEqual.)"""
    return self.serialize_value_list(etree.Element('greater-than'), op.args)
def serialize_operator_less_than(self, op):
    """Serialize :meth:`SpiffWorkflow.operators.LessThan` as a
    ``<less-than>`` element containing the operator's argument values.
    (The original docstring mis-referenced NotEqual.)"""
    return self.serialize_value_list(etree.Element('less-than'), op.args)
def serialize_operator_match(self, op):
    """Serialize :meth:`SpiffWorkflow.operators.Match` as a
    ``<matches>`` element containing the operator's argument values.
    (The original docstring mis-referenced NotEqual.)"""
    return self.serialize_value_list(etree.Element('matches'), op.args)
def serialize_task_spec(self, spec, elem):
    """Serialize the attributes common to every
    :meth:`SpiffWorkflow.specs.TaskSpec` into *elem* and return it.

    Events are deliberately not serialized (documented in the TaskSpec
    API docs).
    """
    if spec.id is not None:
        SubElement(elem, 'id').text = str(spec.id)
    SubElement(elem, 'name').text = spec.name
    if spec.description:
        SubElement(elem, 'description').text = spec.description
    if spec.manual:
        SubElement(elem, 'manual')
    if spec.internal:
        SubElement(elem, 'internal')
    SubElement(elem, 'lookahead').text = str(spec.lookahead)
    self.serialize_value_list(
        SubElement(elem, 'inputs'), [t.name for t in spec.inputs])
    self.serialize_value_list(
        SubElement(elem, 'outputs'), [t.name for t in spec.outputs])
    self.serialize_value_map(SubElement(elem, 'data'), spec.data)
    self.serialize_value_map(SubElement(elem, 'defines'), spec.defines)
    self.serialize_value_list(
        SubElement(elem, 'pre-assign'), spec.pre_assign)
    self.serialize_value_list(
        SubElement(elem, 'post-assign'), spec.post_assign)
    return elem
def serialize_acquire_mutex(self, spec):
    """Serialize :meth:`SpiffWorkflow.specs.AcquireMutex`: the common
    task-spec attributes plus a ``<mutex>`` element."""
    elem = etree.Element('acquire-mutex')
    self.serialize_task_spec(spec, elem)
    SubElement(elem, 'mutex').text = spec.mutex
    return elem
def get_event_definition(self):
    """Parse this node's event-definition child and return a matching
    event-definition instance.

    Message and timer definitions are supported; anything else raises
    NotImplementedError.
    """
    message = first(self.xpath('.//bpmn:messageEventDefinition'))
    if message is not None:
        return self.get_message_event_definition(message)
    timer = first(self.xpath('.//bpmn:timerEventDefinition'))
    if timer is not None:
        return self.get_timer_event_definition(timer)
    raise NotImplementedError(
        'Unsupported Intermediate Catch Event: %r', ET.tostring(self.node))
def get_message_event_definition(self, messageEventDefinition):
    """Build a MessageEventDefinition from *messageEventDefinition*.

    The message name comes from the referenced message node when one is
    present, otherwise from this node's own 'name' attribute.
    """
    messageRef = first(self.xpath('.//bpmn:messageRef'))
    if messageRef is not None:
        message = messageRef.get('name')
    else:
        message = self.node.get('name')
    return MessageEventDefinition(message)
def get_timer_event_definition(self, timerEventDefinition):
    """Build a TimerEventDefinition from *timerEventDefinition*.

    Only the ``timeDate`` child (an expiry-time expression for the
    timer) is currently supported.
    """
    timeDate = first(self.xpath('.//bpmn:timeDate'))
    condition = self.parser.parse_condition(
        timeDate.text, None, None, None, None, self)
    return TimerEventDefinition(
        self.node.get('name', timeDate.text), condition)
def get_all_lanes(self):
    """Return the set of distinct lane names used in the process,
    including lanes of called activities."""
    seen = set()
    lanes = set()

    def walk(task_spec):
        if task_spec in seen:
            return
        seen.add(task_spec)
        if hasattr(task_spec, 'lane') and task_spec.lane:
            lanes.add(task_spec.lane)
        # Descend into a called activity's spec, when present.
        if hasattr(task_spec, 'spec'):
            walk(task_spec.spec.start)
        for child in task_spec.outputs:
            walk(child)

    walk(self.start)
    return lanes
def get_specs_depth_first(self):
    """Return this spec plus the specs of all called processes, in
    depth-first discovery order."""
    seen = set()
    specs = [self]

    def walk(task_spec):
        if task_spec in seen:
            return
        seen.add(task_spec)
        if hasattr(task_spec, 'spec'):
            specs.append(task_spec.spec)
            walk(task_spec.spec.start)
        for child in task_spec.outputs:
            walk(child)

    walk(self.start)
    return specs
def to_html_string(self):
    """Return an HTML document (as a string) describing the process.

    Each spec's SVG is embedded, so this is only useful when the editor
    provided SVG representations.  (The original docstring claimed an
    etree node was returned; the function has always built a string.)
    """
    html = ET.Element('html')
    head = ET.SubElement(html, 'head')
    ET.SubElement(head, 'title').text = self.description
    body = ET.SubElement(html, 'body')
    ET.SubElement(body, 'h1').text = self.description
    span = ET.SubElement(body, 'span')
    span.text = '___CONTENT___'
    # BUGFIX: without encoding='unicode', tostring() returns bytes on
    # Python 3 and the str.replace() below raises TypeError.
    html_text = ET.tostring(html, encoding='unicode')
    svg_content = ''
    svg_done = set()
    for spec in self.get_specs_depth_first():
        if spec.svg and spec.svg not in svg_done:
            svg_content += '<p>' + spec.svg + "</p>"
            svg_done.add(spec.svg)
    return html_text.replace('___CONTENT___', svg_content)
def connect(self, callback, *args, **kwargs):
    """Connect *callback* (with optional call arguments) to this event.

    The handler is stored with a hard reference, so call
    :class:`disconnect()` if you want it to be garbage collected.
    Raises AttributeError if the callback is already connected.
    """
    if self.is_connected(callback):
        raise AttributeError('callback is already connected')
    subscribers = self.hard_subscribers
    if subscribers is None:
        subscribers = self.hard_subscribers = []
    subscribers.append((callback, args, kwargs))
def listen(self, callback, *args, **kwargs):
    """Like :class:`connect()`, but store only a weak reference.

    The signal is automatically disconnected once the handler is
    garbage collected -- so a local function may silently vanish; use
    :class:`connect()` to keep it alive.

    :returns: the new weak reference
        (:class:`Exscript.util.weakmethod.WeakMethod`).
    Raises AttributeError if the callback is already connected.
    """
    if self.lock is None:
        self.lock = Lock()
    with self.lock:
        if self.is_connected(callback):
            raise AttributeError('callback is already connected')
        if self.weak_subscribers is None:
            self.weak_subscribers = []
        ref = weakmethod.ref(callback, self._try_disconnect)
        self.weak_subscribers.append((ref, args, kwargs))
    return ref
def n_subscribers(self):
    """Return the number of connected subscribers (hard + weak).

    :rtype: int

    The dated ``x and len(x) or 0`` idiom was replaced with the
    equivalent conditional expression.
    """
    hard = len(self.hard_subscribers) if self.hard_subscribers else 0
    weak = len(self.weak_subscribers) if self.weak_subscribers else 0
    return hard + weak
def is_connected(self, callback):
    """Return whether *callback* is connected to this event, either
    weakly or with a hard reference."""
    if self._weakly_connected_index(callback) is not None:
        return True
    if self.hard_subscribers is None:
        return False
    return callback in self._hard_callbacks()
def emit(self, *args, **kwargs):
    """Emit the signal, invoking subscribers with the given arguments.

    Hard subscribers run first, then weak ones.  If a callback returns
    a value other than None, emission stops and that value is returned;
    otherwise None is returned.

    Note: per-subscriber kwargs are folded into *kwargs* as iteration
    proceeds (the original mutated the dict in the same way), so they
    can carry over into later callbacks.
    """
    for callback, user_args, user_kwargs in (self.hard_subscribers or ()):
        kwargs.update(user_kwargs)
        result = callback(*args + user_args, **kwargs)
        if result is not None:
            return result
    for callback, user_args, user_kwargs in (self.weak_subscribers or ()):
        kwargs.update(user_kwargs)
        # WeakMethod notifies us when the underlying function dies, but
        # the GC may run mid-loop before that notification lands; a
        # dead function must simply be skipped here.
        function = callback.get_function()
        if function is None:
            continue
        result = function(*args + user_args, **kwargs)
        if result is not None:
            return result
def _try_disconnect(self, ref): """ Called by the weak reference when its target dies. In other words, we can assert that self.weak_subscribers is not None at this time. """ with self.lock: weak = [s[0] for s in self.weak_subscribers] try: index = weak.index(ref) except ValueError: # subscriber was already removed by a call to disconnect() pass else: self.weak_subscribers.pop(index)
def disconnect(self, callback):
    """Disconnect *callback* from this signal (both the weak and the
    hard subscriber lists are checked)."""
    if self.weak_subscribers is not None:
        with self.lock:
            index = self._weakly_connected_index(callback)
            if index is not None:
                self.weak_subscribers.pop(index)
    if self.hard_subscribers is not None:
        try:
            index = self._hard_callbacks().index(callback)
        except ValueError:
            pass
        else:
            self.hard_subscribers.pop(index)
def deserialize_workflow_spec(self, s_state, filename=None):
    """Load a workflow spec from a packaged workflow archive.

    :param s_state: byte-string contents of the package, or a
        file-like object.
    :param filename: the name of the package file (used in error
        messages).
    """
    if isinstance(s_state, (str, bytes)):
        s_state = BytesIO(s_state)
    package_zip = zipfile.ZipFile(
        s_state, "r", compression=zipfile.ZIP_DEFLATED)

    config = configparser.ConfigParser()
    ini_fp = TextIOWrapper(
        package_zip.open(Packager.METADATA_FILE), encoding="UTF-8")
    try:
        config.read_file(ini_fp)
    finally:
        ini_fp.close()

    # The metadata may name a custom parser class; default to BpmnParser.
    parser_class = BpmnParser
    try:
        parser_class_module = config.get(
            'MetaData', 'parser_class_module', fallback=None)
    except TypeError:
        # unfortunately the fallback= does not exist on python 2
        parser_class_module = config.get(
            'MetaData', 'parser_class_module', None)
    if parser_class_module:
        mod = __import__(
            parser_class_module,
            fromlist=[config.get('MetaData', 'parser_class')])
        parser_class = getattr(mod, config.get('MetaData', 'parser_class'))
    parser = parser_class()

    for info in package_zip.infolist():
        parts = os.path.split(info.filename)
        in_root = len(parts) == 2 and not parts[0]
        if not (in_root and parts[1].lower().endswith('.bpmn')):
            continue
        # A BPMN file in the root of the ZIP; pick up a sibling SVG
        # with the same stem when one exists.
        try:
            svg = package_zip.read(info.filename[:-5] + '.svg')
        except KeyError:
            svg = None
        bpmn_fp = package_zip.open(info)
        try:
            bpmn = ET.parse(bpmn_fp)
        finally:
            bpmn_fp.close()
        parser.add_bpmn_xml(
            bpmn, svg=svg, filename='%s:%s' % (filename, info.filename))

    return parser.get_spec(config.get('MetaData', 'entry_point_process'))
def parse_node(self):
    """Parse this node and all its children, returning the connected
    task spec (or the boundary-event parent that wraps it).

    ValidationExceptions propagate unchanged; any other error is
    logged with its traceback and re-raised as a ValidationException
    pointing at this node.
    """
    try:
        self.task = self.create_task()
        self.task.documentation = self.parser._parse_documentation(
            self.node, xpath=self.xpath, task_parser=self)

        boundary_event_nodes = self.process_xpath(
            './/bpmn:boundaryEvent[@attachedToRef="%s"]' % self.get_id())
        if boundary_event_nodes:
            # Wrap the task in a parent spec that also owns each
            # attached boundary event.
            parent_task = _BoundaryEventParent(
                self.spec, '%s.BoundaryEventParent' % self.get_id(),
                self.task, lane=self.task.lane)
            self.process_parser.parsed_nodes[
                self.node.get('id')] = parent_task
            parent_task.connect_outgoing(
                self.task,
                '%s.FromBoundaryEventParent' % self.get_id(), None, None)
            for boundary_event in boundary_event_nodes:
                b = self.process_parser.parse_node(boundary_event)
                parent_task.connect_outgoing(
                    b,
                    '%s.FromBoundaryEventParent' % boundary_event.get('id'),
                    None, None)
        else:
            self.process_parser.parsed_nodes[
                self.node.get('id')] = self.task

        children = []
        outgoing = self.process_xpath(
            './/bpmn:sequenceFlow[@sourceRef="%s"]' % self.get_id())
        if len(outgoing) > 1 and not self.handles_multiple_outgoing():
            raise ValidationException(
                'Multiple outgoing flows are not supported for '
                'tasks of type',
                node=self.node, filename=self.process_parser.filename)
        for sequence_flow in outgoing:
            target_ref = sequence_flow.get('targetRef')
            target_node = one(
                self.process_xpath('.//*[@id="%s"]' % target_ref))
            c = self.process_parser.parse_node(target_node)
            children.append((c, target_node, sequence_flow))

        if children:
            # Without an explicit default flow on the node, the first
            # child's flow acts as the default.
            default_outgoing = self.node.get('default')
            if not default_outgoing:
                default_outgoing = children[0][2].get('id')
            for (c, target_node, sequence_flow) in children:
                self.connect_outgoing(
                    c, target_node, sequence_flow,
                    sequence_flow.get('id') == default_outgoing)

        return parent_task if boundary_event_nodes else self.task
    except ValidationException:
        raise
    except Exception as ex:
        exc_info = sys.exc_info()
        tb = "".join(traceback.format_exception(
            exc_info[0], exc_info[1], exc_info[2]))
        LOG.error("%r\n%s", ex, tb)
        raise ValidationException(
            "%r" % (ex),
            node=self.node, filename=self.process_parser.filename)
def create_task(self):
    """Instantiate the task spec for this node.

    Subclasses may override to extract extra information from the node.
    """
    return self.spec_class(
        self.spec, self.get_task_spec_name(),
        lane=self.get_lane(),
        description=self.node.get('name', None))
def connect_outgoing(self, outgoing_task, outgoing_task_node,
                     sequence_flow_node, is_default):
    """Connect this task to *outgoing_task* using the details of the
    given sequenceFlow node.

    Subclasses may override to extract extra information from the node.
    """
    documentation = self.parser._parse_documentation(
        sequence_flow_node, task_parser=self)
    self.task.connect_outgoing(
        outgoing_task,
        sequence_flow_node.get('id'),
        sequence_flow_node.get('name', None),
        documentation)
def connect_outgoing(self, taskspec, sequence_flow_id,
                     sequence_flow_name, documentation):
    """Connect this task spec to the child *taskspec*, recording the
    connecting sequence flow under both its name and its ID.

    :param sequence_flow_id: The ID of the connecting sequenceFlow
        node.
    :param sequence_flow_name: The name of the connecting sequenceFlow
        node.
    """
    self.connect(taskspec)
    flow = SequenceFlow(
        sequence_flow_id, sequence_flow_name, documentation, taskspec)
    self.outgoing_sequence_flows[taskspec.name] = flow
    self.outgoing_sequence_flows_by_id[sequence_flow_id] = flow
def connect_outgoing_if(self, condition, taskspec, sequence_flow_id,
                        sequence_flow_name, documentation):
    """Connect this task spec to *taskspec*, to be followed only when
    *condition* evaluates to true.

    Only valid when the task has a connect_if method (e.g. an
    ExclusiveGateway).

    :param sequence_flow_id: The ID of the connecting sequenceFlow
        node.
    :param sequence_flow_name: The name of the connecting sequenceFlow
        node.
    """
    self.connect_if(_BpmnCondition(condition), taskspec)
    flow = SequenceFlow(
        sequence_flow_id, sequence_flow_name, documentation, taskspec)
    self.outgoing_sequence_flows[taskspec.name] = flow
    self.outgoing_sequence_flows_by_id[sequence_flow_id] = flow
def get_outgoing_sequence_names(self):
    """Return a sorted list of the outgoing sequence names.

    Some names may be None; they sort first.  BUGFIX: a plain
    ``sorted()`` raises TypeError on Python 3 when None and str are
    mixed, so a key ordering None before strings (matching Python 2's
    ordering) is used instead.
    """
    names = [s.name for s in self.outgoing_sequence_flows_by_id.values()]
    return sorted(names, key=lambda name: (name is not None, name or ''))
def connect_if(self, condition, task_spec):
    """Connect *task_spec* as a child executed when *condition* matches.

    condition -- a condition (Condition)
    task_spec -- the conditional task spec (must not be None)
    """
    assert task_spec is not None
    self.outputs.append(task_spec)
    self.cond_task_specs.append((condition, task_spec.name))
    task_spec._connect_notify(self)
def _on_complete_hook(self, my_task):
    """Select the matching conditional outputs and sync them as FUTURE
    children of *my_task*.  Should not be called directly.
    """
    outputs = []
    for condition, name in self.cond_task_specs:
        # An active choice filter excludes everything not chosen.
        if self.choice is not None and name not in self.choice:
            continue
        # A None condition is unconditional.
        if condition is None or condition._matches(my_task):
            outputs.append(self._wf_spec.get_task_spec_from_name(name))
    my_task._sync_children(outputs, Task.FUTURE)
    for child in my_task.children:
        child.task_spec._update(child)
def is_completed(self):
    """Return True when no unfinished tasks remain in the workflow.

    :rtype: bool
    :return: Whether the workflow is completed.

    The local previously named ``iter`` shadowed the builtin; renamed.
    """
    unfinished = Task.Iterator(self.task_tree, Task.NOT_FINISHED_MASK)
    try:
        next(unfinished)
    except StopIteration:
        # No waiting tasks found.
        return True
    return False
def cancel(self, success=False):
    """Cancel all open tasks in the workflow.

    :param success: whether the Workflow should be marked as
        successfully completed.
    """
    self.success = success
    # Snapshot the unfinished tasks first so cancellation cannot
    # disturb the iteration.
    to_cancel = list(Task.Iterator(self.task_tree, Task.NOT_FINISHED_MASK))
    for task in to_cancel:
        task.cancel()
def get_task(self, id):
    """Return the task with the given id, or None unless exactly one
    task matches.

    :param id: the id of a task.
    :rtype: Task
    """
    matches = [task for task in self.get_tasks() if task.id == id]
    return matches[0] if len(matches) == 1 else None
def get_tasks_from_spec_name(self, name):
    """Return every task whose spec carries the given *name*.

    :param name: the name of a task spec.
    :rtype: list[Task]
    """
    return [t for t in self.get_tasks() if t.task_spec.name == name]
def get_tasks(self, state=Task.ANY_MASK):
    """Return a list of Task objects matching the given state bitmask.

    :param state: a bitmask of states.
    :rtype: list[Task]

    The copy-only comprehension ``[t for t in ...]`` was replaced with
    the direct ``list(...)`` call.
    """
    return list(Task.Iterator(self.task_tree, state))
def complete_task_from_id(self, task_id):
    """Run the task with the given id.

    :param task_id: the id of the Task object.
    Raises WorkflowException when *task_id* is None or no task matches.
    """
    if task_id is None:
        raise WorkflowException(self.spec, 'task_id is None')
    for task in self.task_tree:
        if task.id == task_id:
            return task.complete()
    msg = 'A task with the given task_id (%s) was not found' % task_id
    raise WorkflowException(self.spec, msg)
def complete_next(self, pick_up=True, halt_on_manual=True):
    """Run the next runnable task.

    :param pick_up: when True, resume searching from where the last
        call of complete_next() left off instead of from the root.
    :param halt_on_manual: when True, tasks with manual=True are never
        completed automatically (see
        :meth:`SpiffWorkflow.specs.TaskSpec.__init__`).
    :returns: True if a task was completed (or a waiting task was
        unblocked), False otherwise.

    BUGFIX: the original blacklist check did
    ``for b in blacklist: if task._is_descendant_of(b): continue``,
    where ``continue`` only advanced the *inner* loop -- descendants of
    blacklisted tasks were never actually skipped.  The check now uses
    any() and continues the outer loop.
    """
    blacklist = []

    # Try to pick up where we left off.
    if pick_up and self.last_task is not None:
        try:
            task = next(Task.Iterator(self.last_task, Task.READY))
        except StopIteration:
            task = None
            self.last_task = None
        if task is not None:
            if not (halt_on_manual and task.task_spec.manual):
                if task.complete():
                    self.last_task = task
                    return True
            blacklist.append(task)

    # Walk through all ready tasks.
    for task in Task.Iterator(self.task_tree, Task.READY):
        if any(task._is_descendant_of(b) for b in blacklist):
            continue
        if not (halt_on_manual and task.task_spec.manual):
            if task.complete():
                self.last_task = task
                return True
        blacklist.append(task)

    # Walk through all waiting tasks.
    for task in Task.Iterator(self.task_tree, Task.WAITING):
        task.task_spec._update(task)
        if not task._has_state(Task.WAITING):
            self.last_task = task
            return True
    return False
def complete_all(self, pick_up=True, halt_on_manual=True):
    """Run all branches to completion.

    A convenience wrapper that calls :meth:`complete_next` (forwarding
    both arguments unchanged) until it returns False.

    :param pick_up: passed on to each call of complete_next().
    :param halt_on_manual: when True, tasks with manual=True are never
        completed automatically (see
        :meth:`SpiffWorkflow.specs.TaskSpec.__init__`).
    """
    while self.complete_next(pick_up, halt_on_manual):
        pass
def ref(function, callback=None):
    """Return a weak reference to the given method or function.

    :param function: the callable to reference.
    :param callback: if not None, called as soon as the referenced
        function is garbage collected.
    """
    # A bound method (exposes __func__) needs the bound-method wrapper;
    # anything else is treated as a free function.
    if hasattr(function, '__func__'):
        return _WeakMethodBound(function, callback)
    return _WeakMethodFree(function, callback)
def serialize_workflow(self, workflow, include_spec=False, **kwargs):
    """Serialize *workflow* to its compact state representation.

    :param workflow: the workflow instance to serialize.
    :param include_spec: must remain False -- the
        CompactWorkflowSerializer only supports workflow-state
        serialization and raises NotImplementedError otherwise.
    """
    if include_spec:
        raise NotImplementedError(
            'Including the spec serialization with the workflow state '
            'is not implemented.')
    return self._get_workflow_state(workflow)
def deserialize_workflow(self, s_state, workflow_spec=None,
                         read_only=False, **kwargs):
    """Restore a workflow from its compact serialized state.

    :param s_state: state as returned by serialize_workflow.
    :param workflow_spec: the workflow's spec -- required, because the
        compact serializer does not embed specs.
    :param read_only: (Optional) True to restore the workflow in READ
        ONLY mode.

    Additional kwargs are forwarded to :meth:`new_workflow`.
    """
    if workflow_spec is None:
        raise NotImplementedError(
            'Including the spec serialization with the workflow state is '
            ' not implemented. A \'workflow_spec\' must '
            'be provided.')
    workflow = self.new_workflow(
        workflow_spec, read_only=read_only, **kwargs)
    self._restore_workflow_state(workflow, s_state)
    return workflow
def new_workflow(self, workflow_spec, read_only=False, **kwargs):
    """Create a fresh BpmnWorkflow from *workflow_spec*.

    :param workflow_spec: the workflow spec to use.
    :param read_only: whether the workflow should be read-only.
    :param kwargs: extra kwargs (forwarded from deserialize_workflow)
        passed straight to the BpmnWorkflow constructor.
    """
    return BpmnWorkflow(workflow_spec, read_only=read_only, **kwargs)
def _start(self, my_task, force=False):
    """Poll the task's subprocess, launching it on first call.

    Returns True once the subprocess has terminated (its communicate()
    output is stored on ``my_task.results``), and False while it is
    still running.  (The original docstring's "Returns False when
    successfully fired, True otherwise" described this backwards.)
    """
    if getattr(my_task, 'subprocess', None) is None:
        my_task.subprocess = subprocess.Popen(self.args,
                                              stderr=subprocess.STDOUT,
                                              stdout=subprocess.PIPE)
    if my_task.subprocess:
        my_task.subprocess.poll()
        if my_task.subprocess.returncode is None:
            # Still waiting.
            return False
        my_task.results = my_task.subprocess.communicate()
        return True
    return False
def _setstate(self, value, force=False):
    """
    Setting force to True allows for changing a state after it
    COMPLETED. This would otherwise be invalid.

    :param value: the new state (an integer from the state constants).
    :param force: allow a transition to a numerically lower state than
        the current one, which is normally rejected.
    :raises WorkflowException: if the state would move backwards and
        ``force`` is not set.
    """
    if self._state == value:
        # No-op transition: skip logging and history entirely.
        return
    # States are ordered integers; moving to a lower value is a
    # regression and is rejected unless explicitly forced.
    if value < self._state and not force:
        raise WorkflowException(self.task_spec,
                                'state went from %s to %s!' % (
                                    self.get_state_name(),
                                    self.state_names[value]))
    if __debug__:
        old = self.get_state_name()
    self._state = value
    if __debug__:
        self.log.append("Moving '%s' from %s to %s" % (
            self.get_name(), old, self.get_state_name()))
    self.state_history.append(value)
    # NOTE(review): 'old' is only bound when __debug__ is true; running
    # under `python -O` would make this LOG call raise NameError --
    # confirm whether -O is ever used with this code.
    LOG.debug("Moving '%s' (spec=%s) from %s to %s" % (
        self.get_name(), self.task_spec.name, old,
        self.get_state_name()))
def _set_state(self, state, force=True): """ Setting force to True allows for changing a state after it COMPLETED. This would otherwise be invalid. """ self._setstate(state, True) self.last_state_change = time.time()
def _add_child(self, task_spec, state=MAYBE):
    """
    Adds a new child and assigns the given TaskSpec to it.

    :type  task_spec: TaskSpec
    :param task_spec: The task spec that is assigned to the new child.
    :type  state: integer
    :param state: The bitmask of states for the new child.
    :rtype:  Task
    :returns: The new child task.
    :raises ValueError: if task_spec is None.
    :raises WorkflowException: if a definite (non-predicted) child
        would be added to a merely predicted task.
    """
    if task_spec is None:
        raise ValueError(self, '_add_child() requires a TaskSpec')
    # A predicted (speculative) task may only carry predicted children;
    # anything else would make the task tree inconsistent.
    if self._is_predicted() and state & self.PREDICTED_MASK == 0:
        msg = 'Attempt to add non-predicted child to predicted task'
        raise WorkflowException(self.task_spec, msg)
    task = Task(self.workflow, task_spec, self, state=state)
    # Children inherit the parent's thread id.
    task.thread_id = self.thread_id
    if state == self.READY:
        # READY children are announced to their spec immediately.
        task._ready()
    return task
def _assign_new_thread_id(self, recursive=True): """ Assigns a new thread id to the task. :type recursive: bool :param recursive: Whether to assign the id to children recursively. :rtype: bool :returns: The new thread id. """ self.__class__.thread_id_pool += 1 self.thread_id = self.__class__.thread_id_pool if not recursive: return self.thread_id for child in self: child.thread_id = self.thread_id return self.thread_id
def _sync_children(self, task_specs, state=MAYBE):
    """
    This method syncs up the task's children with the given list of
    task specs. In other words::

        - Add one child for each given TaskSpec, unless that
          child already exists.
        - Remove all children for which there is no spec in the
          given list, unless it is a "triggered" task.

    .. note::

       It is an error if the task has a non-predicted child that is
       not given in the TaskSpecs.

    :type  task_specs: list(TaskSpec)
    :param task_specs: The list of task specs that may become children.
    :type  state: integer
    :param state: The bitmask of states for the new children.
    :raises ValueError: if task_specs is None.
    :raises WorkflowException: if a definite child would have to be
        removed.
    """
    LOG.debug("Updating children for %s" % self.get_name())
    if task_specs is None:
        raise ValueError('"task_specs" argument is None')
    # 'add' starts as a copy of the wanted specs; specs matched by an
    # existing child are removed from it as we scan.
    add = task_specs[:]

    # Create a list of all children that are no longer needed.
    remove = []
    for child in self.children:
        # Triggered tasks are never removed.
        if child.triggered:
            continue

        # Check whether the task needs to be removed.
        if child.task_spec in add:
            add.remove(child.task_spec)
            continue

        # Non-predicted tasks must not be removed, so they HAVE to be in
        # the given task spec list.
        if child._is_definite():
            raise WorkflowException(self.task_spec,
                                    'removal of non-predicted child %s' %
                                    repr(child))
        remove.append(child)

    # Remove and add the children accordingly.
    for child in remove:
        self.children.remove(child)
    for task_spec in add:
        self._add_child(task_spec, state)
def _is_descendant_of(self, parent): """ Returns True if parent is in the list of ancestors, returns False otherwise. :type parent: Task :param parent: The parent that is searched in the ancestors. :rtype: bool :returns: Whether the parent was found. """ if self.parent is None: return False if self.parent == parent: return True return self.parent._is_descendant_of(parent)
def _find_child_of(self, parent_task_spec): """ Returns the ancestor that has a task with the given task spec as a parent. If no such ancestor was found, the root task is returned. :type parent_task_spec: TaskSpec :param parent_task_spec: The wanted ancestor. :rtype: Task :returns: The child of the given ancestor. """ if self.parent is None: return self if self.parent.task_spec == parent_task_spec: return self return self.parent._find_child_of(parent_task_spec)
def _find_any(self, task_spec): """ Returns any descendants that have the given task spec assigned. :type task_spec: TaskSpec :param task_spec: The wanted task spec. :rtype: list(Task) :returns: The tasks objects that are attached to the given task spec. """ tasks = [] if self.task_spec == task_spec: tasks.append(self) for child in self: if child.task_spec != task_spec: continue tasks.append(child) return tasks
def _find_ancestor(self, task_spec): """ Returns the ancestor that has the given task spec assigned. If no such ancestor was found, the root task is returned. :type task_spec: TaskSpec :param task_spec: The wanted task spec. :rtype: Task :returns: The ancestor. """ if self.parent is None: return self if self.parent.task_spec == task_spec: return self.parent return self.parent._find_ancestor(task_spec)
def _find_ancestor_from_name(self, name): """ Returns the ancestor that has a task with the given name assigned. Returns None if no such ancestor was found. :type name: str :param name: The name of the wanted task. :rtype: Task :returns: The ancestor. """ if self.parent is None: return None if self.parent.get_name() == name: return self.parent return self.parent._find_ancestor_from_name(name)
def _ready(self): """ Marks the task as ready for execution. """ if self._has_state(self.COMPLETED) or self._has_state(self.CANCELLED): return self._set_state(self.READY) self.task_spec._on_ready(self)
def get_state_name(self):
    """
    Returns a textual representation of this Task's state: the names
    of all set state bits, joined by '|'.
    """
    active = [name for state, name in self.state_names.items()
              if self._has_state(state)]
    return '|'.join(active)
def _inherit_data(self):
    """
    Copies the parent task's data into this task's data.
    """
    source = self.parent
    LOG.debug("'%s' inheriting data from '%s'" % (self.get_name(),
                                                  source.get_name()),
              extra=dict(data=source.data))
    self.set_data(**source.data)
def cancel(self):
    """
    Cancels the item if it was not yet completed, and removes
    any children that are LIKELY.

    An already-finished task instead propagates the cancel to its
    children.
    """
    if not self._is_finished():
        self._set_state(self.CANCELLED)
        self._drop_children()
        self.task_spec._on_cancel(self)
        return
    # Already finished: cancel the subtree instead.
    for child in self.children:
        child.cancel()
def complete(self):
    """
    Marks this task COMPLETED and runs the spec's completion hook.

    Called by the associated task to let us know that its state
    has changed (e.g. from FUTURE to COMPLETED.)
    """
    spec = self.task_spec
    self._set_state(self.COMPLETED)
    return spec._on_complete(self)
def get_dump(self, indent=0, recursive=True):
    """
    Returns the subtree as a string for debugging.

    :rtype:  str
    :returns: The debug information.
    """
    pieces = ['%s/' % self.id,
              '%s:' % self.thread_id,
              ' Task of %s' % self.get_name()]
    if self.task_spec.description:
        pieces.append(' (%s)' % self.get_description())
    pieces.append(' State: %s' % self.get_state_name())
    pieces.append(' Children: %s' % len(self.children))
    lines = [(' ' * indent * 2) + ''.join(pieces)]
    if recursive:
        lines.extend(child.get_dump(indent + 1)
                     for child in self.children)
    return '\n'.join(lines)
def _eval_args(args, my_task):
    """Parses args and evaluates any Attrib entries against my_task."""
    return [valueof(my_task, item)
            if isinstance(item, (Attrib, PathAttrib)) else item
            for item in args]
def _eval_kwargs(kwargs, my_task):
    """Parses kwargs and evaluates any Attrib values against my_task."""
    return {key: valueof(my_task, value)
            if isinstance(value, (Attrib, PathAttrib)) else value
            for key, value in kwargs.items()}
def Serializable(o):
    """Make sure an object is JSON-serializable.

    Use this to return errors and other info that does not need to be
    deserialized or does not contain important app data. Best for
    returning error info and such. Objects that cannot be JSON-encoded
    are reduced to their ``repr()``.
    """
    if isinstance(o, (str, dict, int)):
        return o
    try:
        json.dumps(o)
        return o
    # json.dumps raises TypeError for unsupported types and ValueError
    # for circular references -- catch only those, not every Exception.
    except (TypeError, ValueError):
        # Typo fixed in the log message; call repr() instead of the
        # dunder directly.
        LOG.debug("Got a non-serializable object: %s" % o)
        return repr(o)
def _send_call(self, my_task):
    """Sends Celery asynchronous call and stores async call
    information for retrieval later"""
    args, kwargs = None, None
    # Resolve any Attrib/PathAttrib placeholders against the task data
    # before dispatching to Celery.
    if self.args:
        args = _eval_args(self.args, my_task)
    if self.kwargs:
        kwargs = _eval_kwargs(self.kwargs, my_task)
    LOG.debug(
        "%s (task id %s) calling %s" % (self.name, my_task.id, self.call),
        extra=dict(data=dict(args=args, kwargs=kwargs)))
    async_call = default_app.send_task(self.call, args=args, kwargs=kwargs)
    # Persist the celery task id so the call can be re-attached after
    # (de)serialization; also keep the live AsyncResult on the task.
    my_task._set_internal_data(task_id=async_call.task_id)
    my_task.async_call = async_call
    LOG.debug("'%s' called: %s" % (self.call, my_task.async_call.task_id))
def _restart(self, my_task):
    """Abort the pending celery task (saving its id to the history)
    and retry it.

    :param my_task: the task to refire; must be in WAITING state.
    :raises WorkflowException: if the task is not WAITING.
    :returns: the result of re-triggering via ``_start``.
    """
    if not my_task._has_state(Task.WAITING):
        # Fixed: the two message fragments previously concatenated to
        # "...is notin WAITING state" (missing space).
        raise WorkflowException(my_task, "Cannot refire a task that is not "
                                "in WAITING state")
    # Check state of existing call and abort it (save history)
    if my_task._get_internal_data('task_id') is not None:
        if not hasattr(my_task, 'async_call'):
            task_id = my_task._get_internal_data('task_id')
            my_task.async_call = default_app.AsyncResult(task_id)
            my_task.deserialized = True
            my_task.async_call.state  # manually refresh
        async_call = my_task.async_call
        # Bug fix: Celery reports failure as 'FAILURE'; the previous
        # check for 'FAILED' could never match.
        if async_call.state == 'FAILURE':
            pass
        elif async_call.state in ['RETRY', 'PENDING', 'STARTED']:
            async_call.revoke()
            # Bug fix: format arguments were swapped (state printed
            # where the task belongs and vice versa).
            LOG.info("Celery task '%s' was in %s state and was revoked" % (
                async_call, async_call.state))
        elif async_call.state == 'SUCCESS':
            LOG.warning("Celery task '%s' succeeded, but a refire was "
                        "requested" % async_call)
        self._clear_celery_task_data(my_task)
    # Retrigger
    return self._start(my_task)
def _clear_celery_task_data(self, my_task): """ Clear celery task data """ # Save history if 'task_id' in my_task.internal_data: # Save history for diagnostics/forensics history = my_task._get_internal_data('task_history', []) history.append(my_task._get_internal_data('task_id')) del my_task.internal_data['task_id'] my_task._set_internal_data(task_history=history) if 'task_state' in my_task.internal_data: del my_task.internal_data['task_state'] if 'error' in my_task.internal_data: del my_task.internal_data['error'] if hasattr(my_task, 'async_call'): delattr(my_task, 'async_call') if hasattr(my_task, 'deserialized'): delattr(my_task, 'deserialized')
def _start(self, my_task, force=False):
    """Returns False when successfully fired, True otherwise"""
    # NOTE(review): in practice this returns True when the celery call
    # has finished (its result or error stored on the task) and False
    # while still pending or on error -- the one-line summary above
    # looks inverted; confirm against callers before changing it.

    # Deserialize async call if necessary
    if not hasattr(my_task, 'async_call') and \
            my_task._get_internal_data('task_id') is not None:
        task_id = my_task._get_internal_data('task_id')
        my_task.async_call = default_app.AsyncResult(task_id)
        my_task.deserialized = True
        LOG.debug("Reanimate AsyncCall %s" % task_id)

    # Make the call if not already done
    if not hasattr(my_task, 'async_call'):
        self._send_call(my_task)

    # Get call status (and manually refresh if deserialized)
    if getattr(my_task, "deserialized", False):
        my_task.async_call.state  # must manually refresh if deserialized
    if my_task.async_call.state == 'FAILURE':
        LOG.debug("Async Call for task '%s' failed: %s" % (
            my_task.get_name(), my_task.async_call.info))
        # Record the failure details for later inspection.
        info = {}
        info['traceback'] = my_task.async_call.traceback
        info['info'] = Serializable(my_task.async_call.info)
        info['state'] = my_task.async_call.state
        my_task._set_internal_data(task_state=info)
    elif my_task.async_call.state == 'RETRY':
        # Celery is retrying; snapshot the state but keep waiting.
        info = {}
        info['traceback'] = my_task.async_call.traceback
        info['info'] = Serializable(my_task.async_call.info)
        info['state'] = my_task.async_call.state
        my_task._set_internal_data(task_state=info)
    elif my_task.async_call.ready():
        result = my_task.async_call.result
        if isinstance(result, Exception):
            LOG.warn("Celery call %s failed: %s" % (self.call, result))
            my_task._set_internal_data(error=Serializable(result))
            return False
        LOG.debug("Completed celery call %s with result=%s" % (self.call,
                                                               result))
        # Format result
        if self.result_key:
            data = {self.result_key: result}
        else:
            if isinstance(result, dict):
                data = result
            else:
                data = {'result': result}
        # Load formatted result into internal_data
        if self.merge_results:
            merge_dictionary(my_task.internal_data, data)
        else:
            my_task.set_data(**data)
        return True
    else:
        LOG.debug("async_call.ready()=%s. TryFire for '%s' "
                  "returning False" % (my_task.async_call.ready(),
                                       my_task.get_name()))
        return False
def ancestors(self):
    """Returns the list of ancestor task specs reachable through this
    spec's inputs (depth-first, duplicates suppressed)."""
    found = []

    def visit(spec):
        for predecessor in spec.inputs:
            if predecessor not in found:
                found.append(predecessor)
                visit(predecessor)

    visit(self)
    return found
def set_data(self, **kwargs):
    """
    Defines the given data field(s) using the given name/value pairs.

    :raises WorkflowException: if any key is a spec-defined
        (read-only) field.
    """
    protected = [key for key in kwargs if key in self.defines]
    if protected:
        msg = "Spec data %s can not be modified" % protected[0]
        raise WorkflowException(self, msg)
    self.data.update(kwargs)
def connect(self, taskspec):
    """
    Connect the *following* task to this one. In other words, the
    given task is added as an output task.

    :type  taskspec: TaskSpec
    :param taskspec: The new output task.
    """
    successor = taskspec
    # Register the successor first, then let it know about us.
    self.outputs.append(successor)
    successor._connect_notify(self)
def _predict(self, my_task, seen=None, looked_ahead=0):
    """
    Updates the branch such that all possible future routes are added.

    Should NOT be overwritten! Instead, overwrite _predict_hook().

    :type  my_task: Task
    :param my_task: The associated task in the task tree.
    :type  seen: list[taskspec]
    :param seen: A list of already visited tasks.
    :type  looked_ahead: integer
    :param looked_ahead: The depth of the predicted path so far.
    """
    if my_task._is_finished():
        return
    if seen is None:
        seen = []
    elif self in seen:
        # Guard against cycles in the spec graph.
        return
    # NOTE(review): this _is_finished() check is redundant -- the early
    # return above already covered it.
    if not my_task._is_finished():
        self._predict_hook(my_task)
    if not my_task._is_definite():
        # Speculative branch: stop expanding once the configured
        # lookahead depth is exhausted.
        if looked_ahead + 1 >= self.lookahead:
            return
        seen.append(self)
    for child in my_task.children:
        # Each child gets its own copy of 'seen' so sibling branches
        # do not interfere with each other.
        child.task_spec._predict(child, seen[:], looked_ahead + 1)
def _update_hook(self, my_task):
    """
    Typically this method should perform the following actions::

        - Update the state of the corresponding task.
        - Update the predictions for its successors.

    Returning non-False will cause the task to go into READY.
    Returning any other value will cause no action.
    """
    if my_task._is_predicted():
        # Still speculative: refresh the predicted subtree first.
        self._predict(my_task)
    LOG.debug("'%s'._update_hook says parent (%s, state=%s) "
              "is_finished=%s" % (self.name, my_task.parent.get_name(),
                                  my_task.parent.get_state_name(),
                                  my_task.parent._is_finished()))
    if not my_task.parent._is_finished():
        # Parent not done yet; this task cannot become READY.
        return
    self.entered_event.emit(my_task.workflow, my_task)
    my_task._ready()
def _on_ready(self, my_task):
    """
    Return True on success, False otherwise.

    :type  my_task: Task
    :param my_task: The associated task in the task tree.

    NOTE(review): despite the summary above, every path here returns
    None; confirm callers do not rely on a boolean result.
    """
    assert my_task is not None
    self.test()

    # Acquire locks, if any.
    for lock in self.locks:
        mutex = my_task.workflow._get_mutex(lock)
        if not mutex.testandset():
            # NOTE(review): bailing out here leaves any mutexes that
            # were acquired in earlier iterations locked -- verify
            # that this is intended.
            return

    # Assign variables, if so requested.
    for assignment in self.pre_assign:
        assignment.assign(my_task, my_task)

    # Run task-specific code.
    self._on_ready_before_hook(my_task)
    self.reached_event.emit(my_task.workflow, my_task)
    self._on_ready_hook(my_task)

    # Run user code, if any.
    if self.ready_event.emit(my_task.workflow, my_task):
        # Assign variables, if so requested.
        for assignment in self.post_assign:
            assignment.assign(my_task, my_task)

    # Release locks, if any.
    for lock in self.locks:
        mutex = my_task.workflow._get_mutex(lock)
        mutex.unlock()

    self.finished_event.emit(my_task.workflow, my_task)
def _on_complete(self, my_task): """ Return True on success, False otherwise. Should not be overwritten, overwrite _on_complete_hook() instead. :type my_task: Task :param my_task: The associated task in the task tree. :rtype: boolean :returns: True on success, False otherwise. """ assert my_task is not None if my_task.workflow.debug: print("Executing %s: %s (%s)" % ( my_task.task_spec.__class__.__name__, my_task.get_name(), my_task.get_description())) self._on_complete_hook(my_task) # Notify the Workflow. my_task.workflow._task_completed_notify(my_task) if my_task.workflow.debug: if hasattr(my_task.workflow, "outer_workflow"): my_task.workflow.outer_workflow.task_tree.dump() self.completed_event.emit(my_task.workflow, my_task) return True
def create_package(self): """ Creates the package, writing the data out to the provided file-like object. """ # Check that all files exist (and calculate the longest shared path # prefix): self.input_path_prefix = None for filename in self.input_files: if not os.path.isfile(filename): raise ValueError( '%s does not exist or is not a file' % filename) if self.input_path_prefix: full = os.path.abspath(os.path.dirname(filename)) while not (full.startswith(self.input_path_prefix) and self.input_path_prefix): self.input_path_prefix = self.input_path_prefix[:-1] else: self.input_path_prefix = os.path.abspath( os.path.dirname(filename)) # Parse all of the XML: self.bpmn = {} for filename in self.input_files: bpmn = ET.parse(filename) self.bpmn[os.path.abspath(filename)] = bpmn # Now run through pre-parsing and validation: for filename, bpmn in list(self.bpmn.items()): bpmn = self.pre_parse_and_validate(bpmn, filename) self.bpmn[os.path.abspath(filename)] = bpmn # Now check that we can parse it fine: for filename, bpmn in list(self.bpmn.items()): self.parser.add_bpmn_xml(bpmn, filename=filename) self.wf_spec = self.parser.get_spec(self.entry_point_process) # Now package everything: self.package_zip = zipfile.ZipFile( self.package_file, "w", compression=zipfile.ZIP_DEFLATED) done_files = set() for spec in self.wf_spec.get_specs_depth_first(): filename = spec.file if filename not in done_files: done_files.add(filename) bpmn = self.bpmn[os.path.abspath(filename)] self.write_to_package_zip( "%s.bpmn" % spec.name, ET.tostring(bpmn.getroot())) self.write_file_to_package_zip( "src/" + self._get_zip_path(filename), filename) self._call_editor_hook('package_for_editor', spec, filename) self.write_meta_data() self.write_manifest() self.package_zip.close()
def write_file_to_package_zip(self, filename, src_filename):
    """
    Writes a local file in to the zip file and adds it to the manifest
    dictionary

    :param filename: The zip file name

    :param src_filename: the local file name
    """
    # Idiom fix: open the file directly in the 'with' statement so the
    # handle is closed even if reading raises.
    with open(src_filename) as f:
        data = f.read()
    self.manifest[filename] = md5hash(data)
    self.package_zip.write(src_filename, filename)
def write_to_package_zip(self, filename, data):
    """
    Writes data to the zip file and adds it to the manifest dictionary

    :param filename: The zip file name

    :param data: the data
    """
    digest = md5hash(data)
    self.manifest[filename] = digest
    self.package_zip.writestr(filename, data)
def write_manifest(self): """ Write the manifest content to the zip file. It must be a predictable order. """ config = configparser.ConfigParser() config.add_section('Manifest') for f in sorted(self.manifest.keys()): config.set('Manifest', f.replace( '\\', '/').lower(), self.manifest[f]) ini = StringIO() config.write(ini) self.manifest_data = ini.getvalue() self.package_zip.writestr(self.MANIFEST_FILE, self.manifest_data)
def pre_parse_and_validate(self, bpmn, filename):
    """
    A subclass can override this method to provide additional parsing
    or validation. It should call the parent method first.

    :param bpmn: an lxml tree of the bpmn content
    :param filename: the source file name

    This must return the updated bpmn object (or a replacement)
    """
    replacement = self._call_editor_hook(
        'pre_parse_and_validate', bpmn, filename)
    return replacement if replacement else bpmn
def pre_parse_and_validate_signavio(self, bpmn, filename):
    """
    Signavio-specific editor hook for pre-parsing and validation:
    checks for disconnected boundary events, then repairs the
    call-activity references Signavio emits.

    A subclass can override this method to provide additional parsing
    or validation. It should call the parent method first.

    :param bpmn: an lxml tree of the bpmn content
    :param filename: the source file name

    This must return the updated bpmn object (or a replacement)
    """
    self._check_for_disconnected_boundary_events_signavio(bpmn, filename)
    self._fix_call_activities_signavio(bpmn, filename)
    return bpmn
def _fix_call_activities_signavio(self, bpmn, filename):
    """
    Signavio produces slightly invalid BPMN for call activity nodes...
    It is supposed to put a reference to the id of the called process
    in to the calledElement attribute. Instead it stores a string
    (which is the name of the process - not its ID, in our
    interpretation) in an extension tag.

    This code gets the name of the 'subprocess reference', finds a
    process with a matching name, and sets the calledElement attribute
    to the id of the process.

    :raises ValidationException: if the Signavio metadata is missing,
        or if zero or multiple processes match the referenced name.
    """
    for node in xpath_eval(bpmn)(".//bpmn:callActivity"):
        calledElement = node.get('calledElement', None)
        if not calledElement:
            # Signavio stores the reference in its own extension
            # element instead of the calledElement attribute.
            signavioMetaData = xpath_eval(node, extra_ns={
                'signavio': SIGNAVIO_NS})(
                './/signavio:signavioMetaData[@metaKey="entry"]')
            if not signavioMetaData:
                raise ValidationException(
                    'No Signavio "Subprocess reference" specified.',
                    node=node, filename=filename)
            subprocess_reference = one(signavioMetaData).get('metaValue')
            # Search every parsed BPMN file for a process whose name
            # (or id, if unnamed) matches the reference.
            matches = []
            for b in list(self.bpmn.values()):
                for p in xpath_eval(b)(".//bpmn:process"):
                    if (p.get('name', p.get('id', None)) ==
                            subprocess_reference):
                        matches.append(p)
            if not matches:
                raise ValidationException(
                    "No matching process definition found for '%s'." %
                    subprocess_reference, node=node, filename=filename)
            if len(matches) != 1:
                raise ValidationException(
                    "More than one matching process definition "
                    " found for '%s'." % subprocess_reference, node=node,
                    filename=filename)

            node.set('calledElement', matches[0].get('id'))