Columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (string, 1 distinct value: entailment)
def atualizar_software_sat(self): """Overrides :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`. :return: A standard SAT response. :rtype: satcfe.resposta.padrao.RespostaSAT """ resp = self._http_post('atualizarsoftwaresat') conteudo = resp.json() return RespostaSAT.atualizar_software_sat(conteudo.get('retorno'))
Overrides :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`. :return: A standard SAT response. :rtype: satcfe.resposta.padrao.RespostaSAT
entailment
def extrair_logs(self): """Overrides :meth:`~satcfe.base.FuncoesSAT.extrair_logs`. :return: A SAT response specialized for ``ExtrairLogs``. :rtype: satcfe.resposta.extrairlogs.RespostaExtrairLogs """ resp = self._http_post('extrairlogs') conteudo = resp.json() return RespostaExtrairLogs.analisar(conteudo.get('retorno'))
Overrides :meth:`~satcfe.base.FuncoesSAT.extrair_logs`. :return: A SAT response specialized for ``ExtrairLogs``. :rtype: satcfe.resposta.extrairlogs.RespostaExtrairLogs
entailment
def bloquear_sat(self): """Overrides :meth:`~satcfe.base.FuncoesSAT.bloquear_sat`. :return: A standard SAT response. :rtype: satcfe.resposta.padrao.RespostaSAT """ resp = self._http_post('bloquearsat') conteudo = resp.json() return RespostaSAT.bloquear_sat(conteudo.get('retorno'))
Overrides :meth:`~satcfe.base.FuncoesSAT.bloquear_sat`. :return: A standard SAT response. :rtype: satcfe.resposta.padrao.RespostaSAT
entailment
def desbloquear_sat(self): """Overrides :meth:`~satcfe.base.FuncoesSAT.desbloquear_sat`. :return: A standard SAT response. :rtype: satcfe.resposta.padrao.RespostaSAT """ resp = self._http_post('desbloquearsat') conteudo = resp.json() return RespostaSAT.desbloquear_sat(conteudo.get('retorno'))
Overrides :meth:`~satcfe.base.FuncoesSAT.desbloquear_sat`. :return: A standard SAT response. :rtype: satcfe.resposta.padrao.RespostaSAT
entailment
def trocar_codigo_de_ativacao(self, novo_codigo_ativacao, opcao=constantes.CODIGO_ATIVACAO_REGULAR, codigo_emergencia=None): """Overrides :meth:`~satcfe.base.FuncoesSAT.trocar_codigo_de_ativacao`. :return: A standard SAT response. :rtype: satcfe.resposta.padrao.RespostaSAT """ resp = self._http_post('trocarcodigodeativacao', novo_codigo_ativacao=novo_codigo_ativacao, opcao=opcao, codigo_emergencia=codigo_emergencia) conteudo = resp.json() return RespostaSAT.trocar_codigo_de_ativacao(conteudo.get('retorno'))
Overrides :meth:`~satcfe.base.FuncoesSAT.trocar_codigo_de_ativacao`. :return: A standard SAT response. :rtype: satcfe.resposta.padrao.RespostaSAT
entailment
def bounds(self) -> typing.Tuple[typing.Tuple[float, float], typing.Tuple[float, float]]: """Return the bounds property in relative coordinates. Bounds is a tuple ((top, left), (height, width))""" ...
Return the bounds property in relative coordinates. Bounds is a tuple ((top, left), (height, width))
entailment
def end(self, value: typing.Union[float, typing.Tuple[float, float]]) -> None: """Set the end property in relative coordinates. End may be a float when graphic is an Interval or a tuple (y, x) when graphic is a Line.""" ...
Set the end property in relative coordinates. End may be a float when graphic is an Interval or a tuple (y, x) when graphic is a Line.
entailment
def start(self, value: typing.Union[float, typing.Tuple[float, float]]) -> None: """Set the start property in relative coordinates. Start may be a float when graphic is an Interval or a tuple (y, x) when graphic is a Line.""" ...
Set the start property in relative coordinates. Start may be a float when graphic is an Interval or a tuple (y, x) when graphic is a Line.
entailment
def vector(self) -> typing.Tuple[typing.Tuple[float, float], typing.Tuple[float, float]]: """Return the vector property in relative coordinates. Vector will be a tuple of tuples ((y_start, x_start), (y_end, x_end)).""" ...
Return the vector property in relative coordinates. Vector will be a tuple of tuples ((y_start, x_start), (y_end, x_end)).
entailment
def get_data_item_for_hardware_source(self, hardware_source, channel_id: str=None, processor_id: str=None, create_if_needed: bool=False, large_format: bool=False) -> DataItem: """Get the data item associated with hardware source and (optional) channel id and processor_id. Optionally create if missing. :param hardware_source: The hardware_source. :param channel_id: The (optional) channel id. :param processor_id: The (optional) processor id for the channel. :param create_if_needed: Whether to create a new data item if none is found. :return: The associated data item. May be None. .. versionadded:: 1.0 Status: Provisional Scriptable: Yes """ ...
Get the data item associated with hardware source and (optional) channel id and processor_id. Optionally create if missing. :param hardware_source: The hardware_source. :param channel_id: The (optional) channel id. :param processor_id: The (optional) processor id for the channel. :param create_if_needed: Whether to create a new data item if none is found. :return: The associated data item. May be None. .. versionadded:: 1.0 Status: Provisional Scriptable: Yes
entailment
def get_data_item_for_reference_key(self, data_item_reference_key: str=None, create_if_needed: bool=False, large_format: bool=False) -> DataItem: """Get the data item associated with data item reference key. Optionally create if missing. :param data_item_reference_key: The data item reference key. :param create_if_needed: Whether to create a new data item if none is found. :return: The associated data item. May be None. .. versionadded:: 1.0 Status: Provisional Scriptable: Yes """ ...
Get the data item associated with data item reference key. Optionally create if missing. :param data_item_reference_key: The data item reference key. :param create_if_needed: Whether to create a new data item if none is found. :return: The associated data item. May be None. .. versionadded:: 1.0 Status: Provisional Scriptable: Yes
entailment
def show_get_string_message_box(self, caption: str, text: str, accepted_fn, rejected_fn=None, accepted_text: str=None, rejected_text: str=None) -> None: """Show a dialog box and ask for a string. Caption describes the user prompt. Text is the initial/default string. Accepted function must be a function taking one argument which is the resulting text if the user accepts the message dialog. It will only be called if the user clicks OK. Rejected function can be a function taking no arguments, called if the user clicks Cancel. .. versionadded:: 1.0 Scriptable: No """ ...
Show a dialog box and ask for a string. Caption describes the user prompt. Text is the initial/default string. Accepted function must be a function taking one argument which is the resulting text if the user accepts the message dialog. It will only be called if the user clicks OK. Rejected function can be a function taking no arguments, called if the user clicks Cancel. .. versionadded:: 1.0 Scriptable: No
entailment
def create_calibration(self, offset: float=None, scale: float=None, units: str=None) -> Calibration.Calibration: """Create a calibration object with offset, scale, and units. :param offset: The offset of the calibration. :param scale: The scale of the calibration. :param units: The units of the calibration as a string. :return: The calibration object. .. versionadded:: 1.0 Scriptable: Yes Calibrated units and uncalibrated units have the following relationship: :samp:`calibrated_value = offset + value * scale` """ ...
Create a calibration object with offset, scale, and units. :param offset: The offset of the calibration. :param scale: The scale of the calibration. :param units: The units of the calibration as a string. :return: The calibration object. .. versionadded:: 1.0 Scriptable: Yes Calibrated units and uncalibrated units have the following relationship: :samp:`calibrated_value = offset + value * scale`
entailment
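Worth pausing on the formula in the entry above. The following is a minimal standalone sketch of the stated relationship (plain functions for illustration, not the Calibration class itself):

```python
def to_calibrated(value, offset=0.0, scale=1.0):
    # calibrated_value = offset + value * scale
    return offset + value * scale

def to_uncalibrated(calibrated_value, offset=0.0, scale=1.0):
    # invert the mapping above
    return (calibrated_value - offset) / scale

# e.g. pixel 50 with offset -10.0 nm and scale 0.5 nm/pixel lies at 15.0 nm
assert to_calibrated(50, offset=-10.0, scale=0.5) == 15.0
assert to_uncalibrated(15.0, offset=-10.0, scale=0.5) == 50.0
```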
def create_data_and_metadata(self, data: numpy.ndarray, intensity_calibration: Calibration.Calibration=None, dimensional_calibrations: typing.List[Calibration.Calibration]=None, metadata: dict=None, timestamp: str=None, data_descriptor: DataAndMetadata.DataDescriptor=None) -> DataAndMetadata.DataAndMetadata: """Create a data_and_metadata object from data. :param data: an ndarray of data. :param intensity_calibration: An optional calibration object. :param dimensional_calibrations: An optional list of calibration objects. :param metadata: A dict of metadata. :param timestamp: A datetime object. :param data_descriptor: A data descriptor describing the dimensions. .. versionadded:: 1.0 Scriptable: Yes """ ...
Create a data_and_metadata object from data. :param data: an ndarray of data. :param intensity_calibration: An optional calibration object. :param dimensional_calibrations: An optional list of calibration objects. :param metadata: A dict of metadata. :param timestamp: A datetime object. :param data_descriptor: A data descriptor describing the dimensions. .. versionadded:: 1.0 Scriptable: Yes
entailment
def create_data_and_metadata_from_data(self, data: numpy.ndarray, intensity_calibration: Calibration.Calibration=None, dimensional_calibrations: typing.List[Calibration.Calibration]=None, metadata: dict=None, timestamp: str=None) -> DataAndMetadata.DataAndMetadata: """Create a data_and_metadata object from data. .. versionadded:: 1.0 .. deprecated:: 1.1 Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead. Scriptable: No """ ...
Create a data_and_metadata object from data. .. versionadded:: 1.0 .. deprecated:: 1.1 Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead. Scriptable: No
entailment
def create_data_descriptor(self, is_sequence: bool, collection_dimension_count: int, datum_dimension_count: int) -> DataAndMetadata.DataDescriptor: """Create a data descriptor. :param is_sequence: whether the descriptor describes a sequence of data. :param collection_dimension_count: the number of collection dimensions represented by the descriptor. :param datum_dimension_count: the number of datum dimensions represented by the descriptor. .. versionadded:: 1.0 Scriptable: Yes """ ...
Create a data descriptor. :param is_sequence: whether the descriptor describes a sequence of data. :param collection_dimension_count: the number of collection dimensions represented by the descriptor. :param datum_dimension_count: the number of datum dimensions represented by the descriptor. .. versionadded:: 1.0 Scriptable: Yes
entailment
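To make the three descriptor arguments above concrete, here is an illustrative sketch; the shape interpretations and the `api` call in the comment are assumptions, not taken from the source:

```python
# (is_sequence, collection_dimension_count, datum_dimension_count) for common cases
cases = {
    "single 2D image":            (False, 0, 2),
    "time sequence of 2D images": (True,  0, 2),
    "2D scan of 1D spectra":      (False, 2, 1),  # spectrum imaging
}
for name, (is_seq, collection, datum) in cases.items():
    print(name, "->", is_seq, collection, datum)
# e.g. api.create_data_descriptor(False, 2, 1)  # `api` is a hypothetical Facade object
```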
def make_calibration_row_widget(ui, calibration_observable, label: str=None): """Called when an item (calibration_observable) is inserted into the list widget. Returns a widget.""" calibration_row = ui.create_row_widget() row_label = ui.create_label_widget(label, properties={"width": 60}) row_label.widget_id = "label" offset_field = ui.create_line_edit_widget(properties={"width": 60}) offset_field.widget_id = "offset" scale_field = ui.create_line_edit_widget(properties={"width": 60}) scale_field.widget_id = "scale" units_field = ui.create_line_edit_widget(properties={"width": 60}) units_field.widget_id = "units" float_point_4_converter = Converter.FloatToStringConverter(format="{0:.4f}") offset_field.bind_text(Binding.PropertyBinding(calibration_observable, "offset", converter=float_point_4_converter)) scale_field.bind_text(Binding.PropertyBinding(calibration_observable, "scale", converter=float_point_4_converter)) units_field.bind_text(Binding.PropertyBinding(calibration_observable, "units")) # notice the binding of calibration_index below. calibration_row.add(row_label) calibration_row.add_spacing(12) calibration_row.add(offset_field) calibration_row.add_spacing(12) calibration_row.add(scale_field) calibration_row.add_spacing(12) calibration_row.add(units_field) calibration_row.add_stretch() return calibration_row
Called when an item (calibration_observable) is inserted into the list widget. Returns a widget.
entailment
def add_widget_to_content(self, widget): """Subclasses should call this to add content in the section's top level column.""" self.__section_content_column.add_spacing(4) self.__section_content_column.add(widget)
Subclasses should call this to add content in the section's top level column.
entailment
def __create_list_item_widget(self, ui, calibration_observable): """Called when an item (calibration_observable) is inserted into the list widget. Returns a widget.""" calibration_row = make_calibration_row_widget(ui, calibration_observable) column = ui.create_column_widget() column.add_spacing(4) column.add(calibration_row) return column
Called when an item (calibration_observable) is inserted into the list widget. Returns a widget.
entailment
def _repaint(self, drawing_context): """Repaint the canvas item. This will occur on a thread.""" # canvas size canvas_width = self.canvas_size[1] canvas_height = self.canvas_size[0] left = self.display_limits[0] right = self.display_limits[1] # draw left display limit if left > 0.0: with drawing_context.saver(): drawing_context.begin_path() drawing_context.move_to(left * canvas_width, 1) drawing_context.line_to(left * canvas_width, canvas_height-1) drawing_context.line_width = 2 drawing_context.stroke_style = "#000" drawing_context.stroke() # draw right display limit if right < 1.0: with drawing_context.saver(): drawing_context.begin_path() drawing_context.move_to(right * canvas_width, 1) drawing_context.line_to(right * canvas_width, canvas_height-1) drawing_context.line_width = 2 drawing_context.stroke_style = "#FFF" drawing_context.stroke() # draw border with drawing_context.saver(): drawing_context.begin_path() drawing_context.move_to(0,canvas_height) drawing_context.line_to(canvas_width,canvas_height) drawing_context.line_width = 1 drawing_context.stroke_style = "#444" drawing_context.stroke()
Repaint the canvas item. This will occur on a thread.
entailment
def _repaint(self, drawing_context): """Repaint the canvas item. This will occur on a thread.""" # canvas size canvas_width = self.canvas_size[1] canvas_height = self.canvas_size[0] # draw background if self.background_color: with drawing_context.saver(): drawing_context.begin_path() drawing_context.move_to(0,0) drawing_context.line_to(canvas_width,0) drawing_context.line_to(canvas_width,canvas_height) drawing_context.line_to(0,canvas_height) drawing_context.close_path() drawing_context.fill_style = self.background_color drawing_context.fill() # draw the data, if any if (self.data is not None and len(self.data) > 0): # draw the histogram itself with drawing_context.saver(): drawing_context.begin_path() binned_data = Image.rebin_1d(self.data, int(canvas_width), self.__retained_rebin_1d) if int(canvas_width) != self.data.shape[0] else self.data for i in range(canvas_width): drawing_context.move_to(i, canvas_height) drawing_context.line_to(i, canvas_height * (1 - binned_data[i])) drawing_context.line_width = 1 drawing_context.stroke_style = "#444" drawing_context.stroke()
Repaint the canvas item. This will occur on a thread.
entailment
def color_map_data(self, data: numpy.ndarray) -> None: """Set the data and mark the canvas item for updating. Data should be an ndarray of shape (256, 3) with type uint8 """ self.__color_map_data = data self.update()
Set the data and mark the canvas item for updating. Data should be an ndarray of shape (256, 3) with type uint8
entailment
def _repaint(self, drawing_context: DrawingContext.DrawingContext): """Repaint the canvas item. This will occur on a thread.""" # canvas size canvas_width = self.canvas_size.width canvas_height = self.canvas_size.height with drawing_context.saver(): if self.__color_map_data is not None: rgba_image = numpy.empty((4,) + self.__color_map_data.shape[:-1], dtype=numpy.uint32) Image.get_rgb_view(rgba_image)[:] = self.__color_map_data[numpy.newaxis, :, :] # scalar data assigned to each component of rgb view Image.get_alpha_view(rgba_image)[:] = 255 drawing_context.draw_image(rgba_image, 0, 0, canvas_width, canvas_height)
Repaint the canvas item. This will occur on a thread.
entailment
def get_params(self): "Parameters used to initialize the class" import inspect a = inspect.getargspec(self.__init__)[0] out = dict() for key in a[1:]: value = getattr(self, "_%s" % key, None) out[key] = value return out
Parameters used to initialize the class
entailment
def signature(self): "Instance file name" kw = self.get_params() keys = sorted(kw.keys()) l = [] for k in keys: n = k[0] + k[-1] v = kw[k] if k == 'function_set': v = "_".join([x.__name__[0] + x.__name__[-1] + str(x.nargs) for x in kw[k]]) elif k == 'population_class': v = kw[k].__name__ else: v = str(v) l.append('{0}_{1}'.format(n, v)) return '-'.join(l)
Instance file name
entailment
def population(self): "Class containing the population and all the individuals generated" try: return self._p except AttributeError: self._p = self._population_class(base=self, tournament_size=self._tournament_size, classifier=self.classifier, labels=self._labels, es_extra_test=self.es_extra_test, popsize=self._popsize, random_generations=self._random_generations, negative_selection=self._negative_selection) return self._p
Class containing the population and all the individuals generated
entailment
def random_leaf(self): "Returns a random variable with the associated weight" for i in range(self._number_tries_feasible_ind): var = np.random.randint(self.nvar) v = self._random_leaf(var) if v is None: continue return v raise RuntimeError("Could not find a suitable random leaf")
Returns a random variable with the associated weight
entailment
def random_offspring(self): "Returns an offspring with the associated weight(s)" function_set = self.function_set function_selection = self._function_selection_ins function_selection.density = self.population.density function_selection.unfeasible_functions.clear() for i in range(self._number_tries_feasible_ind): if self._function_selection: func_index = function_selection.tournament() else: func_index = function_selection.random_function() func = function_set[func_index] args = self.get_args(func) if args is None: continue args = [self.population.population[x].position for x in args] f = self._random_offspring(func, args) if f is None: function_selection.unfeasible_functions.add(func_index) continue function_selection[func_index] = f.fitness return f raise RuntimeError("Could not find a suitable random offspring")
Returns an offspring with the associated weight(s)
entailment
def stopping_criteria(self): "Test whether the stopping criteria has been achieved." if self.stopping_criteria_tl(): return True if self.generations < np.inf: inds = self.popsize * self.generations flag = inds <= len(self.population.hist) else: flag = False if flag: return True est = self.population.estopping if self._tr_fraction < 1: if est is not None and est.fitness_vs == 0: return True esr = self._early_stopping_rounds if self._tr_fraction < 1 and esr is not None and est is not None: position = self.population.estopping.position if position < self.init_popsize: position = self.init_popsize return (len(self.population.hist) + self._unfeasible_counter - position) > esr return flag
Test whether the stopping criteria has been achieved.
entailment
def nclasses(self, v): "Number of classes of v, also sets the labels" if not self.classifier: return 0 if isinstance(v, list): self._labels = np.arange(len(v)) return if not isinstance(v, np.ndarray): v = tonparray(v) self._labels = np.unique(v) return self._labels.shape[0]
Number of classes of v, also sets the labels
entailment
def fit(self, X, y, test_set=None): """Evolutive process""" self._init_time = time.time() self.X = X if self._popsize == "nvar": self._popsize = self.nvar + len(self._input_functions) if isinstance(test_set, str) and test_set == 'shuffle': test_set = self.shuffle_tr2ts() nclasses = self.nclasses(y) if self.classifier and self._multiple_outputs: pass elif nclasses > 2: assert False self._multiclass = True return self.multiclass(X, y, test_set=test_set) self.y = y if test_set is not None: self.Xtest = test_set for _ in range(self._number_tries_feasible_ind): self._logger.info("Starting evolution") try: self.create_population() if self.stopping_criteria_tl(): break except RuntimeError as err: self._logger.info("Done evolution (RuntimeError (%s), hist: %s)" % (err, len(self.population.hist))) return self self._logger.info("Population created (hist: %s)" % len(self.population.hist)) if len(self.population.hist) >= self._tournament_size: break if len(self.population.hist) == 0: raise RuntimeError("Could not find a suitable individual") if len(self.population.hist) < self._tournament_size: self._logger.info("Done evolution (hist: %s)" % len(self.population.hist)) return self if self._remove_raw_inputs: for x in range(self.nvar): self._X[x] = None while not self.stopping_criteria(): try: a = self.random_offspring() except RuntimeError as err: self._logger.info("Done evolution (RuntimeError (%s), hist: %s)" % (err, len(self.population.hist))) return self self.replace(a) self._logger.info("Done evolution (hist: %s)" % len(self.population.hist)) return self
Evolutive process
entailment
def decision_function(self, v=None, X=None): "Decision function i.e. the raw data of the prediction" m = self.model(v=v) return m.decision_function(X)
Decision function i.e. the raw data of the prediction
entailment
def predict(self, v=None, X=None): """In classification this returns the classes, in regression it is equivalent to the decision function""" if X is None: X = v v = None m = self.model(v=v) return m.predict(X)
In classification this returns the classes, in regression it is equivalent to the decision function
entailment
def serve(application, host='127.0.0.1', port=8080): """Gevent-based WSGI-HTTP server.""" # Instantiate the server with a host/port configuration and our application. WSGIServer((host, int(port)), application).serve_forever()
Gevent-based WSGI-HTTP server.
entailment
def get_subscribers(obj): """ Returns the subscribers for a given object. :param obj: Any object. """ ctype = ContentType.objects.get_for_model(obj) return Subscription.objects.filter(content_type=ctype, object_id=obj.pk)
Returns the subscribers for a given object. :param obj: Any object.
entailment
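A minimal usage sketch for the helper above, assuming a hypothetical Django model `Article` and a stand-in `send_notification` helper (neither is from the source):

```python
article = Article.objects.get(pk=1)  # Article is a hypothetical model
for subscription in get_subscribers(article):
    # each Subscription row carries the subscribed user
    send_notification(subscription.user, article)  # send_notification is a stand-in
```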
def is_subscribed(user, obj): """ Returns ``True`` if the user is subscribed to the given object. :param user: A ``User`` instance. :param obj: Any object. """ if not user.is_authenticated(): return False ctype = ContentType.objects.get_for_model(obj) try: Subscription.objects.get( user=user, content_type=ctype, object_id=obj.pk) except Subscription.DoesNotExist: return False return True
Returns ``True`` if the user is subscribed to the given object. :param user: A ``User`` instance. :param obj: Any object.
entailment
def _promote(self, name, instantiate=True): """Create a new subclass of Context which incorporates instance attributes and new descriptors. This promotes an instance and its instance attributes up to being a class with class attributes, then returns an instance of that class. """ metaclass = type(self.__class__) contents = self.__dict__.copy() cls = metaclass(str(name), (self.__class__, ), contents) if instantiate: return cls() return cls
Create a new subclass of Context which incorporates instance attributes and new descriptors. This promotes an instance and its instance attributes up to being a class with class attributes, then returns an instance of that class.
entailment
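The promotion trick above is ordinary `type()`-based class creation; a self-contained sketch of the same idea, with hypothetical names:

```python
class Context:
    pass

ctx = Context()
ctx.debug = True  # an instance attribute

# Build a new subclass whose class attributes are the instance's attributes.
Promoted = type('RequestContext', (Context,), ctx.__dict__.copy())
inst = Promoted()

assert inst.debug is True          # instance attribute became a class attribute
assert isinstance(inst, Context)   # still a Context subclass instance
```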
def run_individual(sim_var, reference, neuroml_file, nml_doc, still_included, generate_dir, target, sim_time, dt, simulator, cleanup = True, show=False): """ Run an individual simulation. The candidate data has been flattened into the sim_var dict. The sim_var dict contains parameter:value key value pairs, which are applied to the model before it is simulated. """ for var_name in sim_var.keys(): individual_var_names = var_name.split('+') for individual_var_name in individual_var_names: words = individual_var_name.split('/') type, id1 = words[0].split(':') if ':' in words[1]: variable, id2 = words[1].split(':') else: variable = words[1] id2 = None units = words[2] value = sim_var[var_name] pyneuroml.pynml.print_comment_v(' Changing value of %s (%s) in %s (%s) to: %s %s'%(variable, id2, type, id1, value, units)) if type == 'channel': channel = nml_doc.get_by_id(id1) if channel: print("Setting channel %s"%(channel)) if variable == 'vShift': channel.v_shift = '%s %s'%(value, units) else: pyneuroml.pynml.print_comment_v('Could not find channel with id %s from expression: %s'%(id1, individual_var_name)) exit() elif type == 'cell': cell = None for c in nml_doc.cells: if c.id == id1: cell = c if variable == 'channelDensity': chanDens = None for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts: if cd.id == id2: chanDens = cd chanDens.cond_density = '%s %s'%(value, units) elif variable == 'vShift_channelDensity': chanDens = None for cd in cell.biophysical_properties.membrane_properties.channel_density_v_shifts: if cd.id == id2: chanDens = cd chanDens.v_shift = '%s %s'%(value, units) elif variable == 'channelDensityNernst': chanDens = None for cd in cell.biophysical_properties.membrane_properties.channel_density_nernsts: if cd.id == id2: chanDens = cd chanDens.cond_density = '%s %s'%(value, units) elif variable == 'erev_id': # change all values of erev in channelDensity elements with only this id chanDens = None for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts: if cd.id == id2: chanDens = cd chanDens.erev = '%s %s'%(value, units) elif variable == 'erev_ion': # change all values of erev in channelDensity elements with this ion chanDens = None for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts: if cd.ion == id2: chanDens = cd chanDens.erev = '%s %s'%(value, units) elif variable == 'specificCapacitance': specCap = None for sc in cell.biophysical_properties.membrane_properties.specific_capacitances: if (sc.segment_groups == None and id2 == 'all') or sc.segment_groups == id2 : specCap = sc specCap.value = '%s %s'%(value, units) elif variable == 'resistivity': resistivity = None for rs in cell.biophysical_properties.intracellular_properties.resistivities: if (rs.segment_groups == None and id2 == 'all') or rs.segment_groups == id2 : resistivity = rs resistivity.value = '%s %s'%(value, units) else: pyneuroml.pynml.print_comment_v('Unknown variable (%s) in variable expression: %s'%(variable, individual_var_name)) exit() elif type == 'izhikevich2007Cell': izhcell = None for c in nml_doc.izhikevich2007_cells: if c.id == id1: izhcell = c izhcell.__setattr__(variable, '%s %s'%(value, units)) else: pyneuroml.pynml.print_comment_v('Unknown type (%s) in variable expression: %s'%(type, individual_var_name)) new_neuroml_file = '%s/%s'%(generate_dir,os.path.basename(neuroml_file)) if new_neuroml_file == neuroml_file: pyneuroml.pynml.print_comment_v('Cannot use a directory for generating into (%s) which is the same location of the NeuroML file (%s)!'%(neuroml_file, generate_dir)) pyneuroml.pynml.write_neuroml2_file(nml_doc, new_neuroml_file) for include in still_included: inc_loc = '%s/%s'%(os.path.dirname(os.path.abspath(neuroml_file)),include) pyneuroml.pynml.print_comment_v("Copying non included file %s to %s (%s) beside %s"%(inc_loc, generate_dir,os.path.abspath(generate_dir), new_neuroml_file)) shutil.copy(inc_loc, generate_dir) from pyneuroml.tune.NeuroMLSimulation import NeuroMLSimulation sim = NeuroMLSimulation(reference, neuroml_file = new_neuroml_file, target = target, sim_time = sim_time, dt = dt, simulator = simulator, generate_dir = generate_dir, cleanup = cleanup, nml_doc = nml_doc) sim.go() if show: sim.show() return sim.t, sim.volts
Run an individual simulation. The candidate data has been flattened into the sim_var dict. The sim_var dict contains parameter:value key value pairs, which are applied to the model before it is simulated.
entailment
def run(self,candidates,parameters): """ Run simulation for each candidate This run method will loop through each candidate and run the simulation corresponding to its parameter values. It will populate an array called traces with the resulting voltage traces for the simulation and return it. """ traces = [] start_time = time.time() if self.num_parallel_evaluations == 1: for candidate_i in range(len(candidates)): candidate = candidates[candidate_i] sim_var = dict(zip(parameters,candidate)) pyneuroml.pynml.print_comment_v('\n\n - RUN %i (%i/%i); variables: %s\n'%(self.count,candidate_i+1,len(candidates),sim_var)) self.count+=1 t,v = self.run_individual(sim_var) traces.append([t,v]) else: import pp ppservers = () job_server = pp.Server(self.num_parallel_evaluations, ppservers=ppservers) pyneuroml.pynml.print_comment_v('Running %i candidates across %i local processes'%(len(candidates),job_server.get_ncpus())) jobs = [] for candidate_i in range(len(candidates)): candidate = candidates[candidate_i] sim_var = dict(zip(parameters,candidate)) pyneuroml.pynml.print_comment_v('\n\n - PARALLEL RUN %i (%i/%i of curr candidates); variables: %s\n'%(self.count,candidate_i+1,len(candidates),sim_var)) self.count+=1 cand_dir = self.generate_dir+"/CANDIDATE_%s"%candidate_i if not os.path.exists(cand_dir): os.mkdir(cand_dir) vars = (sim_var, self.ref, self.neuroml_file, self.nml_doc, self.still_included, cand_dir, self.target, self.sim_time, self.dt, self.simulator) job = job_server.submit(run_individual, vars, (), ("pyneuroml.pynml",'pyneuroml.tune.NeuroMLSimulation','shutil','neuroml')) jobs.append(job) for job_i in range(len(jobs)): job = jobs[job_i] pyneuroml.pynml.print_comment_v("Checking parallel job %i/%i; set running so far: %i"%(job_i,len(jobs),self.count)) t,v = job() traces.append([t,v]) #pyneuroml.pynml.print_comment_v("Obtained: %s"%result) ####job_server.print_stats() job_server.destroy() print("-------------------------------------------") end_time = time.time() tot = (end_time-start_time) pyneuroml.pynml.print_comment_v('Ran %i candidates in %s seconds (~%ss per job)'%(len(candidates),tot,tot/len(candidates))) return traces
Run simulation for each candidate This run method will loop through each candidate and run the simulation corresponding to its parameter values. It will populate an array called traces with the resulting voltage traces for the simulation and return it.
entailment
def prepare(self, context): """Executed prior to processing a request.""" if __debug__: log.debug("Assigning thread local request context.") self.local.context = context
Executed prior to processing a request.
entailment
def register(self, kind, handler): """Register a handler for a given type, class, interface, or abstract base class. View registration should happen within the `start` callback of an extension. For example, to register the previous `json` view example: class JSONExtension: def start(self, context): context.view.register(tuple, json) The approach of explicitly referencing a view handler isn't very easy to override without also replacing the extension originally adding it, however there is another approach. Using named handlers registered as discrete plugins (via the `entry_point` argument in `setup.py`) allows the extension to easily ask "what's my handler?" class JSONExtension: def start(self, context): context.view.register( tuple, context.view.json ) Otherwise unknown attributes of the view registry will attempt to look up a handler plugin by that name. """ if __debug__: # In production this logging is completely skipped, regardless of logging level. if py3 and not pypy: # Where possible, we shorten things to just the cannonical name. log.debug("Registering view handler.", extra=dict(type=name(kind), handler=name(handler))) else: # Canonical name lookup is not entirely reliable on some combinations. log.debug("Registering view handler.", extra=dict(type=repr(kind), handler=repr(handler))) # Add the handler to the pool of candidates. This adds to a list instead of replacing the "dictionary item". self._map.add(kind, handler) return handler
Register a handler for a given type, class, interface, or abstract base class. View registration should happen within the `start` callback of an extension. For example, to register the previous `json` view example: class JSONExtension: def start(self, context): context.view.register(tuple, json) The approach of explicitly referencing a view handler isn't very easy to override without also replacing the extension originally adding it, however there is another approach. Using named handlers registered as discrete plugins (via the `entry_point` argument in `setup.py`) allows the extension to easily ask "what's my handler?" class JSONExtension: def start(self, context): context.view.register( tuple, context.view.json ) Otherwise unknown attributes of the view registry will attempt to look up a handler plugin by that name.
entailment
def static(base, mapping=None, far=('js', 'css', 'gif', 'jpg', 'jpeg', 'png', 'ttf', 'woff')): """Serve files from disk. This utility endpoint factory is meant primarily for use in development environments; in production environments it is better (more efficient, secure, etc.) to serve your static content using a front end load balancer such as Nginx. The first argument, `base`, represents the base path to serve files from. Paths below the attachment point for the generated endpoint will combine this base path with the remaining path elements to determine the file to serve. The second argument is an optional dictionary mapping filename extensions to template engines, for cooperation with the TemplateExtension. (See: https://github.com/marrow/template) The result of attempting to serve a mapped path is a 2-tuple of `("{mapping}:{path}", dict())`. For example, to render all `.html` files as Mako templates, you would attach something like the following: class Root: page = static('/path/to/static/pages', dict(html='mako')) By default the "usual culprits" are served with far-futures cache expiry headers. If you wish to change the extensions searched just assign a new `far` iterable. To disable, assign any falsy value. """ base = abspath(base) @staticmethod def static_handler(context, *parts, **kw): path = normpath(pathjoin(base, *parts)) if __debug__: log.debug("Attempting to serve static file.", extra=dict( request = id(context), base = base, path = path )) if not path.startswith(base): # Ensure we only serve files from the allowed path. raise HTTPForbidden("Cowardly refusing to violate base path policy." if __debug__ else None) if not exists(path): # Do the right thing if the file doesn't actually exist. raise HTTPNotFound() if not isfile(path): # Only serve normal files; no UNIX domain sockets, FIFOs, etc., etc. raise HTTPForbidden("Cowardly refusing to open a non-file." if __debug__ else None) if far and path.rpartition('.')[2] in far: context.response.cache_expires = 60*60*24*365 if mapping: # Handle the mapping of filename extensions to 2-tuples. 'Cause why not? _, _, extension = basename(path).partition('.') if extension in mapping: return mapping[extension] + ':' + path, dict() return open(path, 'rb') return static_handler
Serve files from disk. This utility endpoint factory is meant primarily for use in development environments; in production environments it is better (more efficient, secure, etc.) to serve your static content using a front end load balancer such as Nginx. The first argument, `base`, represents the base path to serve files from. Paths below the attachment point for the generated endpoint will combine this base path with the remaining path elements to determine the file to serve. The second argument is an optional dictionary mapping filename extensions to template engines, for cooperation with the TemplateExtension. (See: https://github.com/marrow/template) The result of attempting to serve a mapped path is a 2-tuple of `("{mapping}:{path}", dict())`. For example, to render all `.html` files as Mako templates, you would attach something like the following: class Root: page = static('/path/to/static/pages', dict(html='mako')) By default the "usual culprits" are served with far-futures cache expiry headers. If you wish to change the extensions searched just assign a new `far` iterable. To disable, assign any falsy value.
entailment
def serve(application, host='127.0.0.1', port=8080): """Diesel-based (greenlet) WSGI-HTTP server. As a minor note, this is crazy. Diesel includes Flask, too. """ # Instantiate the server with a host/port configuration and our application. WSGIApplication(application, port=int(port), iface=host).run()
Diesel-based (greenlet) WSGI-HTTP server. As a minor note, this is crazy. Diesel includes Flask, too.
entailment
def process_args(): """ Parse command-line arguments. """ parser = argparse.ArgumentParser(description="A script for plotting files containing spike time data") parser.add_argument('spiketimeFiles', type=str, metavar='<spiketime file>', help='List of text file containing spike times', nargs='+') parser.add_argument('-format', type=str, metavar='<format>', default=DEFAULTS['format'], help='How the spiketimes are represented on each line of file:\n'+\ 'id_t: id of cell, space(s)/tab(s), time of spike (default);\n'+\ 't_id: time of spike, space(s)/tab(s), id of cell;\n'+\ 'sonata: SONATA format HDF5 file containing spike times') parser.add_argument('-rates', action='store_true', default=DEFAULTS['rates'], help='Show a plot of rates') parser.add_argument('-showPlotsAlready', action='store_true', default=DEFAULTS['show_plots_already'], help='Show plots once generated') parser.add_argument('-saveSpikePlotTo', type=str, metavar='<spiketime plot filename>', default=DEFAULTS['save_spike_plot_to'], help='Name of file in which to save spiketime plot') parser.add_argument('-rateWindow', type=int, metavar='<rate window>', default=DEFAULTS['rate_window'], help='Window for rate calculation in ms') parser.add_argument('-rateBins', type=int, metavar='<rate bins>', default=DEFAULTS['rate_bins'], help='Number of bins for rate histogram') return parser.parse_args()
Parse command-line arguments.
entailment
def simple(application, host='127.0.0.1', port=8080): """Python-standard WSGI-HTTP server for testing purposes. The additional work performed here is to match the default startup output of "waitress". This is not a production quality interface and will behave badly under load. """ # Try to be handy as many terminals allow clicking links. print("serving on http://{0}:{1}".format(host, port)) # Bind and launch the server; this is a blocking operation. make_server(host, int(port), application).serve_forever()
Python-standard WSGI-HTTP server for testing purposes. The additional work performed here is to match the default startup output of "waitress". This is not a production quality interface and will behave badly under load.
entailment
def iiscgi(application): """A specialized version of the reference WSGI-CGI server to adapt to Microsoft IIS quirks. This is not a production quality interface and will behave badly under load. """ try: from wsgiref.handlers import IISCGIHandler except ImportError: print("Python 3.2 or newer is required.") raise if not __debug__: warnings.warn("Interactive debugging and other persistence-based processes will not work.") IISCGIHandler().run(application)
A specialized version of the reference WSGI-CGI server to adapt to Microsoft IIS quirks. This is not a production quality interface and will behave badly under load.
entailment
def serve(application, host='127.0.0.1', port=8080, socket=None, **options): """Basic FastCGI support via flup. This web server has many, many options. Please see the Flup project documentation for details. """ # Allow either on-disk socket (recommended) or TCP/IP socket use. if not socket: bindAddress = (host, int(port)) else: bindAddress = socket # Bind and start the blocking web server interface. WSGIServer(application, bindAddress=bindAddress, **options).run()
Basic FastCGI support via flup. This web server has many, many options. Please see the Flup project documentation for details.
entailment
def _get_method_kwargs(self): """ Helper method. Returns kwargs needed to filter the correct object. Can also be used to create the correct object. """ method_kwargs = { 'user': self.user, 'content_type': self.ctype, 'object_id': self.content_object.pk, } return method_kwargs
Helper method. Returns kwargs needed to filter the correct object. Can also be used to create the correct object.
entailment
def save(self, *args, **kwargs): """Adds a subscription for the given user to the given object.""" method_kwargs = self._get_method_kwargs() try: subscription = Subscription.objects.get(**method_kwargs) except Subscription.DoesNotExist: subscription = Subscription.objects.create(**method_kwargs) return subscription
Adds a subscription for the given user to the given object.
entailment
def prepare(self, context): """Add the usual suspects to the context. This adds `request`, `response`, and `path` to the `RequestContext` instance. """ if __debug__: log.debug("Preparing request context.", extra=dict(request=id(context))) # Bridge in WebOb `Request` and `Response` objects. # Extensions shouldn't rely on these, using `environ` where possible instead. context.request = Request(context.environ) context.response = Response(request=context.request) # Record the initial path representing the point where a front-end web server bridged to us. context.environ['web.base'] = context.request.script_name # Track the remaining (unprocessed) path elements. context.request.remainder = context.request.path_info.split('/') if context.request.remainder and not context.request.remainder[0]: del context.request.remainder[0] # Track the "breadcrumb list" of dispatch through distinct controllers. context.path = Bread()
Add the usual suspects to the context. This adds `request`, `response`, and `path` to the `RequestContext` instance.
entailment
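The WebOb bridging described above can be tried standalone; a sketch using `Request.blank` to build a synthetic environ (the path and query here are made up):

```python
from webob import Request, Response

request = Request.blank('/users/42?verbose=1')  # builds a synthetic WSGI environ
response = Response(request=request)

assert request.script_name == ''                # nothing consumed yet
assert request.path_info == '/users/42'
assert request.GET['verbose'] == '1'
# the remainder list the extension tracks would start as:
assert request.path_info.split('/') == ['', 'users', '42']
```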
def dispatch(self, context, consumed, handler, is_endpoint): """Called as dispatch descends into a tier. The base extension uses this to maintain the "current url". """ request = context.request if __debug__: log.debug("Handling dispatch event.", extra=dict( request = id(context), consumed = consumed, handler = safe_name(handler), endpoint = is_endpoint )) # The leading path element (leading slash) requires special treatment. if not consumed and context.request.path_info_peek() == '': consumed = [''] nConsumed = 0 if consumed: # Migrate path elements consumed from the `PATH_INFO` to `SCRIPT_NAME` WSGI environment variables. if not isinstance(consumed, (list, tuple)): consumed = consumed.split('/') for element in consumed: if element == context.request.path_info_peek(): context.request.path_info_pop() nConsumed += 1 else: break # Update the breadcrumb list. context.path.append(Crumb(handler, Path(request.script_name))) if consumed: # Lastly, update the remaining path element list. request.remainder = request.remainder[nConsumed:]
Called as dispatch descends into a tier. The base extension uses this to maintain the "current url".
entailment
def render_none(self, context, result): """Render empty responses.""" context.response.body = b'' del context.response.content_length return True
Render empty responses.
entailment
def render_binary(self, context, result): """Return binary responses unmodified.""" context.response.app_iter = iter((result, )) # This wraps the binary string in a WSGI body iterable. return True
Return binary responses unmodified.
entailment
def render_file(self, context, result): """Perform appropriate metadata wrangling for returned open file handles.""" if __debug__: log.debug("Processing file-like object.", extra=dict(request=id(context), result=repr(result))) response = context.response response.conditional_response = True modified = mktime(gmtime(getmtime(result.name))) response.last_modified = datetime.fromtimestamp(modified) ct, ce = guess_type(result.name) if not ct: ct = 'application/octet-stream' response.content_type, response.content_encoding = ct, ce response.etag = unicode(modified) result.seek(0, 2) # Seek to the end of the file. response.content_length = result.tell() result.seek(0) # Seek back to the start of the file. response.body_file = result return True
Perform appropriate metadata wrangling for returned open file handles.
entailment
def render_generator(self, context, result): """Attempt to serve generator responses through stream encoding. This allows for direct use of cinje template functions, which are generators, as returned views. """ context.response.encoding = 'utf8' context.response.app_iter = ( (i.encode('utf8') if isinstance(i, unicode) else i) # Stream encode unicode chunks. for i in result if i is not None # Skip None values. ) return True
Attempt to serve generator responses through stream encoding. This allows for direct use of cinje template functions, which are generators, as returned views.
entailment
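For illustration, a generator endpoint of the kind this view handler accepts; the names are hypothetical:

```python
def greeting(name):
    yield '<p>'
    yield 'Hello, {}!'.format(name)
    yield None                      # None chunks are skipped by the view handler
    yield '</p>'

# Returning greeting('World') from an endpoint streams three chunks,
# each unicode chunk encoded to UTF-8 as it is emitted.
```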
def serve(application, host='127.0.0.1', port=8080): """CherryPy-based WSGI-HTTP server.""" # Instantiate the server with our configuration and application. server = CherryPyWSGIServer((host, int(port)), application, server_name=host) # Try to be handy as many terminals allow clicking links. print("serving on http://{0}:{1}".format(host, port)) # Bind and launch the server; this is a blocking operation. try: server.start() except KeyboardInterrupt: server.stop()
CherryPy-based WSGI-HTTP server.
entailment
def colorize(self, string, rgb=None, ansi=None, bg=None, ansi_bg=None): '''Returns the colored string''' if not isinstance(string, str): string = str(string) if rgb is None and ansi is None: raise TerminalColorMapException( 'colorize: must specify one named parameter: rgb or ansi') if rgb is not None and ansi is not None: raise TerminalColorMapException( 'colorize: must specify only one named parameter: rgb or ansi') if bg is not None and ansi_bg is not None: raise TerminalColorMapException( 'colorize: must specify only one named parameter: bg or ansi_bg') if rgb is not None: (closestAnsi, closestRgb) = self.convert(rgb) elif ansi is not None: (closestAnsi, closestRgb) = (ansi, self.colors[ansi]) if bg is None and ansi_bg is None: return "\033[38;5;{ansiCode:d}m{string:s}\033[0m".format(ansiCode=closestAnsi, string=string) if bg is not None: (closestBgAnsi, unused) = self.convert(bg) elif ansi_bg is not None: (closestBgAnsi, unused) = (ansi_bg, self.colors[ansi_bg]) return "\033[38;5;{ansiCode:d}m\033[48;5;{bf:d}m{string:s}\033[0m".format(ansiCode=closestAnsi, bf=closestBgAnsi, string=string)
Returns the colored string
entailment
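The escape sequences assembled above follow the xterm 256-color scheme; a standalone sketch of the same format strings, with illustrative color codes:

```python
def ansi_256(text, fg, bg=None):
    # ESC[38;5;<n>m selects a 256-color foreground, ESC[48;5;<n>m a background,
    # and ESC[0m resets all attributes.
    seq = "\033[38;5;{0:d}m".format(fg)
    if bg is not None:
        seq += "\033[48;5;{0:d}m".format(bg)
    return seq + text + "\033[0m"

print(ansi_256("error", 196))        # bright red text
print(ansi_256("note", 16, bg=226))  # near-black text on yellow
```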
def render_serialization(self, context, result): """Render serialized responses.""" resp = context.response serial = context.serialize match = context.request.accept.best_match(serial.types, default_match=self.default) result = serial[match](result) if isinstance(result, str): result = result.decode('utf-8') resp.charset = 'utf-8' resp.content_type = match resp.text = result return True
Render serialized responses.
entailment
def serve(application, host='127.0.0.1', port=8080): """Eventlet-based WSGI-HTTP server. For a more fully-featured Eventlet-capable interface, see also [Spawning](http://pypi.python.org/pypi/Spawning/). """ # Instantiate the server with a bound port and with our application. server(listen(host, int(port)), application)
Eventlet-based WSGI-HTTP server. For a more fully-featured Eventlet-capable interface, see also [Spawning](http://pypi.python.org/pypi/Spawning/).
entailment
def main(args=None): """Main""" vs = [(v-100)*0.001 for v in range(200)] for f in ['IM.channel.nml','Kd.channel.nml']: nml_doc = pynml.read_neuroml2_file(f) for ct in nml_doc.ComponentType: ys = [] for v in vs: req_variables = {'v':'%sV'%v,'vShift':'10mV'} vals = pynml.evaluate_component(ct,req_variables=req_variables) print(vals) if 'x' in vals: ys.append(vals['x']) if 't' in vals: ys.append(vals['t']) if 'r' in vals: ys.append(vals['r']) ax = pynml.generate_plot([vs],[ys], "Some traces from %s in %s"%(ct.name,f), show_plot_already=False ) print(vals) plt.show()
Main
entailment
def process_args(): """ Parse command-line arguments. """ parser = argparse.ArgumentParser( description=("A script which can be run to generate a LEMS " "file to analyse the behaviour of channels in " "NeuroML 2")) parser.add_argument('channelFiles', type=str, nargs='+', metavar='<NeuroML 2 Channel file>', help="Name of the NeuroML 2 file(s)") parser.add_argument('-v', action='store_true', default=DEFAULTS['v'], help="Verbose output") parser.add_argument('-minV', type=int, metavar='<min v>', default=DEFAULTS['minV'], help="Minimum voltage to test (integer, mV), default: %smV"%DEFAULTS['minV']) parser.add_argument('-maxV', type=int, metavar='<max v>', default=DEFAULTS['maxV'], help="Maximum voltage to test (integer, mV), default: %smV"%DEFAULTS['maxV']) parser.add_argument('-temperature', type=float, metavar='<temperature>', default=DEFAULTS['temperature'], help="Temperature (float, celsius), default: %sdegC"%DEFAULTS['temperature']) parser.add_argument('-duration', type=float, metavar='<duration>', default=DEFAULTS['duration'], help="Duration of simulation in ms, default: %sms"%DEFAULTS['duration']) parser.add_argument('-clampDelay', type=float, metavar='<clamp delay>', default=DEFAULTS['clampDelay'], help="Delay before voltage clamp is activated in ms, default: %sms"%DEFAULTS['clampDelay']) parser.add_argument('-clampDuration', type=float, metavar='<clamp duration>', default=DEFAULTS['clampDuration'], help="Duration of voltage clamp in ms, default: %sms"%DEFAULTS['clampDuration']) parser.add_argument('-clampBaseVoltage', type=float, metavar='<clamp base voltage>', default=DEFAULTS['clampBaseVoltage'], help="Clamp base (starting/finishing) voltage in mV, default: %smV"%DEFAULTS['clampBaseVoltage']) parser.add_argument('-stepTargetVoltage', type=float, metavar='<step target voltage>', default=DEFAULTS['stepTargetVoltage'], help=("Voltage in mV through which to step voltage clamps, default: %smV"%DEFAULTS['stepTargetVoltage'])) parser.add_argument('-erev', type=float, metavar='<reversal potential>', default=DEFAULTS['erev'], help="Reversal potential of channel for currents, default: %smV"%DEFAULTS['erev']) parser.add_argument('-scaleDt', type=float, metavar='<scale dt in generated LEMS>', default=DEFAULTS['scaleDt'], help="Scale dt in generated LEMS, default: %s"%DEFAULTS['scaleDt']) parser.add_argument('-caConc', type=float, metavar='<Ca2+ concentration>', default=DEFAULTS['caConc'], help=("Internal concentration of Ca2+ (float, " "concentration in mM), default: %smM"%DEFAULTS['caConc'])) parser.add_argument('-datSuffix', type=str, metavar='<dat suffix>', default=DEFAULTS['datSuffix'], help="String to add to dat file names (before .dat)") parser.add_argument('-norun', action='store_true', default=DEFAULTS['norun'], help=("If used, just generate the LEMS file, " "don't run it")) parser.add_argument('-nogui', action='store_true', default=DEFAULTS['nogui'], help=("Supress plotting of variables and only save " "data to file")) parser.add_argument('-html', action='store_true', default=DEFAULTS['html'], help=("Generate a HTML page featuring the plots for the " "channel")) parser.add_argument('-md', action='store_true', default=DEFAULTS['md'], help=("Generate a (GitHub flavoured) Markdown page featuring the plots for the " "channel")) parser.add_argument('-ivCurve', action='store_true', default=DEFAULTS['ivCurve'], help=("Save currents through voltage clamp at each " "level & plot current vs voltage for ion " "channel")) return parser.parse_args()
Parse command-line arguments.
entailment
def plot_iv_curve(a, hold_v, i, *plt_args, **plt_kwargs): """A single IV curve""" grid = plt_kwargs.pop('grid',True) same_fig = plt_kwargs.pop('same_fig',False) if not len(plt_args): plt_args = ('ko-',) if 'label' not in plt_kwargs: plt_kwargs['label'] = 'Current' if not same_fig: make_iv_curve_fig(a, grid=grid) if type(i) is dict: i = [i[v] for v in hold_v] plt.plot([v*1e3 for v in hold_v], [ii*1e12 for ii in i], *plt_args, **plt_kwargs) plt.legend(loc=2)
A single IV curve
entailment
def root(context): """Multipart AJAX request example. See: http://test.getify.com/mpAjax/description.html """ response = context.response parts = [] for i in range(12): for j in range(12): parts.append(executor.submit(mul, i, j)) def stream(parts, timeout=None): try: for future in as_completed(parts, timeout): mime, result = future.result() result = result.encode('utf8') yield "!!!!!!=_NextPart_{num}\nContent-Type: {mime}\nContent-Length: {length}\n\n".format( num = randint(100000000, 999999999), mime = mime, length = len(result) ).encode('utf8') + result except TimeoutError: for future in parts: future.cancel() response.content_length = None response.app_iter = stream(parts, 0.2) return response
Multipart AJAX request example. See: http://test.getify.com/mpAjax/description.html
entailment
def render_template_with_args_in_file(file, template_file_name, **kwargs): """ Get a file and render the content of the template_file_name with kwargs in a file :param file: A File Stream to write :param template_file_name: path to route with template name :param **kwargs: Args to be rendered in template """ template_file_content = "".join( codecs.open( template_file_name, encoding='UTF-8' ).readlines() ) template_rendered = string.Template(template_file_content).safe_substitute(**kwargs) file.write(template_rendered)
Get a file and render the content of the template_file_name with kwargs in a file :param file: A File Stream to write :param template_file_name: path to route with template name :param **kwargs: Args to be rendered in template
entailment
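One detail of the entry above worth noting: `safe_substitute` leaves unmatched placeholders intact instead of raising. A small standard-library sketch (the template text is made up):

```python
import string

tpl = string.Template("class ${model_name}ListView(${base}):")
print(tpl.safe_substitute(model_name="Article"))
# -> "class ArticleListView(${base}):"  (the unmatched ${base} survives)
```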
def create_or_open(file_name, initial_template_file_name, args): """ Creates a file or opens the file named file_name :param file_name: String with a filename :param initial_template_file_name: String with path to initial template :param args: from console to determine path to save the files """ file = None if not os.path.isfile( os.path.join( args['django_application_folder'], file_name ) ): # If file_name does not exist, create it file = codecs.open( os.path.join( args['django_application_folder'], file_name ), 'w+', encoding='UTF-8' ) print("Creating {}".format(file_name)) if initial_template_file_name: render_template_with_args_in_file(file, initial_template_file_name, **{}) else: # If file exists, just load the file file = codecs.open( os.path.join( args['django_application_folder'], file_name ), 'a+', encoding='UTF-8' ) return file
Creates a file or opens the file named file_name :param file_name: String with a filename :param initial_template_file_name: String with path to initial template :param args: from console to determine path to save the files
entailment
def generic_insert_module(module_name, args, **kwargs): """ In general we have an initial template and then insert new data, so we don't repeat the schema for each module :param module_name: String with module name :param **kwargs: Args to be rendered in template """ file = create_or_open( '{}.py'.format(module_name), os.path.join( BASE_TEMPLATES_DIR, '{}_initial.py.tmpl'.format(module_name) ), args ) render_template_with_args_in_file( file, os.path.join( BASE_TEMPLATES_DIR, '{}.py.tmpl'.format(module_name) ), **kwargs ) file.close()
In general we have an initial template and then insert new data, so we don't repeat the schema for each module :param module_name: String with module name :param **kwargs: Args to be rendered in template
entailment
def sanity_check(args): """ Verify if the work folder is a django app. A valid django app always must have a models.py file :return: None """ if not os.path.isfile( os.path.join( args['django_application_folder'], 'models.py' ) ): print("django_application_folder is not a Django application folder") sys.exit(1)
Verify if the work folder is a django app. A valid django app always must have a models.py file :return: None
entailment
def generic_insert_with_folder(folder_name, file_name, template_name, args): """ In general, if we need to put a file in a folder, we use this method """ # First we make sure views are a package instead of a file if not os.path.isdir( os.path.join( args['django_application_folder'], folder_name ) ): os.mkdir(os.path.join(args['django_application_folder'], folder_name)) codecs.open( os.path.join( args['django_application_folder'], folder_name, '__init__.py' ), 'w+' ) view_file = create_or_open( os.path.join( folder_name, '{}.py'.format(file_name) ), '', args ) # Load content from template render_template_with_args_in_file( view_file, os.path.join( BASE_TEMPLATES_DIR, template_name ), model_name=args['model_name'], model_prefix=args['model_prefix'], model_name_lower=args['model_name'].lower(), application_name=args['django_application_folder'].split("/")[-1] ) view_file.close()
In general, if we need to put a file in a folder, we use this method
entailment
def serve(application, host='127.0.0.1', port=8080, threads=4, **kw): """The recommended development HTTP server. Note that this server performs additional buffering and will not honour chunked encoding breaks. """ # Bind and start the server; this is a blocking process. serve_(application, host=host, port=int(port), threads=int(threads), **kw)
The recommended development HTTP server. Note that this server performs additional buffering and will not honour chunked encoding breaks.
entailment
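A hedged usage sketch for the serve helper above; the WSGI app is hypothetical, and the serve call is shown commented out because it blocks until interrupted.

def app(environ, start_response):
    # Trivial WSGI endpoint, for illustration only.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

# serve(app, host='0.0.0.0', port=8000, threads=8)  # blocks until interrupted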
def show(self): """ Plot the result of the simulation once it has been initialized """ from matplotlib import pyplot as plt if self.already_run: for ref in self.volts.keys(): plt.plot(self.t, self.volts[ref], label=ref) plt.title("Simulation voltage vs time") plt.legend() plt.xlabel("Time [ms]") plt.ylabel("Voltage [mV]") plt.show() else: pynml.print_comment("First you have to 'go()' the simulation.", True)
Plot the result of the simulation once it has been initialized
entailment
def mul(self, a: int = None, b: int = None) -> 'json': """Multiply two values together and return the result via JSON. Python 3 function annotations are used to ensure that the arguments are integers. This requires the functionality of `web.ext.annotation:AnnotationExtension`. There are several ways to execute this method: * POST http://localhost:8080/mul * GET http://localhost:8080/mul?a=27&b=42 * GET http://localhost:8080/mul/27/42 The latter relies on the fact we can't descend past a callable method so the remaining path elements are used as positional arguments, whereas the others rely on keyword argument assignment from a form-encoded request body or query string arguments. (Security note: any data in the request body takes precedence over query string arguments!) You can easily test these on the command line using cURL: curl http://localhost:8080/mul/27/42 # HTTP GET curl -d a=27 -d b=42 http://localhost:8080/mul # HTTP POST """ if not a or not b: return dict(message="Pass arguments a and b to multiply them together!") return dict(answer=a * b)
Multiply two values together and return the result via JSON. Python 3 function annotations are used to ensure that the arguments are integers. This requires the functionality of `web.ext.annotation:AnnotationExtension`. There are several ways to execute this method: * POST http://localhost:8080/mul * GET http://localhost:8080/mul?a=27&b=42 * GET http://localhost:8080/mul/27/42 The latter relies on the fact we can't descend past a callable method so the remaining path elements are used as positional arguments, whereas the others rely on keyword argument assignment from a form-encoded request body or query string arguments. (Security note: any data in the request body takes precedence over query string arguments!) You can easily test these on the command line using cURL: curl http://localhost:8080/mul/27/42 # HTTP GET curl -d a=27 -d b=42 http://localhost:8080/mul # HTTP POST
entailment
def colorize(string, rgb=None, ansi=None, bg=None, ansi_bg=None, fd=1): '''Returns the colored string to print on the terminal. This function detects the terminal type and if it is supported and the output is not going to a pipe or a file, then it will return the colored string, otherwise it will return the string without modifications. string = the string to print. Only accepts strings, unicode strings must be encoded in advance. rgb = Rgb color for the text; for example 0xFF0000 is red. ansi = Ansi color for the text bg = Rgb color for the background ansi_bg = Ansi color for the background fd = The file descriptor that will be used by print, by default is the stdout ''' # Reinitializes if the fd used is different if colorize.fd != fd: colorize.init = False colorize.fd = fd # Checks if it is on a terminal, and if the terminal is recognized if not colorize.init: colorize.init = True colorize.is_term = isatty(fd) if 'TERM' in environ: if environ['TERM'].startswith('xterm'): colorize.cmap = XTermColorMap() elif environ['TERM'] == 'vt100': colorize.cmap = VT100ColorMap() else: colorize.is_term = False else: colorize.is_term = False if colorize.is_term: string = colorize.cmap.colorize(string, rgb, ansi, bg, ansi_bg) return string
Returns the colored string to print on the terminal. This function detects the terminal type and if it is supported and the output is not going to a pipe or a file, then it will return the colored string, otherwise it will return the string without modifications. string = the string to print. Only accepts strings, unicode strings must be encoded in advance. rgb = Rgb color for the text; for example 0xFF0000 is red. ansi = Ansi color for the text bg = Rgb color for the background ansi_bg = Ansi color for the background fd = The file descriptor that will be used by print, by default is the stdout
entailment
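A standalone sketch of the same guard colorize applies, not the implementation itself: ANSI escapes are only emitted when stdout is a recognised terminal, so piped output stays clean.

import os
import sys

def maybe_color(text, ansi_code):
    # Emit escapes only for a terminal whose TERM we recognise.
    term = os.environ.get('TERM', '')
    if sys.stdout.isatty() and (term.startswith('xterm') or term == 'vt100'):
        return '\x1b[%dm%s\x1b[0m' % (ansi_code, text)
    return text

print(maybe_color('error', 31))  # red in an xterm, plain text in a pipe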
def mutate(self, context, handler, args, kw): """Inspect and potentially mutate the given handler's arguments. The args list and kw dictionary may be freely modified, though invalid arguments to the handler will fail. """ def cast(arg, val): if arg not in annotations: return val converter = annotations[arg] try: val = converter(val) except (ValueError, TypeError) as e: parts = list(e.args) parts[0] = parts[0] + " processing argument '{}'".format(arg) e.args = tuple(parts) raise return val annotations = getattr(handler.__func__ if hasattr(handler, '__func__') else handler, '__annotations__', None) if not annotations: return argspec = getfullargspec(handler) arglist = list(argspec.args) if ismethod(handler): del arglist[0] for i, value in enumerate(list(args)): key = arglist[i] if key in annotations: args[i] = cast(key, value) # Convert keyword arguments for key, value in list(items(kw)): if key in annotations: kw[key] = cast(key, value)
Inspect and potentially mutate the given handler's arguments. The args list and kw dictionary may be freely modified, though invalid arguments to the handler will fail.
entailment
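A self-contained sketch of annotation-driven casting in the spirit of the extension above; cast_args and the sample mul function are hypothetical stand-ins, not the WebCore API.

import inspect

def cast_args(fn, args, kw):
    # Convert positional and keyword values using the callable's annotations.
    hints = getattr(fn, '__annotations__', {})
    names = inspect.getfullargspec(fn).args
    args = [hints[n](v) if n in hints else v for n, v in zip(names, args)]
    kw = {k: (hints[k](v) if k in hints else v) for k, v in kw.items()}
    return args, kw

def mul(a: int, b: int):
    return a * b

args, kw = cast_args(mul, ['27'], {'b': '42'})
print(mul(*args, **kw))  # 1134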
def transform(self, context, handler, result): """Transform the value returned by the controller endpoint. This extension transforms returned values if the endpoint has a return type annotation. """ handler = handler.__func__ if hasattr(handler, '__func__') else handler annotation = getattr(handler, '__annotations__', {}).get('return', None) if annotation: return (annotation, result) return result
Transform the value returned by the controller endpoint. This extension transforms returned values if the endpoint has a return type annotation.
entailment
def process_args(): """ Parse command-line arguments. """ parser = argparse.ArgumentParser( description=("A script which can be run to tune a NeuroML 2 model against a number of target properties. Work in progress!")) parser.add_argument('prefix', type=str, metavar='<prefix>', help="Prefix for optimisation run") parser.add_argument('neuromlFile', type=str, metavar='<neuromlFile>', help="NeuroML2 file containing model") parser.add_argument('target', type=str, metavar='<target>', help="Target in NeuroML2 model") parser.add_argument('parameters', type=str, metavar='<parameters>', help="List of parameters to adjust") parser.add_argument('maxConstraints', type=str, metavar='<max_constraints>', help="Max values for parameters") parser.add_argument('minConstraints', type=str, metavar='<min_constraints>', help="Min values for parameters") parser.add_argument('targetData', type=str, metavar='<targetData>', help="List of name/value pairs for properties extracted from data to judge fitness against") parser.add_argument('weights', type=str, metavar='<weights>', help="Weights to assign to each target name/value pair") parser.add_argument('-simTime', type=float, metavar='<simTime>', default=DEFAULTS['simTime'], help="Simulation duration") parser.add_argument('-dt', type=float, metavar='<dt>', default=DEFAULTS['dt'], help="Simulation timestep") parser.add_argument('-analysisStartTime', type=float, metavar='<analysisStartTime>', default=DEFAULTS['analysisStartTime'], help="Analysis start time") parser.add_argument('-populationSize', type=int, metavar='<populationSize>', default=DEFAULTS['populationSize'], help="Population size") parser.add_argument('-maxEvaluations', type=int, metavar='<maxEvaluations>', default=DEFAULTS['maxEvaluations'], help="Maximum evaluations") parser.add_argument('-numSelected', type=int, metavar='<numSelected>', default=DEFAULTS['numSelected'], help="Number selected") parser.add_argument('-numOffspring', type=int, metavar='<numOffspring>', default=DEFAULTS['numOffspring'], help="Number offspring") parser.add_argument('-mutationRate', type=float, metavar='<mutationRate>', default=DEFAULTS['mutationRate'], help="Mutation rate") parser.add_argument('-numElites', type=int, metavar='<numElites>', default=DEFAULTS['numElites'], help="Number of elites") parser.add_argument('-numParallelEvaluations', type=int, metavar='<numParallelEvaluations>', default=DEFAULTS['numParallelEvaluations'], help="Number of evaluations to run in parallel") parser.add_argument('-seed', type=int, metavar='<seed>', default=DEFAULTS['seed'], help="Seed for optimiser") parser.add_argument('-simulator', type=str, metavar='<simulator>', default=DEFAULTS['simulator'], help="Simulator to run") parser.add_argument('-knownTargetValues', type=str, metavar='<knownTargetValues>', help="List of name/value pairs which represent the known values of the target parameters") parser.add_argument('-nogui', action='store_true', default=DEFAULTS['nogui'], help="Should GUI elements be suppressed?") parser.add_argument('-showPlotAlready', action='store_true', default=DEFAULTS['showPlotAlready'], help="Should generated plots be suppressed until show() called?") parser.add_argument('-verbose', action='store_true', default=DEFAULTS['verbose'], help="Verbose mode") parser.add_argument('-dryRun', action='store_true', default=DEFAULTS['dryRun'], help="Dry run; just print setup information") parser.add_argument('-extraReportInfo', type=str, metavar='<extraReportInfo>', default=DEFAULTS['extraReportInfo'], help='Extra tag/value pairs can be put into the report.json: -extraReportInfo=["tag":"value"]') parser.add_argument('-cleanup', action='store_true', default=DEFAULTS['cleanup'], help="Should (some) generated files, e.g. *.dat, be deleted as optimisation progresses?") return parser.parse_args()
Parse command-line arguments.
entailment
def process_args(): """ Parse command-line arguments. """ parser = argparse.ArgumentParser(description="A file for overlaying POVRay files generated from NeuroML by NeuroML1ToPOVRay.py with cell activity (e.g. as generated from a neuroConstruct simulation)") parser.add_argument('prefix', type=str, metavar='<network prefix>', help='Prefix for files in PovRay, e.g. use PREFIX is files are PREFIX.pov, PREFIX_net.inc, etc.') parser.add_argument('-activity', action='store_true', default=False, help="If this is specified, overlay network activity (not tested!!)") parser.add_argument('-maxV', type=float, metavar='<maxV>', default=50.0, help='Max voltage for colour scale in mV') parser.add_argument('-minV', type=float, metavar='<minV>', default=-90.0, help='Min voltage for colour scale in mV') parser.add_argument('-startTime', type=float, metavar='<startTime>', default=0, help='Time in ms at which to start overlaying the simulation activity') parser.add_argument('-endTime', type=float, metavar='<endTime>', default=100, help='End time of simulation activity in ms') parser.add_argument('-title', type=str, metavar='<title>', default='Movie generated from neuroConstruct simulation', help='Title for movie') parser.add_argument('-left', type=str, metavar='<left info>', default='', help='Text on left') parser.add_argument('-frames', type=int, metavar='<frames>', default=100, help='Number of frames') parser.add_argument('-name', type=str, metavar='<Movie name>', default='output', help='Movie name') return parser.parse_args()
Parse command-line arguments.
entailment
def serve(application, host='127.0.0.1', port=8080, **options): """Tornado's HTTPServer. This is a high quality asynchronous server with many options. For details, please visit: http://www.tornadoweb.org/en/stable/httpserver.html#http-server """ # Wrap our our WSGI application (potentially stack) in a Tornado adapter. container = tornado.wsgi.WSGIContainer(application) # Spin up a Tornado HTTP server using this container. http_server = tornado.httpserver.HTTPServer(container, **options) http_server.listen(int(port), host) # Start and block on the Tornado IO loop. tornado.ioloop.IOLoop.instance().start()
Tornado's HTTPServer. This is a high quality asynchronous server with many options. For details, please visit: http://www.tornadoweb.org/en/stable/httpserver.html#http-server
entailment
def parse_arguments(): """Parse command line arguments""" import argparse parser = argparse.ArgumentParser( description=('pyNeuroML v%s: Python utilities for NeuroML2' % __version__ + "\n libNeuroML v%s"%(neuroml.__version__) + "\n jNeuroML v%s"%JNEUROML_VERSION), usage=('pynml [-h|--help] [<shared options>] ' '<one of the mutually-exclusive options>'), formatter_class=argparse.RawTextHelpFormatter ) shared_options = parser.add_argument_group( title='Shared options', description=('These options can be added to any of the ' 'mutually-exclusive options') ) shared_options.add_argument( '-verbose', action='store_true', default=DEFAULTS['v'], help='Verbose output' ) shared_options.add_argument( '-java_max_memory', metavar='MAX', default=DEFAULTS['default_java_max_memory'], help=('Java memory for jNeuroML, e.g. 400M, 2G (used in\n' '-Xmx argument to java)') ) shared_options.add_argument( '-nogui', action='store_true', default=DEFAULTS['nogui'], help=('Suppress GUI,\n' 'i.e. show no plots, just save results') ) shared_options.add_argument( 'lems_file', type=str, metavar='<LEMS/NeuroML 2 file>', help='LEMS/NeuroML 2 file to process' ) mut_exc_opts_grp = parser.add_argument_group( title='Mutually-exclusive options', description='Only one of these options can be selected' ) mut_exc_opts = mut_exc_opts_grp.add_mutually_exclusive_group(required=False) mut_exc_opts.add_argument( '-sedml', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert\n' 'simulation settings (duration, dt, what to save)\n' 'to SED-ML format') ) mut_exc_opts.add_argument( '-neuron', nargs=argparse.REMAINDER, help=('(Via jNeuroML) Load a LEMS file, and convert it to\n' 'NEURON format.\n' 'The full format of the \'-neuron\' option is:\n' '-neuron [-nogui] [-run] [-outputdir dir] <LEMS file>\n' ' -nogui\n' ' do not generate graphical elements in NEURON,\n' ' just run, save data, and quit\n' ' -run\n' ' compile NMODL files and run the main NEURON\n' ' hoc file (Linux only currently)\n' ' -outputdir <dir>\n' ' generate NEURON files in directory <dir>\n' ' <LEMS file>\n' ' the LEMS file to use') ) mut_exc_opts.add_argument( '-svg', action='store_true', help=('(Via jNeuroML) Convert NeuroML2 file (network & cells)\n' 'to SVG format view of 3D structure') ) mut_exc_opts.add_argument( '-png', action='store_true', help=('(Via jNeuroML) Convert NeuroML2 file (network & cells)\n' 'to PNG format view of 3D structure') ) mut_exc_opts.add_argument( '-dlems', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to dLEMS format, a distilled form of LEMS in JSON') ) mut_exc_opts.add_argument( '-vertex', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to VERTEX format') ) mut_exc_opts.add_argument( '-xpp', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to XPPAUT format') ) mut_exc_opts.add_argument( '-dnsim', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to DNsim format') ) mut_exc_opts.add_argument( '-brian', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to Brian format') ) mut_exc_opts.add_argument( '-sbml', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to SBML format') ) mut_exc_opts.add_argument( '-matlab', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to MATLAB format') ) mut_exc_opts.add_argument( '-cvode', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to C format using CVODE package') ) mut_exc_opts.add_argument( '-nineml', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to NineML format') ) mut_exc_opts.add_argument( '-spineml', action='store_true', help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to SpineML format') ) mut_exc_opts.add_argument( '-sbml-import', metavar=('<SBML file>', 'duration', 'dt'), nargs=3, help=('(Via jNeuroML) Load a SBML file, and convert it\n' 'to LEMS format using values for duration & dt\n' 'in ms (ignoring SBML units)') ) mut_exc_opts.add_argument( '-sbml-import-units', metavar=('<SBML file>', 'duration', 'dt'), nargs=3, help=('(Via jNeuroML) Load a SBML file, and convert it\n' 'to LEMS format using values for duration & dt\n' 'in ms (attempt to extract SBML units; ensure units\n' 'are valid in the SBML!)') ) mut_exc_opts.add_argument( '-vhdl', metavar=('neuronid', '<LEMS file>'), nargs=2, help=('(Via jNeuroML) Load a LEMS file, and convert it\n' 'to VHDL format') ) mut_exc_opts.add_argument( '-graph', metavar=('level'), nargs=1, help=('Load a NeuroML file, and convert it to a graph using\n' 'GraphViz. Detail is set by level (1, 2, etc.)') ) mut_exc_opts.add_argument( '-matrix', metavar=('level'), nargs=1, help=('Load a NeuroML file, and convert it to a matrix displaying\n' 'connectivity. Detail is set by level (1, 2, etc.)') ) mut_exc_opts.add_argument( '-validate', action='store_true', help=('(Via jNeuroML) Validate NeuroML2 file(s) against the\n' 'latest Schema') ) mut_exc_opts.add_argument( '-validatev1', action='store_true', help=('(Via jNeuroML) Validate NeuroML file(s) against the\n' 'v1.8.1 Schema') ) return parser.parse_args()
Parse command line arguments
entailment
def quick_summary(nml2_doc): ''' Or better just use nml2_doc.summary(show_includes=False) ''' info = 'Contents of NeuroML 2 document: %s\n'%nml2_doc.id membs = inspect.getmembers(nml2_doc) for memb in membs: if isinstance(memb[1], list) and len(memb[1])>0 \ and not memb[0].endswith('_'): info+=' %s:\n ['%memb[0] for entry in memb[1]: extra = '???' extra = entry.name if hasattr(entry,'name') else extra extra = entry.href if hasattr(entry,'href') else extra extra = entry.id if hasattr(entry,'id') else extra info+=" %s (%s),"%(entry, extra) info+=']\n' return info
Or better just use nml2_doc.summary(show_includes=False)
entailment
def execute_command_in_dir(command, directory, verbose=DEFAULTS['v'], prefix="Output: ", env=None): """Execute a command in a specific working directory""" if os.name == 'nt': directory = os.path.normpath(directory) print_comment("Executing: (%s) in directory: %s" % (command, directory), verbose) if env is not None: print_comment("Extra env variables %s" % (env), verbose) try: if os.name == 'nt': return_string = subprocess.check_output(command, cwd=directory, shell=True, env=env, close_fds=False) else: return_string = subprocess.check_output(command, cwd=directory, shell=True, stderr=subprocess.STDOUT, env=env, close_fds=True) return_string = return_string.decode("utf-8") # For Python 3 print_comment('Command completed. Output: \n %s%s' % (prefix,return_string.replace('\n','\n '+prefix)), verbose) return return_string except AttributeError: # For python 2.6... print_comment_v('Assuming Python 2.6...') return_string = subprocess.Popen(command, cwd=directory, shell=True, stdout=subprocess.PIPE).communicate()[0] return return_string except subprocess.CalledProcessError as e: print_comment_v('*** Problem running command: \n %s'%e) print_comment_v('%s%s'%(prefix,e.output.decode().replace('\n','\n'+prefix))) return None except Exception as e: print_comment_v('*** Unknown problem running command: %s'%e) return None
Execute a command in a specific working directory
entailment
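A minimal standalone equivalent of the happy path above, standard library only; a POSIX shell is assumed for the echo command.

import subprocess

output = subprocess.check_output('echo hello', cwd='.', shell=True,
                                 stderr=subprocess.STDOUT)
print(output.decode('utf-8').strip())  # hello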
def evaluate_component(comp_type, req_variables={}, parameter_values={}): print_comment('Evaluating %s with req:%s; params:%s'%(comp_type.name,req_variables,parameter_values)) exec_str = '' return_vals = {} from math import exp for p in parameter_values: exec_str+='%s = %s\n'%(p, get_value_in_si(parameter_values[p])) for r in req_variables: exec_str+='%s = %s\n'%(r, get_value_in_si(req_variables[r])) for c in comp_type.Constant: exec_str+='%s = %s\n'%(c.name, get_value_in_si(c.value)) for d in comp_type.Dynamics: for dv in d.DerivedVariable: exec_str+='%s = %s\n'%(dv.name, dv.value) exec_str+='return_vals["%s"] = %s\n'%(dv.name, dv.name) for cdv in d.ConditionalDerivedVariable: for case in cdv.Case: if case.condition: cond = case.condition.replace('.neq.','!=').replace('.eq.','==').replace('.gt.','>').replace('.lt.','<') exec_str+='if ( %s ): %s = %s \n'%(cond, cdv.name, case.value) else: exec_str+='else: %s = %s \n'%(cdv.name, case.value) exec_str+='\n' exec_str+='return_vals["%s"] = %s\n'%(cdv.name, cdv.name) '''print_comment_v(exec_str)''' exec(exec_str) return return_vals
print_comment_v(exec_str)
entailment
def after(self, context, exc=None): """Executed after dispatch has returned and the response populated, prior to anything being sent to the client.""" duration = context._duration = round((time.time() - context._start_time) * 1000) # Convert to ms. delta = unicode(duration) # Default response augmentation. if self.header: context.response.headers[self.header] = delta if self.log: self.log("Response generated in " + delta + " ms.", extra=dict( duration = duration, request = id(context) ))
Executed after dispatch has returned and the response populated, prior to anything being sent to the client.
entailment
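The millisecond arithmetic used above, shown in isolation; the timed work is a placeholder.

import time

start = time.time()
# ... request handling would happen here ...
duration = round((time.time() - start) * 1000)  # elapsed time in ms
print('Response generated in %d ms.' % duration)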
def _process_flat_kwargs(source, kwargs): """Apply a flat namespace transformation to recreate (in some respects) a rich structure. This applies several transformations, which may be nested: `foo` (singular): define a simple value named `foo` `foo` (repeated): define a simple value for placement in an array named `foo` `foo[]`: define a simple value for placement in an array, even if there is only one `foo.<id>`: define a simple value to place in the `foo` array at the identified index By nesting, you may define deeper, more complex structures: `foo.bar`: define a value for the named element `bar` of the `foo` dictionary `foo.<id>.bar`: define a `bar` dictionary element on the array element marked by that ID References to `<id>` represent numeric "attributes", which makes the parent reference be treated as an array, not a dictionary. Exact indexes may not be preserved if there are voids; Python lists are not sparse. No validation of values is performed. """ ordered_arrays = [] # Process arguments one at a time and apply them to the kwargs passed in. for name, value in source.items(): container = kwargs if '.' in name: parts = name.split('.') name = name.rpartition('.')[2] for target, following in zip(parts[:-1], parts[1:]): if following.isnumeric(): # Prepare any use of numeric IDs. container.setdefault(target, [{}]) if container[target] not in ordered_arrays: ordered_arrays.append(container[target]) container = container[target][0] continue container = container.setdefault(target, {}) if name.endswith('[]'): # `foo[]` or `foo.bar[]` etc. name = name[:-2] container.setdefault(name, []) container[name].append(value) continue if name.isnumeric() and container is not kwargs: # trailing identifiers, `foo.<id>` container[int(name)] = value continue if name in container: if not isinstance(container[name], list): container[name] = [container[name]] container[name].append(value) continue container[name] = value for container in ordered_arrays: elements = container[0] del container[:] container.extend(value for name, value in sorted(elements.items()))
Apply a flat namespace transformation to recreate (in some respects) a rich structure. This applies several transformations, which may be nested: `foo` (singular): define a simple value named `foo` `foo` (repeated): define a simple value for placement in an array named `foo` `foo[]`: define a simple value for placement in an array, even if there is only one `foo.<id>`: define a simple value to place in the `foo` array at the identified index By nesting, you may define deeper, more complex structures: `foo.bar`: define a value for the named element `bar` of the `foo` dictionary `foo.<id>.bar`: define a `bar` dictionary element on the array element marked by that ID References to `<id>` represent numeric "attributes", which makes the parent reference be treated as an array, not a dictionary. Exact indexes may not be preserved if there are voids; Python lists are not sparse. No validation of values is performed.
entailment
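A deliberately reduced sketch of the flat-to-nested idea described above, handling only dotted names and the name[] suffix (numeric <id> handling is omitted); nest_flat is a hypothetical helper, not the internal function.

def nest_flat(source):
    # Build nested dicts from dotted keys; a `name[]` suffix forces a list.
    out = {}
    for name, value in source.items():
        parts = name.split('.')
        container = out
        for part in parts[:-1]:
            container = container.setdefault(part, {})
        leaf = parts[-1]
        if leaf.endswith('[]'):
            container.setdefault(leaf[:-2], []).append(value)
        else:
            container[leaf] = value
    return out

print(nest_flat({'foo.bar': 1, 'tags[]': 'a'}))
# {'foo': {'bar': 1}, 'tags': ['a']}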
def process_args(): """ Parse command-line arguments. """ parser = argparse.ArgumentParser(description="A file for converting NeuroML v2 files into POVRay files for 3D rendering") parser.add_argument('neuroml_file', type=str, metavar='<NeuroML file>', help='NeuroML (version 2 beta 3+) file to be converted to PovRay format (XML or HDF5 format)') parser.add_argument('-split', action='store_true', default=False, help="If this is specified, generate separate pov files for cells & network. Default is false") parser.add_argument('-background', type=str, metavar='<background colour>', default=_WHITE, help='Colour of background, e.g. <0,0,0,0.55>') parser.add_argument('-movie', action='store_true', default=False, help="If this is specified, generate a ini file for generating a sequence of frames for a movie of the 3D structure") parser.add_argument('-inputs', action='store_true', default=False, help="If this is specified, show the locations of (synaptic, current clamp, etc.) inputs into the cells of the network") parser.add_argument('-conns', action='store_true', default=False, help="If this is specified, show the connections present in the network with lines") parser.add_argument('-conn_points', action='store_true', default=False, help="If this is specified, show the end points of the connections present in the network") parser.add_argument('-v', action='store_true', default=False, help="Verbose output") parser.add_argument('-frames', type=int, metavar='<frames>', default=36, help='Number of frames in movie') parser.add_argument('-posx', type=float, metavar='<position offset x>', default=0, help='Offset position in x dir (0 is centre, 1 is top)') parser.add_argument('-posy', type=float, metavar='<position offset y>', default=0, help='Offset position in y dir (0 is centre, 1 is top)') parser.add_argument('-posz', type=float, metavar='<position offset z>', default=0, help='Offset position in z dir (0 is centre, 1 is top)') parser.add_argument('-viewx', type=float, metavar='<view offset x>', default=0, help='Offset viewing point in x dir (0 is centre, 1 is top)') parser.add_argument('-viewy', type=float, metavar='<view offset y>', default=0, help='Offset viewing point in y dir (0 is centre, 1 is top)') parser.add_argument('-viewz', type=float, metavar='<view offset z>', default=0, help='Offset viewing point in z dir (0 is centre, 1 is top)') parser.add_argument('-scalex', type=float, metavar='<scale position x>', default=1, help='Scale position from network in x dir') parser.add_argument('-scaley', type=float, metavar='<scale position y>', default=1.5, help='Scale position from network in y dir') parser.add_argument('-scalez', type=float, metavar='<scale position z>', default=1, help='Scale position from network in z dir') parser.add_argument('-mindiam', type=float, metavar='<minimum diameter dendrites/axons>', default=0, help='Minimum diameter for dendrites/axons (to improve visualisations)') parser.add_argument('-plane', action='store_true', default=False, help="If this is specified, add a 2D plane below cell/network") parser.add_argument('-segids', action='store_true', default=False, help="Show segment ids") return parser.parse_args()
Parse command-line arguments.
entailment
def _configure(self, config): """Prepare the incoming configuration and ensure certain expected values are present. For example, this ensures BaseExtension is included in the extension list, and populates the logging config. """ config = config or dict() # We really need this to be there. if 'extensions' not in config: config['extensions'] = list() if not any(isinstance(ext, BaseExtension) for ext in config['extensions']): # Always make sure the BaseExtension is present since request/response objects are handy. config['extensions'].insert(0, BaseExtension()) if not any(isinstance(ext, arguments.ArgumentExtension) for ext in config['extensions']): # Prepare a default set of argument mutators. config['extensions'].extend([ arguments.ValidateArgumentsExtension(), arguments.ContextArgsExtension(), arguments.RemainderArgsExtension(), arguments.QueryStringArgsExtension(), arguments.FormEncodedKwargsExtension(), arguments.JSONKwargsExtension(), ]) config['extensions'].append(self) # Allow the application object itself to register callbacks. try: addLoggingLevel('trace', logging.DEBUG - 5) except AttributeError: pass # Tests are skipped on these as we have no particular need to test Python's own logging mechanism. level = config.get('logging', {}).get('level', None) if level: # pragma: no cover logging.basicConfig(level=getattr(logging, level.upper())) elif 'logging' in config: # pragma: no cover logging.config.dictConfig(config['logging']) return config
Prepare the incoming configuration and ensure certain expected values are present. For example, this ensures BaseExtension is included in the extension list, and populates the logging config.
entailment
def serve(self, service='auto', **options): # pragma: no cover """Initiate a web server service to serve this application. You can always use the Application instance as a bare WSGI application, of course. This method is provided as a convenience. Pass in the name of the service you wish to use, and any additional configuration options appropriate for that service. Almost all services accept `host` and `port` options, some also allow you to specify an on-disk `socket`. By default all web servers will listen to `127.0.0.1` (loopback only) on port 8080. """ service = load(service, 'web.server') # We don't bother with a full registry for these one-time lookups. try: service(self, **options) except KeyboardInterrupt: # We catch this as SIG_TERM or ^C are basically the only ways to stop most servers. pass # Notify extensions that the service has returned and we are exiting. for ext in self.__context.extension.signal.stop: ext(self.__context)
Initiate a web server service to serve this application. You can always use the Application instance as a bare WSGI application, of course. This method is provided as a convenience. Pass in the name of the service you wish to use, and any additional configuration options appropriate for that service. Almost all services accept `host` and `port` options, some also allow you to specify an on-disk `socket`. By default all web servers will listen to `127.0.0.1` (loopback only) on port 8080.
entailment
def application(self, environ, start_response): """Process a single WSGI request/response cycle. This is the WSGI handler for WebCore. Depending on the presence of extensions providing WSGI middleware, the `__call__` attribute of the Application instance will either become this, or become the outermost middleware callable. Most apps won't utilize middleware, the extension interface is preferred for most operations in WebCore. They allow for code injection at various intermediary steps in the processing of a request and response. """ context = environ['wc.context'] = self.RequestContext(environ=environ) signals = context.extension.signal # Announce the start of a request cycle. This executes `prepare` and `before` callbacks in the correct order. for ext in signals.pre: ext(context) # Identify the endpoint for this request. is_endpoint, handler = context.dispatch(context, context.root, context.environ['PATH_INFO']) if is_endpoint: try: result = self._execute_endpoint(context, handler, signals) # Process the endpoint. except Exception as e: log.exception("Caught exception attempting to execute the endpoint.") result = HTTPInternalServerError(str(e) if __debug__ else "Please see the logs.") if 'debugger' in context.extension.feature: context.response = result for ext in signals.after: ext(context) # Allow signals to clean up early. raise else: # If no endpoint could be resolved, that's a 404. result = HTTPNotFound("Dispatch failed." if __debug__ else None) if __debug__: log.debug("Result prepared, identifying view handler.", extra=dict( request = id(context), result = safe_name(type(result)) )) # Identify a view capable of handling this result. for view in context.view(result): if view(context, result): break else: # We've run off the bottom of the list of possible views. raise TypeError("No view could be found to handle: " + repr(type(result))) if __debug__: log.debug("View identified, populating response.", extra=dict( request = id(context), view = repr(view), )) for ext in signals.after: ext(context) def capture_done(response): for chunk in response: yield chunk for ext in signals.done: ext(context) # This is really long due to the fact we don't want to capture the response too early. # We need anything up to this point to be able to simply replace `context.response` if needed. return capture_done(context.response.conditional_response_app(environ, start_response))
Process a single WSGI request/response cycle. This is the WSGI handler for WebCore. Depending on the presence of extensions providing WSGI middleware, the `__call__` attribute of the Application instance will either become this, or become the outermost middleware callable. Most apps won't utilize middleware, the extension interface is preferred for most operations in WebCore. They allow for code injection at various intermediary steps in the processing of a request and response.
entailment
def _swap(self): '''Swaps the alignment so that the reference becomes the query and vice-versa. Swaps their names, coordinates etc. The frame is not changed''' self.ref_start, self.qry_start = self.qry_start, self.ref_start self.ref_end, self.qry_end = self.qry_end, self.ref_end self.hit_length_ref, self.hit_length_qry = self.hit_length_qry, self.hit_length_ref self.ref_length, self.qry_length = self.qry_length, self.ref_length self.ref_name, self.qry_name = self.qry_name, self.ref_name
Swaps the alignment so that the reference becomes the query and vice-versa. Swaps their names, coordinates etc. The frame is not changed
entailment
def qry_coords(self): '''Returns a pyfastaq.intervals.Interval object of the start and end coordinates in the query sequence''' return pyfastaq.intervals.Interval(min(self.qry_start, self.qry_end), max(self.qry_start, self.qry_end))
Returns a pyfastaq.intervals.Interval object of the start and end coordinates in the query sequence
entailment
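A tiny worked example of the min/max normalisation above; the coordinates are made up and describe a reverse-strand hit, where start > end.

qry_start, qry_end = 120, 80  # reverse-strand hit: start > end
print(min(qry_start, qry_end), max(qry_start, qry_end))  # 80 120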
def ref_coords(self): '''Returns a pyfastaq.intervals.Interval object of the start and end coordinates in the reference sequence''' return pyfastaq.intervals.Interval(min(self.ref_start, self.ref_end), max(self.ref_start, self.ref_end))
Returns a pyfastaq.intervals.Interval object of the start and end coordinates in the reference sequence
entailment
def on_same_strand(self): '''Returns true iff the direction of the alignment is the same in the reference and the query''' return (self.ref_start < self.ref_end) == (self.qry_start < self.qry_end)
Returns true iff the direction of the alignment is the same in the reference and the query
entailment
def is_self_hit(self): '''Returns true iff the alignment is of a sequence to itself: names and all coordinates are the same and 100 percent identity''' return self.ref_name == self.qry_name \ and self.ref_start == self.qry_start \ and self.ref_end == self.qry_end \ and self.percent_identity == 100
Returns true iff the alignment is of a sequence to itself: names and all coordinates are the same and 100 percent identity
entailment
def reverse_query(self): '''Changes the coordinates as if the query sequence has been reverse complemented''' self.qry_start = self.qry_length - self.qry_start - 1 self.qry_end = self.qry_length - self.qry_end - 1
Changes the coordinates as if the query sequence has been reverse complemented
entailment
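The reverse-complement coordinate arithmetic in isolation: position p on a length-L sequence maps to L - p - 1. The values are illustrative.

qry_length, qry_start, qry_end = 100, 10, 40
print(qry_length - qry_start - 1, qry_length - qry_end - 1)  # 89 59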
def reverse_reference(self): '''Changes the coordinates as if the reference sequence has been reverse complemented''' self.ref_start = self.ref_length - self.ref_start - 1 self.ref_end = self.ref_length - self.ref_end - 1
Changes the coordinates as if the reference sequence has been reverse complemented
entailment
def to_msp_crunch(self): '''Returns the alignment as a line in MSPcrunch format. The columns are space-separated and are: 1. score 2. percent identity 3. match start in the query sequence 4. match end in the query sequence 5. query sequence name 6. subject sequence start 7. subject sequence end 8. subject sequence name''' # We don't know the alignment score, so estimate it. This awards roughly 1 point per matching base. aln_score = int(self.percent_identity * 0.005 * (self.hit_length_ref + self.hit_length_qry)) return ' '.join(str(x) for x in [ aln_score, '{0:.2f}'.format(self.percent_identity), self.qry_start + 1, self.qry_end + 1, self.qry_name, self.ref_start + 1, self.ref_end + 1, self.ref_name ])
Returns the alignment as a line in MSPcrunch format. The columns are space-separated and are: 1. score 2. percent identity 3. match start in the query sequence 4. match end in the query sequence 5. query sequence name 6. subject sequence start 7. subject sequence end 8. subject sequence name
entailment
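A quick check of the score estimate above with made-up numbers: at 100 percent identity it awards roughly one point per matching base.

percent_identity = 100.0
hit_length_ref = hit_length_qry = 500
print(int(percent_identity * 0.005 * (hit_length_ref + hit_length_qry)))  # 500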
def qry_coords_from_ref_coord(self, ref_coord, variant_list): '''Given a reference position and a list of variants ([variant.Variant]), works out the position in the query sequence, accounting for indels. Returns a tuple: (position, True|False), where second element is whether or not the ref_coord lies in an indel. If it is, then returns the corresponding start position of the indel in the query''' if self.ref_coords().distance_to_point(ref_coord) > 0: raise Error('Cannot get query coord in qry_coords_from_ref_coord because given ref_coord ' + str(ref_coord) + ' does not lie in nucmer alignment:\n' + str(self)) indel_variant_indexes = [] for i in range(len(variant_list)): if variant_list[i].var_type not in {variant.INS, variant.DEL}: continue if not self.intersects_variant(variant_list[i]): continue if variant_list[i].ref_start <= ref_coord <= variant_list[i].ref_end: return variant_list[i].qry_start, True elif variant_list[i].ref_start < ref_coord: indel_variant_indexes.append(i) distance = ref_coord - min(self.ref_start, self.ref_end) for i in indel_variant_indexes: if variant_list[i].var_type == variant.INS: distance += len(variant_list[i].qry_base) else: assert variant_list[i].var_type == variant.DEL distance -= len(variant_list[i].ref_base) if self.on_same_strand(): return min(self.qry_start, self.qry_end) + distance, False else: return max(self.qry_start, self.qry_end) - distance, False
Given a reference position and a list of variants ([variant.Variant]), works out the position in the query sequence, accounting for indels. Returns a tuple: (position, True|False), where second element is whether or not the ref_coord lies in an indel. If it is, then returns the corresponding start position of the indel in the query
entailment
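The indel offset arithmetic above, reduced to one same-strand case with invented numbers: an insertion in the query before the reference coordinate shifts the mapped position forward by its length.

ref_start, qry_start = 0, 0
ref_coord, insertion_len = 10, 3
distance = ref_coord - ref_start + insertion_len
print(qry_start + distance)  # 13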
def _nucmer_command(self, ref, qry, outprefix): '''Construct the nucmer command''' if self.use_promer: command = 'promer' else: command = 'nucmer' command += ' -p ' + outprefix if self.breaklen is not None: command += ' -b ' + str(self.breaklen) if self.diagdiff is not None and not self.use_promer: command += ' -D ' + str(self.diagdiff) if self.diagfactor: command += ' -d ' + str(self.diagfactor) if self.maxgap: command += ' -g ' + str(self.maxgap) if self.maxmatch: command += ' --maxmatch' if self.mincluster is not None: command += ' -c ' + str(self.mincluster) if not self.simplify and not self.use_promer: command += ' --nosimplify' return command + ' ' + ref + ' ' + qry
Construct the nucmer command
entailment
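The shape of the command string the method assembles, with illustrative parameter values rather than the class defaults:

breaklen, mincluster = 200, 65
command = 'nucmer' + ' -p out'
command += ' -b ' + str(breaklen)
command += ' -c ' + str(mincluster)
print(command + ' ref.fa qry.fa')
# nucmer -p out -b 200 -c 65 ref.fa qry.fa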
def _delta_filter_command(self, infile, outfile): '''Construct delta-filter command''' command = 'delta-filter' if self.min_id is not None: command += ' -i ' + str(self.min_id) if self.min_length is not None: command += ' -l ' + str(self.min_length) return command + ' ' + infile + ' > ' + outfile
Construct delta-filter command
entailment
def _show_coords_command(self, infile, outfile): '''Construct show-coords command''' command = 'show-coords -dTlro' if not self.coords_header: command += ' -H' return command + ' ' + infile + ' > ' + outfile
Construct show-coords command
entailment
def _write_script(self, script_name, ref, qry, outfile): '''Write commands into a bash script''' f = pyfastaq.utils.open_file_write(script_name) print(self._nucmer_command(ref, qry, 'p'), file=f) print(self._delta_filter_command('p.delta', 'p.delta.filter'), file=f) print(self._show_coords_command('p.delta.filter', outfile), file=f) if self.show_snps: print(self._show_snps_command('p.delta.filter', outfile + '.snps'), file=f) pyfastaq.utils.close(f)
Write commands into a bash script
entailment
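A sketch of the three-step pipeline such a script contains; the filenames and filter thresholds here are illustrative, not the configured values.

lines = [
    'nucmer -p p ref.fa qry.fa',
    'delta-filter -i 90 -l 100 p.delta > p.delta.filter',
    'show-coords -dTlro p.delta.filter > out.coords',
]
print('\n'.join(lines))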