sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def _get_features_string(self, features=None):
    """Generate the extended-newick (NHX) feature string for this node.

    Parameters
    ----------
    features : list of str or None
        Names of node attributes to serialize.  ``None`` means no
        features; an explicit empty list means "all of self.features".

    Returns
    -------
    str
        A string like ``[&&NHX:name=value:...]``, or ``""`` when no
        requested feature is present on the node.
    """
    string = ""
    if features is None:
        features = []
    elif features == []:
        # empty list is the documented sentinel for "all features"
        features = self.features
    for pr in features:
        if hasattr(self, pr):
            raw = getattr(self, pr)
            if type(raw) in ITERABLE_TYPES:
                raw = '|'.join([str(i) for i in raw])
            elif type(raw) == dict:
                # BUG FIX: the original used map(lambda x, y: ..., iteritems)
                # with a single iterable, which raises TypeError (map passes
                # each (key, value) tuple as ONE argument).  Unpack instead.
                raw = '|'.join(
                    "%s-%s" % (k, v) for k, v in six.iteritems(raw))
            elif type(raw) == str:
                pass
            else:
                raw = str(raw)
            # replace characters that would corrupt the newick syntax
            value = re.sub("[" + _ILEGAL_NEWICK_CHARS + "]", "_", raw)
            if string != "":
                string += ":"
            string += "%s=%s" % (pr, str(value))
    if string != "":
        string = "[&&NHX:" + string + "]"
    return string
Generates the extended newick string NHX with extra data about a node.
entailment
def get_column_width(column, table):
    """Return the character width of a column in a table.

    Parameters
    ----------
    column : int
        The column index to analyze.
    table : list of lists of str
        The table of rows of strings.  For this to be accurate, each
        string must only be 1 line long.

    Returns
    -------
    width : int
        The widest cell in the column, with a floor of 3.
    """
    widest = max((len(row[column]) for row in table), default=0)
    return max(3, widest)
Get the character width of a column in a table Parameters ---------- column : int The column index analyze table : list of lists of str The table of rows of strings. For this to be accurate, each string must only be 1 line long. Returns ------- width : int
entailment
def get_text_mark(ttree):
    """Build a toyplot.mark.Text mark holding the tree's tip labels.

    Positions labels according to the tree orientation ('right' or
    'down'), optionally aligning them to the outer edge of the plot
    when ``tip_labels_align`` is set.

    Parameters
    ----------
    ttree : object
        A tree-drawing object exposing ``_orient``, ``verts``, ``tree``,
        ``_kwargs`` and ``get_tip_labels()`` (project type).

    Returns
    -------
    toyplot.mark.Text
    """
    if ttree._orient in ["right"]:
        angle = 0.
        ypos = ttree.verts[-1*len(ttree.tree):, 1]
        if ttree._kwargs["tip_labels_align"]:
            xpos = [ttree.verts[:, 0].max()] * len(ttree.tree)
            start = xpos
            finish = ttree.verts[-1*len(ttree.tree):, 0]
            align_edges = np.array(
                [(i, i + len(xpos)) for i in range(len(xpos))])
            # BUG FIX: zip objects cannot be concatenated with '+' on
            # Python 3; materialize them as lists first.
            align_verts = np.array(
                list(zip(start, ypos)) + list(zip(finish, ypos)))
        else:
            xpos = ttree.verts[-1*len(ttree.tree):, 0]
    elif ttree._orient in ['down']:
        angle = -90.
        xpos = ttree.verts[-1*len(ttree.tree):, 0]
        if ttree._kwargs["tip_labels_align"]:
            ypos = [ttree.verts[:, 1].min()] * len(ttree.tree)
            start = ypos
            finish = ttree.verts[-1*len(ttree.tree):, 1]
            align_edges = np.array(
                [(i, i + len(ypos)) for i in range(len(ypos))])
            # BUG FIX: same list(zip(...)) fix as above for Python 3.
            align_verts = np.array(
                list(zip(xpos, start)) + list(zip(xpos, finish)))
        else:
            ypos = ttree.verts[-1*len(ttree.tree):, 1]

    # assemble the data table expected by toyplot.mark.Text
    table = toyplot.data.Table()
    table['x'] = toyplot.require.scalar_vector(xpos)
    table['y'] = toyplot.require.scalar_vector(ypos, table.shape[0])
    table['text'] = toyplot.broadcast.pyobject(
        ttree.get_tip_labels(), table.shape[0])
    table["angle"] = toyplot.broadcast.scalar(angle, table.shape[0])
    table["opacity"] = toyplot.broadcast.scalar(1.0, table.shape[0])
    table["title"] = toyplot.broadcast.pyobject(None, table.shape[0])

    style = toyplot.style.require(
        ttree._kwargs["tip_labels_style"],
        allowed=toyplot.style.allowed.text)
    default_color = [toyplot.color.black]
    color = toyplot.color.broadcast(
        colors=ttree._kwargs["tip_labels_color"],
        shape=(table.shape[0], 1),
        default=default_color,
    )
    table["fill"] = color[:, 0]

    text_mark = toyplot.mark.Text(
        coordinate_axes=['x', 'y'],
        table=table,
        coordinates=['x', 'y'],
        text=["text"],
        angle=["angle"],
        fill=["fill"],
        opacity=["opacity"],
        title=["title"],
        style=style,
        annotation=True,
        filename=None,
    )
    return text_mark
makes a simple Text Mark object
entailment
def get_edge_mark(ttree): """ makes a simple Graph Mark object""" ## tree style if ttree._kwargs["tree_style"] in ["c", "cladogram"]: a=ttree.edges vcoordinates=ttree.verts else: a=ttree._lines vcoordinates=ttree._coords ## fixed args along='x' vmarker='o' vcolor=None vlshow=False vsize=0. estyle=ttree._kwargs["edge_style"] ## get axes layout = toyplot.layout.graph(a, vcoordinates=vcoordinates) along = toyplot.require.value_in(along, ["x", "y"]) if along == "x": coordinate_axes = ["x", "y"] elif along == "y": coordinate_axes = ["y", "x"] ## broadcast args along axes vlabel = layout.vids vmarker = toyplot.broadcast.pyobject(vmarker, layout.vcount) vsize = toyplot.broadcast.scalar(vsize, layout.vcount) estyle = toyplot.style.require(estyle, allowed=toyplot.style.allowed.line) ## fixed args vcolor = toyplot.color.broadcast(colors=None, shape=layout.vcount, default=toyplot.color.black) vopacity = toyplot.broadcast.scalar(1.0, layout.vcount) vtitle = toyplot.broadcast.pyobject(None, layout.vcount) vstyle = None vlstyle = None ## this could be modified in the future to allow diff color edges ecolor = toyplot.color.broadcast(colors=None, shape=layout.ecount, default=toyplot.color.black) ewidth = toyplot.broadcast.scalar(1.0, layout.ecount) eopacity = toyplot.broadcast.scalar(1.0, layout.ecount) hmarker = toyplot.broadcast.pyobject(None, layout.ecount) mmarker = toyplot.broadcast.pyobject(None, layout.ecount) mposition = toyplot.broadcast.scalar(0.5, layout.ecount) tmarker = toyplot.broadcast.pyobject(None, layout.ecount) ## tables are required if I don't want to edit the class vtable = toyplot.data.Table() vtable["id"] = layout.vids for axis, coordinates in zip(coordinate_axes, layout.vcoordinates.T): vtable[axis] = coordinates #_mark_exportable(vtable, axis) vtable["label"] = vlabel vtable["marker"] = vmarker vtable["size"] = vsize vtable["color"] = vcolor vtable["opacity"] = vopacity vtable["title"] = vtitle etable = toyplot.data.Table() etable["source"] = 
layout.edges.T[0] #_mark_exportable(etable, "source") etable["target"] = layout.edges.T[1] #_mark_exportable(etable, "target") etable["shape"] = layout.eshapes etable["color"] = ecolor etable["width"] = ewidth etable["opacity"] = eopacity etable["hmarker"] = hmarker etable["mmarker"] = mmarker etable["mposition"] = mposition etable["tmarker"] = tmarker edge_mark = toyplot.mark.Graph( coordinate_axes=['x', 'y'], ecolor=["color"], ecoordinates=layout.ecoordinates, efilename=None, eopacity=["opacity"], eshape=["shape"], esource=["source"], estyle=estyle, etable=etable, etarget=["target"], ewidth=["width"], hmarker=["hmarker"], mmarker=["mmarker"], mposition=["mposition"], tmarker=["tmarker"], vcolor=["color"], vcoordinates=['x', 'y'], vfilename=None, vid=["id"], vlabel=["label"], vlshow=False, vlstyle=None, vmarker=["marker"], vopacity=["opacity"], vsize=["size"], vstyle=None, vtable=vtable, vtitle=["title"], ) return edge_mark
makes a simple Graph Mark object
entailment
def split_styles(mark):
    """Split a toyplot Scatterplot-like mark's marker styles into a shared
    CSS string and per-marker unique style dicts.

    Returns
    -------
    (shared_styles, unique_styles) : (str, dict)
        ``shared_styles`` is a ``;``-joined "key:value" string of style
        keys identical across all markers; ``unique_styles`` maps
        ``"node"`` and ``"text"`` to per-marker dicts of the keys that
        differ.
    """
    markers = [mark._table[key] for key in mark._marker][0]

    # per-marker node (shape) styles
    nstyles = []
    for m in markers:
        ## fill and stroke are already rgb() since already in markers
        msty = toyplot.style.combine({
            "fill": m.mstyle['fill'],
            "stroke": m.mstyle['stroke'],
            "opacity": m.mstyle["fill-opacity"],
        }, m.mstyle)
        msty = _color_fixup(msty)
        nstyles.append(msty)

    ## uses 'marker.size' so we need to loop over it
    lstyles = []
    for m in markers:
        lsty = toyplot.style.combine({
            "font-family": "Helvetica",
            "-toyplot-vertical-align": "middle",
            "fill": toyplot.color.black,
            # NOTE(review): "%r" formats the float with repr(); presumably
            # intentional to keep full precision — confirm.
            "font-size": "%rpx" % (m.size * 0.75),
            "stroke": "none",
            "text-anchor": "middle",
        }, m.lstyle)
        ## update fonts
        fonts = toyplot.font.ReportlabLibrary()
        layout = toyplot.text.layout(m.label, lsty, fonts)
        lsty = _color_fixup(layout.style)
        lstyles.append(lsty)

    # union of keys seen on any marker
    nallkeys = set(itertools.chain(*[i.keys() for i in nstyles]))
    lallkeys = set(itertools.chain(*[i.keys() for i in lstyles]))

    # a key is "shared" when every marker has the same value for it
    nuniquekeys = []
    nsharedkeys = []
    for key in nallkeys:
        vals = [nstyles[i].get(key) for i in range(len(nstyles))]
        if len(set(vals)) > 1:
            nuniquekeys.append(key)
        else:
            nsharedkeys.append(key)

    luniquekeys = []
    lsharedkeys = []
    for key in lallkeys:
        vals = [lstyles[i].get(key) for i in range(len(lstyles))]
        if len(set(vals)) > 1:
            luniquekeys.append(key)
        else:
            lsharedkeys.append(key)

    ## keys shared between mark and text markers
    repeated = set(lsharedkeys).intersection(set(nsharedkeys))
    for repeat in repeated:
        ## if same then keep only one copy of it
        lidx = lsharedkeys.index(repeat)
        nidx = nsharedkeys.index(repeat)
        # NOTE(review): both sides equal `repeat`, so this comparison is
        # always True and the else-branch below is unreachable; the intent
        # was probably to compare the *values*, not the keys — confirm.
        if lsharedkeys[lidx] == nsharedkeys[nidx]:
            lsharedkeys.remove(repeat)
        else:
            lsharedkeys.remove(repeat)
            luniquekeys.append(repeat)
            nsharedkeys.remove(repeat)
            nuniquekeys.append(repeat)

    ## check node values — shared keys take their value from marker 0
    natt = ["%s:%s" % (key, nstyles[0][key]) for key in sorted(nsharedkeys)]
    latt = ["%s:%s" % (key, lstyles[0][key]) for key in sorted(lsharedkeys)]
    shared_styles = ";".join(natt + latt)

    unique_styles = {
        "node": [{k: v for k, v in nstyles[idx].items()
                  if k in nuniquekeys} for idx in range(len(markers))],
        "text": [{k: v for k, v in lstyles[idx].items()
                  if k in luniquekeys} for idx in range(len(markers))],
    }
    return shared_styles, unique_styles
get shared styles
entailment
def center_cell_text(cell):
    """Horizontally center the text within a cell's grid.

    Like this::

        +---------+           +---------+
        | foo     |   -->     |   foo   |
        +---------+           +---------+

    Parameters
    ----------
    cell : dashtable.data2rst.Cell

    Returns
    -------
    cell : dashtable.data2rst.Cell
        The same cell, mutated in place.
    """
    rows = cell.text.split('\n')
    inner_width = len(rows[0]) - 2

    # strip the border character plus one padding space from each side
    # of every interior row; blank sentinels keep indices aligned
    stripped = [''] + [row[2:-2].rstrip() for row in rows[1:-1]] + ['']

    longest = get_longest_line_length('\n'.join(stripped))
    pad_left = math.floor((inner_width - longest) / 2) * ' '

    centered = []
    for text in stripped:
        shifted = pad_left + text
        centered.append(shifted + (inner_width - len(shifted)) * ' ')

    # re-attach the original border characters
    for i in range(1, len(rows) - 1):
        rows[i] = rows[i][0] + centered[i] + rows[i][-1]

    cell.text = '\n'.join(rows)
    return cell
Horizontally center the text within a cell's grid Like this:: +---------+ +---------+ | foo | --> | foo | +---------+ +---------+ Parameters ---------- cell : dashtable.data2rst.Cell Returns ------- cell : dashtable.data2rst.Cell
entailment
def hamming_distance(word1, word2):
    """Compute the Hamming distance between two equal-length sequences.

    [Reference]: https://en.wikipedia.org/wiki/Hamming_distance
    [Article]: Hamming, Richard W. (1950), "Error detecting and error
    correcting codes", Bell System Technical Journal 29 (2): 147-160

    Raises
    ------
    WrongLengthException
        If the two words differ in length.
    """
    if len(word1) != len(word2):
        raise WrongLengthException('The words need to be of the same length!')
    # count the positions where the paired elements disagree
    return sum(1 for left, right in zip(word1, word2) if left != right)
Computes the Hamming distance. [Reference]: https://en.wikipedia.org/wiki/Hamming_distance [Article]: Hamming, Richard W. (1950), "Error detecting and error correcting codes", Bell System Technical Journal 29 (2): 147–160
entailment
def polygen(*coefficients):
    '''Polynomial generating function.

    Returns a function ``f(i) = c0 + c1*i + c2*i**2 + ...`` for the given
    coefficients, or a constant-zero function when none are given.

    BUG FIX: the original advanced the power with ``i *= i``, which
    squares the running value each step and produces exponents
    1, 2, 4, 8, ... instead of 1, 2, 3, 4, ... — wrong for four or more
    coefficients.  The power is now tracked explicitly.
    '''
    if not coefficients:
        return lambda i: 0
    def _(i):
        v = coefficients[0]
        power = i
        for c in coefficients[1:]:
            v += c * power
            power *= i  # next monomial: i, i**2, i**3, ...
        return v
    return _
Polynomial generating function
entailment
def timeseries(name='', backend=None, date=None, data=None, **kwargs):
    '''Create a new :class:`dynts.TimeSeries` instance using a ``backend``
    and populating it with provided the data.

    :parameter name: optional timeseries name.  For multivarate timeseries
        the :func:`dynts.tsname` utility function can be used to build it.
    :parameter backend: optional backend name.  If not provided, numpy
        will be used.
    :parameter date: optional iterable over dates.
    :parameter data: optional iterable over data.
    '''
    chosen = backend or settings.backend
    factory = BACKENDS.get(chosen)
    if not factory:
        raise InvalidBackEnd(
            'Could not find a TimeSeries class %s' % chosen
        )
    return factory(name=name, date=date, data=data, **kwargs)
Create a new :class:`dynts.TimeSeries` instance using a ``backend`` and populating it with provided the data. :parameter name: optional timeseries name. For multivarate timeseries the :func:`dynts.tsname` utility function can be used to build it. :parameter backend: optional backend name. If not provided, numpy will be used. :parameter date: optional iterable over dates. :parameter data: optional iterable over data.
entailment
def ensure_table_strings(table):
    """Force each cell in the table to be a string.

    The table is mutated in place and also returned.

    Parameters
    ----------
    table : list of lists

    Returns
    -------
    table : list of lists of str
    """
    for row in table:
        for index, item in enumerate(row):
            row[index] = str(item)
    return table
Force each cell in the table to be a string Parameters ---------- table : list of lists Returns ------- table : list of lists of str
entailment
def left_sections(self):
    """The number of sections that touch the left side.

    During merging, the cell's text will grow to include other cells.
    This property keeps track of the number of sections that are
    touching the left side.  For example::

                     +-----+-----+
        section -->  | foo | dog |  <-- section
                     +-----+-----+
        section -->  | cat |
                     +-----+

    Has 2 sections on the left, but 1 on the right.

    Returns
    -------
    sections : int
        The number of sections on the left.
    """
    # each border row on the left edge begins with '+'; n border rows
    # delimit n - 1 sections
    border_rows = sum(
        1 for line in self.text.split('\n') if line.startswith('+'))
    return border_rows - 1
The number of sections that touch the left side. During merging, the cell's text will grow to include other cells. This property keeps track of the number of sections that are touching the left side. For example:: +-----+-----+ section --> | foo | dog | <-- section +-----+-----+ section --> | cat | +-----+ Has 2 sections on the left, but 1 on the right Returns ------- sections : int The number of sections on the left
entailment
def right_sections(self):
    """The number of sections that touch the right side.

    Returns
    -------
    sections : int
        The number of sections on the right.
    """
    # border rows on the right edge end with '+'; n border rows
    # delimit n - 1 sections
    corner_rows = sum(
        line.endswith('+') for line in self.text.split('\n'))
    return corner_rows - 1
The number of sections that touch the right side. Returns ------- sections : int The number of sections on the right
entailment
def top_sections(self):
    """The number of sections that touch the top side.

    Returns
    -------
    sections : int
        The number of sections on the top.
    """
    # n '+' corner markers on the top border delimit n - 1 sections
    first_row = self.text.split('\n')[0]
    return first_row.count('+') - 1
The number of sections that touch the top side. Returns ------- sections : int The number of sections on the top
entailment
def bottom_sections(self):
    """The number of sections that touch the bottom side.

    Returns
    -------
    sections : int
        The number of sections on the bottom.
    """
    # n '+' corner markers on the bottom border delimit n - 1 sections
    last_row = self.text.split('\n')[-1]
    return last_row.count('+') - 1
The number of cells that touch the bottom side. Returns ------- sections : int The number of sections on the top
entailment
def is_header(self):
    """Whether or not the cell is a header.

    Any header cell will have "=" instead of "-" on its border.
    For example, this is a header cell::

        +-----+
        | foo |
        +=====+

    while this cell is not::

        +-----+
        | foo |
        +-----+

    Returns
    -------
    bool
        Whether or not the cell is a header.
    """
    last_line = self.text.split('\n')[-1]
    # a header's bottom border consists solely of '+' and '='
    return bool(is_only(last_line, ['+', '=']))
Whether or not the cell is a header Any header cell will have "=" instead of "-" on its border. For example, this is a header cell:: +-----+ | foo | +=====+ while this cell is not:: +-----+ | foo | +-----+ Returns ------- bool Whether or not the cell is a header
entailment
def get_git_changeset(filename=None):
    """Returns a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS
    format.  This value isn't guaranteed to be unique, but collisions
    are very unlikely, so it's sufficient for generating the development
    version numbers.
    """
    work_dir = os.path.dirname(filename or __file__)
    output = sh('git show --pretty=format:%ct --quiet HEAD', cwd=work_dir)
    # first line of the output is the commit's unix timestamp
    raw_timestamp = output.partition('\n')[0]
    try:
        changeset = datetime.datetime.utcfromtimestamp(int(raw_timestamp))
    except ValueError:
        # not a repository, or git unavailable
        return None
    return changeset.strftime('%Y%m%d%H%M%S')
Returns a numeric identifier of the latest git changeset. The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format. This value isn't guaranteed to be unique, but collisions are very unlikely, so it's sufficient for generating the development version numbers.
entailment
def headers_present(html_string):
    """Checks if the html table contains headers and returns True/False.

    Parameters
    ----------
    html_string : str

    Returns
    -------
    bool
    """
    # BeautifulSoup is an optional dependency; degrade gracefully
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        print("ERROR: You must have BeautifulSoup to use html2data")
        return

    soup = BeautifulSoup(html_string, 'html.parser')
    table = soup.find('table')
    if not table:
        return False
    # any <th> element marks the table as having headers
    return len(table.findAll('th')) > 0
Checks if the html table contains headers and returns True/False Parameters ---------- html_string : str Returns ------- bool
entailment
def extract_spans(html_string):
    """
    Creates a list of the spanned cell groups of [row, column] pairs.

    Parameters
    ----------
    html_string : str
        HTML source expected to contain a ``<table>``.

    Returns
    -------
    list of lists of lists of int
        One entry per merged group; each entry lists the [row, column]
        cells covered by that rowspan/colspan.
    """
    # BeautifulSoup is an optional dependency; degrade gracefully
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        print("ERROR: You must have BeautifulSoup to use html2data")
        return
    soup = BeautifulSoup(html_string, 'html.parser')
    table = soup.find('table')
    if not table:
        return []
    trs = table.findAll('tr')
    if len(trs) == 0:
        return []
    spans = []
    for tr in range(len(trs)):
        if tr == 0:
            # first row: prefer header cells, fall back to data cells
            ths = trs[tr].findAll('th')
            if len(ths) == 0:
                ths = trs[tr].findAll('td')
            tds = ths
        else:
            tds = trs[tr].findAll('td')
        # NOTE(review): `column` does not skip positions occupied by
        # rowspans that started on earlier rows — confirm against callers.
        column = 0
        for td in tds:
            r_span_count = 1
            c_span_count = 1
            current_column = column
            if td.has_attr('rowspan'):
                r_span_count = int(td['rowspan'])
            if td.has_attr('colspan'):
                c_span_count = int(td['colspan'])
                column += c_span_count
            else:
                column += 1
            # collect every [row, column] covered by this span that is
            # not already claimed by an earlier span
            new_span = []
            for r_index in range(tr, tr + r_span_count):
                for c_index in range(current_column, column):
                    if not get_span(spans, r_index, c_index):
                        new_span.append([r_index, c_index])
            if len(new_span) > 0:
                spans.append(new_span)
    return spans
Creates a list of the spanned cell groups of [row, column] pairs. Parameters ---------- html_string : str Returns ------- list of lists of lists of int
entailment
def translation(first, second):
    """Create an index of mapped letters (zip to dict).

    Raises
    ------
    WrongLengthException
        If the two sequences differ in length.
    """
    if len(first) != len(second):
        raise WrongLengthException('The lists are not of the same length!')
    return {src: dst for src, dst in zip(first, second)}
Create an index of mapped letters (zip to dict).
entailment
def process_tag(node):
    """
    Recursively go through a tag's children, converting them, then
    convert the tag itself.
    """
    exceptions = ['table']
    pieces = []
    for child in node.children:
        if isinstance(child, NavigableString):
            pieces.append(child)
        elif node.name not in exceptions:
            # tables are converted as a unit, not per-child
            pieces.append(process_tag(child))
    text = ''.join(pieces)
    try:
        # dispatch to convert_<tagname> if one is defined in this module
        convert_fn = globals()["convert_%s" % node.name.lower()]
        text = convert_fn(node, text)
    except KeyError:
        pass
    return text
Recursively go through a tag's children, converting them, then convert the tag itself.
entailment
def laggeddates(ts, step=1):
    '''Lagged iterator over dates.

    Yields ``(date, lagged_date)`` pairs where ``lagged_date`` trails
    ``date`` by ``step`` positions in ``ts.dates()``.

    BUG FIX: the original ``step != 1`` branch referenced ``done``,
    ``lag`` and ``dates`` before they were ever defined (a guaranteed
    NameError).  Both branches now share one working implementation:
    a sliding buffer of ``step`` dates.
    '''
    dates = iter(ts.dates())
    if step == 1:
        # fast path, no buffer needed
        dt0 = next(dates)
        for dt1 in dates:
            yield dt1, dt0
            dt0 = dt1
    else:
        # prime a buffer with the first `step` dates, then slide it
        lag = [next(dates) for _ in range(step)]
        for dt1 in dates:
            lag.append(dt1)
            yield dt1, lag.pop(0)
Lagged iterator over dates
entailment
def make_skiplist(*args, use_fallback=False):
    '''Create a new skiplist.

    With ``use_fallback=True`` the pure-python implementation is used
    instead of the default one.
    '''
    if use_fallback:
        factory = fallback.Skiplist
    else:
        factory = Skiplist
    return factory(*args)
Create a new skiplist
entailment
def data2md(table):
    """
    Creates a markdown table. The first row will be headers.

    Parameters
    ----------
    table : list of lists of str
        A list of rows containing strings. If any of these strings
        consist of multiple lines, they will be converted to single line
        because markdown tables do not support multiline cells.

    Returns
    -------
    str
        The markdown formatted string

    Example
    -------
    >>> table_data = [
    ...     ["Species", "Coolness"],
    ...     ["Dog", "Awesome"],
    ...     ["Cat", "Meh"],
    ... ]
    >>> print(data2md(table_data))
    | Species | Coolness |
    |---------|----------|
    |   Dog   | Awesome  |
    |   Cat   |   Meh    |
    """
    # work on a copy, normalized to single-line, cushioned strings
    table = copy.deepcopy(table)
    table = ensure_table_strings(table)
    table = multis_2_mono(table)
    table = add_cushions(table)

    widths = [get_column_width(col, table) for col in range(len(table[0]))]

    def render_row(cells):
        # one markdown row: |cell|cell|...|
        inner = '|'.join(
            center_line(widths[i], cell) for i, cell in enumerate(cells))
        return '|' + inner + '|'

    md_lines = [render_row(table[0])]
    md_lines.append(render_row(['-' * w for w in widths]))
    for row in table[1:]:
        md_lines.append(render_row(row))
    return '\n'.join(md_lines)
Creates a markdown table. The first row will be headers. Parameters ---------- table : list of lists of str A list of rows containing strings. If any of these strings consist of multiple lines, they will be converted to single line because markdown tables do not support multiline cells. Returns ------- str The markdown formatted string Example ------- >>> table_data = [ ... ["Species", "Coolness"], ... ["Dog", "Awesome"], ... ["Cat", "Meh"], ... ] >>> print(data2md(table_data)) | Species | Coolness | |---------|----------| | Dog | Awesome | | Cat | Meh |
entailment
def v_center_cell_text(cell):
    """Vertically center the text within the cell's grid.

    Like this::

        +--------+             +--------+
        | foobar |             |        |
        |        |             |        |
        |        |     -->     | foobar |
        |        |             |        |
        |        |             |        |
        +--------+             +--------+

    Parameters
    ----------
    cell : dashtable.data2rst.Cell

    Returns
    -------
    cell : dashtable.data2rst.Cell
        The same cell, mutated in place.
    """
    rows = cell.text.split('\n')
    inner_width = len(rows[0]) - 2

    # interior rows without their left/right border characters
    interior = [row[1:-1] for row in rows[1:-1]]
    height = len(interior)

    # count blank rows hugging the top and bottom
    leading = 0
    while leading < height and interior[leading].rstrip() == '':
        leading += 1
    trailing = 0
    while (trailing < height - leading
           and interior[height - 1 - trailing].rstrip() == ''):
        trailing += 1

    content = interior[leading:height - trailing]
    missing = height - len(content)
    blank_row = ' ' * inner_width

    # split the blank rows evenly, extra one goes below
    content = ([blank_row] * math.floor(missing / 2)
               + content
               + [blank_row] * math.ceil(missing / 2))

    # re-attach the border characters row by row
    for i, body in enumerate(content):
        rows[i + 1] = rows[i + 1][0] + body + rows[i + 1][-1]

    cell.text = '\n'.join(rows)
    return cell
Vertically center the text within the cell's grid. Like this:: +--------+ +--------+ | foobar | | | | | | | | | --> | foobar | | | | | | | | | +--------+ +--------+ Parameters ---------- cell : dashtable.data2rst.Cell Returns ------- cell : dashtable.data2rst.Cell
entailment
def data2rst(table, spans=[[[0, 0]]], use_headers=True,
             center_cells=False, center_headers=False):
    """
    Convert a list of lists of str into a reStructuredText Grid Table.

    Parameters
    ----------
    table : list of lists of str
    spans : list of lists of lists of int, optional
        These are [row, column] pairs of cells that are merged in the
        table. Rows and columns start in the top left of the table. For
        example::

            +--------+--------+
            | [0, 0] | [0, 1] |
            +--------+--------+
            | [1, 0] | [1, 1] |
            +--------+--------+

    use_headers : bool, optional
        Whether or not the first row of table data will become headers.
    center_cells : bool, optional
        Whether or not cells will be centered.
    center_headers : bool, optional
        Whether or not headers will be centered.

    Returns
    -------
    str
        The grid table string, or an "ERROR: ..." message when the
        table or a span fails validation.

    Example
    -------
    >>> spans = [
    ...     [ [3, 1], [4, 1] ],
    ...     [ [3, 2], [4, 2] ],
    ...     [ [2, 1], [2, 2] ],
    ... ]
    >>> table = [
    ...     ["Header 1", "Header 2", "Header 3"],
    ...     ["body row 1", "column 2", "column 3"],
    ...     ["body row 2", "Cells may span columns", ""],
    ...     ["body row 3", "Cells may span rows.", "- Cells\\n-contain\\n-blocks"],
    ...     ["body row 4", "", ""],
    ... ]
    >>> print(dashtable.data2rst(table, spans))
    +------------+------------+-----------+
    | Header 1   | Header 2   | Header 3  |
    +============+============+===========+
    | body row 1 | column 2   | column 3  |
    +------------+------------+-----------+
    | body row 2 | Cells may span columns.|
    +------------+------------+-----------+
    | body row 3 | Cells may  | - Cells   |
    +------------+ span rows. | - contain |
    | body row 4 |            | - blocks. |
    +------------+------------+-----------+
    """
    # NOTE(review): `spans` has a mutable default; it is only read and
    # rebound below, never mutated, so this is safe — but confirm if
    # check_span/table_cells_2_spans ever mutate their arguments.
    table = copy.deepcopy(table)

    # validate the table shape before doing any work
    table_ok = check_table(table)
    if not table_ok == "":
        return "ERROR: " + table_ok

    # validate user-supplied spans (skip for the no-span sentinel)
    if not spans == [[[0, 0]]]:
        for span in spans:
            span_ok = check_span(span, table)
            if not span_ok == "":
                return "ERROR: " + span_ok

    table = ensure_table_strings(table)
    table = add_cushions(table)

    # expand spans to cover every cell, then size the output grid
    spans = table_cells_2_spans(table, spans)
    widths = get_output_column_widths(table, spans)
    heights = get_output_row_heights(table, spans)

    cells = []
    for span in spans:
        cell = make_cell(table, span, widths, heights, use_headers)
        cells.append(cell)
    cells = list(sorted(cells))

    # optional centering passes (headers and body handled separately)
    if center_cells:
        for cell in cells:
            if not cell.is_header:
                center_cell_text(cell)
                v_center_cell_text(cell)
    if center_headers:
        for cell in cells:
            if cell.is_header:
                center_cell_text(cell)
                v_center_cell_text(cell)

    grid_table = merge_all_cells(cells)
    return grid_table
Convert a list of lists of str into a reStructuredText Grid Table Parameters ---------- table : list of lists of str spans : list of lists of lists of int, optional These are [row, column] pairs of cells that are merged in the table. Rows and columns start in the top left of the table.For example:: +--------+--------+ | [0, 0] | [0, 1] | +--------+--------+ | [1, 0] | [1, 1] | +--------+--------+ use_headers : bool, optional Whether or not the first row of table data will become headers. center_cells : bool, optional Whether or not cells will be centered center_headers: bool, optional Whether or not headers will be centered Returns ------- str The grid table string Example ------- >>> spans = [ ... [ [3, 1], [4, 1] ], ... [ [3, 2], [4, 2] ], ... [ [2, 1], [2, 2] ], ... ] >>> table = [ ... ["Header 1", "Header 2", "Header 3"], ... ["body row 1", "column 2", "column 3"], ... ["body row 2", "Cells may span columns", ""], ... ["body row 3", "Cells may span rows.", "- Cells\\n-contain\\n-blocks"], ... ["body row 4", "", ""], ... ] >>> print(dashtable.data2rst(table, spans)) +------------+------------+-----------+ | Header 1 | Header 2 | Header 3 | +============+============+===========+ | body row 1 | column 2 | column 3 | +------------+------------+-----------+ | body row 2 | Cells may span columns.| +------------+------------+-----------+ | body row 3 | Cells may | - Cells | +------------+ span rows. | - contain | | body row 4 | | - blocks. | +------------+------------+-----------+
entailment
def set_dims_from_tree_size(self): "Calculate reasonable height and width for tree given N tips" tlen = len(self.treelist[0]) if self.style.orient in ("right", "left"): # long tip-wise dimension if not self.style.height: self.style.height = max(275, min(1000, 18 * (tlen))) if not self.style.width: self.style.width = max(300, min(500, 18 * (tlen))) else: # long tip-wise dimension if not self.style.width: self.style.width = max(275, min(1000, 18 * (tlen))) if not self.style.height: self.style.height = max(225, min(500, 18 * (tlen)))
Calculate reasonable height and width for tree given N tips
entailment
def add_tip_labels_to_axes(self): """ Add text offset from tips of tree with correction for orientation, and fixed_order which is usually used in multitree plotting. """ # get tip-coords and replace if using fixed_order if self.style.orient in ("up", "down"): ypos = np.zeros(self.ntips) xpos = np.arange(self.ntips) if self.style.orient in ("right", "left"): xpos = np.zeros(self.ntips) ypos = np.arange(self.ntips) # pop fill from color dict if using color if self.style.tip_labels_colors: self.style.tip_labels_style.pop("fill") # fill anchor shift if None # (Toytrees fill this at draw() normally when tip_labels != None) if self.style.tip_labels_style["-toyplot-anchor-shift"] is None: self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px" # add tip names to coordinates calculated above self.axes.text( xpos, ypos, self.tip_labels, angle=(0 if self.style.orient in ("right", "left") else -90), style=self.style.tip_labels_style, color=self.style.tip_labels_colors, ) # get stroke-width for aligned tip-label lines (optional) # copy stroke-width from the edge_style unless user set it if not self.style.edge_align_style.get("stroke-width"): self.style.edge_align_style['stroke-width'] = ( self.style.edge_style['stroke-width'])
Add text offset from tips of tree with correction for orientation, and fixed_order which is usually used in multitree plotting.
entailment
def fit_tip_labels(self):
    """
    Modifies display range to ensure tip labels fit. This is a bit
    hackish still. The problem is that the 'extents' range of the
    rendered text is totally correct. So we add a little buffer here.
    Should add for user to be able to modify this if needed. If not
    using edge lengths then need to use unit length for treeheight.
    """
    if not self.tip_labels:
        return

    # longest name (this will include html hacks)
    longest_name = max([len(i) for i in self.tip_labels])
    # heuristic buffer: long names need proportionally more room
    if longest_name > 10:
        multiplier = 0.85
    else:
        multiplier = 0.25

    if self.style.use_edge_lengths:
        addon = (self.treelist[0].treenode.height + (
            self.treelist[0].treenode.height * multiplier))
    else:
        # unit-length tree: use topological distance to the farthest leaf
        addon = self.treelist[0].treenode.get_farthest_leaf(True)[1]

    # modify display for orientations
    # NOTE(review): only "right" and "down" are handled; other orients
    # leave the domain untouched — confirm that is intentional.
    if self.style.orient == "right":
        self.axes.x.domain.max = addon
    elif self.style.orient == "down":
        self.axes.y.domain.min = -1 * addon
Modifies display range to ensure tip labels fit. This is a bit hackish still. The problem is that the 'extents' range of the rendered text is totally correct. So we add a little buffer here. Should add for user to be able to modify this if needed. If not using edge lengths then need to use unit length for treeheight.
entailment
def convert_p(element, text):
    """
    Indent *text* by the nesting depth of *element* within the document.

    Depth is counted by walking up the parent chain, ignoring the
    document root and the '__RESTRUCTIFY_WRAPPER__' container.

    NOTE(review): the original docstring said "Adds 2 newlines to the
    end of text", which does not match this code — it prepends
    ``depth`` spaces and adds no newlines; confirm intended behavior.
    """
    depth = -1
    while element:
        if (not element.name == '[document]' and
                not element.parent.get('id') == '__RESTRUCTIFY_WRAPPER__'):
            depth += 1
        element = element.parent
    if text:
        text = ' ' * depth + text
    return text
Adds 2 newlines to the end of text
entailment
def simple2data(text): """ Convert a simple table to data (the kind used by DashTable) Parameters ---------- text : str A valid simple rst table Returns ------- table : list of lists of str spans : list of lists of lists of int A span is a [row, column] pair that defines a group of merged cells in the table. In a simple rst table, spans can only be colspans. use_headers : bool Whether or not this table uses headers headers_row : int The row where headers are located Notes ----- This function requires docutils_. .. _docutils: http://docutils.sourceforge.net/ Example ------- >>> html_text = ''' ... ====== ===== ====== ... Inputs Output ... ------------- ------ ... A B A or B ... ====== ===== ====== ... False False False ... True False True ... False True True ... True True True ... ====== ===== ====== ... ''' >>> from dashtable import simple2data >>> table, spans, use_headers, headers_row = simple2data(html_text) >>> from pprint import pprint >>> pprint(table) [['Inputs', 'Output', ''], ['A', 'B', 'A or B'], ['False', 'False', 'False'], ['True, 'False', 'True'], ['False', 'True', 'True'], ['True', 'True', 'True']] >>> print(spans) [[[0, 0], [0, 1]]] >>> print(use_headers) True >>> print(headers_row) 1 """ try: import docutils.statemachine import docutils.parsers.rst.tableparser except ImportError: print("ERROR: You must install the docutils library to use simple2data") return lines = text.split('\n') lines = truncate_empty_lines(lines) leading_space = lines[0].replace(lines[0].lstrip(), '') for i in range(len(lines)): lines[i] = lines[i][len(leading_space)::] parser = docutils.parsers.rst.tableparser.SimpleTableParser() block = docutils.statemachine.StringList(list(lines)) simple_data = list(parser.parse(block)) column_widths = simple_data.pop(0) column_count = len(column_widths) headers_row = 0 if len(simple_data[0]) > 0: use_headers = True headers_row = len(simple_data[0]) - 1 headers = simple_data[0][0] row_count = len(simple_data[1]) + len(simple_data[0]) while 
len(simple_data[0]) > 0: simple_data[1].insert(0, simple_data[0][-1]) simple_data[0].pop(-1) simple_data.pop(0) else: use_headers = False simple_data.pop(0) row_count = len(simple_data[0]) simple_data = simple_data[0] table = make_empty_table(row_count, column_count) spans = [] for row in range(len(simple_data)): for column in range(len(simple_data[row])): try: text = '\n'.join(simple_data[row][column][3]).rstrip() table[row][column] = text extra_rows = simple_data[row][column][0] extra_columns = simple_data[row][column][1] span = make_span(row, column, extra_rows, extra_columns) span = sorted(span) span = list(span for span,_ in itertools.groupby(span)) if not len(span) == 1: spans.append(span) except TypeError: pass spans = sorted(spans) return table, spans, use_headers, headers_row
Convert a simple table to data (the kind used by DashTable) Parameters ---------- text : str A valid simple rst table Returns ------- table : list of lists of str spans : list of lists of lists of int A span is a [row, column] pair that defines a group of merged cells in the table. In a simple rst table, spans can only be colspans. use_headers : bool Whether or not this table uses headers headers_row : int The row where headers are located Notes ----- This function requires docutils_. .. _docutils: http://docutils.sourceforge.net/ Example ------- >>> html_text = ''' ... ====== ===== ====== ... Inputs Output ... ------------- ------ ... A B A or B ... ====== ===== ====== ... False False False ... True False True ... False True True ... True True True ... ====== ===== ====== ... ''' >>> from dashtable import simple2data >>> table, spans, use_headers, headers_row = simple2data(html_text) >>> from pprint import pprint >>> pprint(table) [['Inputs', 'Output', ''], ['A', 'B', 'A or B'], ['False', 'False', 'False'], ['True, 'False', 'True'], ['False', 'True', 'True'], ['True', 'True', 'True']] >>> print(spans) [[[0, 0], [0, 1]]] >>> print(use_headers) True >>> print(headers_row) 1
entailment
def get_output_column_widths(table, spans): """ Gets the widths of the columns of the output table Parameters ---------- table : list of lists of str The table of rows of text spans : list of lists of int The [row, column] pairs of combined cells Returns ------- widths : list of int The widths of each column in the output table """ widths = [] for column in table[0]: widths.append(3) for row in range(len(table)): for column in range(len(table[row])): span = get_span(spans, row, column) column_count = get_span_column_count(span) if column_count == 1: text_row = span[0][0] text_column = span[0][1] text = table[text_row][text_column] length = get_longest_line_length(text) if length > widths[column]: widths[column] = length for row in range(len(table)): for column in range(len(table[row])): span = get_span(spans, row, column) column_count = get_span_column_count(span) if column_count > 1: text_row = span[0][0] text_column = span[0][1] text = table[text_row][text_column] end_column = text_column + column_count available_space = sum( widths[text_column:end_column]) available_space += column_count - 1 length = get_longest_line_length(text) while length > available_space: for i in range(text_column, end_column): widths[i] += 1 available_space = sum( widths[text_column:end_column]) available_space += column_count - 1 if length <= available_space: break return widths
Gets the widths of the columns of the output table Parameters ---------- table : list of lists of str The table of rows of text spans : list of lists of int The [row, column] pairs of combined cells Returns ------- widths : list of int The widths of each column in the output table
entailment
def make_empty_table(row_count, column_count): """ Make an empty table Parameters ---------- row_count : int The number of rows in the new table column_count : int The number of columns in the new table Returns ------- table : list of lists of str Each cell will be an empty str ('') """ table = [] while row_count > 0: row = [] for column in range(column_count): row.append('') table.append(row) row_count -= 1 return table
Make an empty table Parameters ---------- row_count : int The number of rows in the new table column_count : int The number of columns in the new table Returns ------- table : list of lists of str Each cell will be an empty str ('')
entailment
def beta(self): '''\ The linear estimation of the parameter vector :math:`\beta` given by .. math:: \beta = (X^T X)^-1 X^T y ''' t = self.X.transpose() XX = dot(t,self.X) XY = dot(t,self.y) return linalg.solve(XX,XY)
\ The linear estimation of the parameter vector :math:`\beta` given by .. math:: \beta = (X^T X)^-1 X^T y
entailment
def oftype(self, typ): '''Return a generator of formatters codes of type typ''' for key, val in self.items(): if val.type == typ: yield key
Return a generator of formatters codes of type typ
entailment
def names(self, with_namespace=False): '''List of names for series in dataset. It will always return a list or names with length given by :class:`~.DynData.count`. ''' N = self.count() names = self.name.split(settings.splittingnames)[:N] n = 0 while len(names) < N: n += 1 names.append('unnamed%s' % n) if with_namespace and self.namespace: n = self.namespace s = settings.field_separator return [n + s + f for f in names] else: return names
List of names for series in dataset. It will always return a list or names with length given by :class:`~.DynData.count`.
entailment
def dump(self, format=None, **kwargs): """Dump the timeseries using a specific ``format``. """ formatter = Formatters.get(format, None) if not format: return self.display() elif not formatter: raise FormattingException('Formatter %s not available' % format) else: return formatter(self, **kwargs)
Dump the timeseries using a specific ``format``.
entailment
def p_expression_binop(p): '''expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression | expression EQUAL expression | expression CONCAT expression | expression SPLIT expression''' v = p[2] if v == '+': p[0] = PlusOp(p[1], p[3]) elif v == '-': p[0] = MinusOp(p[1], p[3]) elif v == '*': p[0] = MultiplyOp(p[1], p[3]) elif v == '/': p[0] = DivideOp(p[1], p[3]) elif v == '=': p[0] = EqualOp(p[1], p[3]) elif v == settings.concat_operator: p[0] = ConcatenationOp(p[1], p[3]) elif v == settings.separator_operator: p[0] = SplittingOp(p[1], p[3]) elif v == settings.field_operator: p[0] = Symbol(p[1], field=p[3])
expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression | expression EQUAL expression | expression CONCAT expression | expression SPLIT expression
entailment
def p_expression_group(p): '''expression : LPAREN expression RPAREN | LSQUARE expression RSQUARE''' v = p[1] if v == '(': p[0] = functionarguments(p[2]) elif v == '[': p[0] = tsentry(p[2])
expression : LPAREN expression RPAREN | LSQUARE expression RSQUARE
entailment
def merge_cells(cell1, cell2, direction): """ Combine the side of cell1's grid text with cell2's text. For example:: cell1 cell2 merge "RIGHT" +-----+ +------+ +-----+------+ | foo | | dog | | foo | dog | | | +------+ | +------+ | | | cat | | | cat | | | +------+ | +------+ | | | bird | | | bird | +-----+ +------+ +-----+------+ Parameters ---------- cell1 : dashtable.data2rst.Cell cell2 : dashtable.data2rst.Cell """ cell1_lines = cell1.text.split("\n") cell2_lines = cell2.text.split("\n") if direction == "RIGHT": for i in range(len(cell1_lines)): cell1_lines[i] = cell1_lines[i] + cell2_lines[i][1::] cell1.text = "\n".join(cell1_lines) cell1.column_count += cell2.column_count elif direction == "TOP": if cell1_lines[0].count('+') > cell2_lines[-1].count('+'): cell2_lines.pop(-1) else: cell1_lines.pop(0) cell2_lines.extend(cell1_lines) cell1.text = "\n".join(cell2_lines) cell1.row_count += cell2.row_count cell1.row = cell2.row cell1.column = cell2.column elif direction == "BOTTOM": if (cell1_lines[-1].count('+') > cell2_lines[0].count('+') or cell1.is_header): cell2_lines.pop(0) else: cell1_lines.pop(-1) cell1_lines.extend(cell2_lines) cell1.text = "\n".join(cell1_lines) cell1.row_count += cell2.row_count elif direction == "LEFT": for i in range(len(cell1_lines)): cell1_lines[i] = cell2_lines[i][0:-1] + cell1_lines[i] cell1.text = "\n".join(cell1_lines) cell1.column_count += cell2.column_count cell1.row = cell2.row cell1.column = cell2.column
Combine the side of cell1's grid text with cell2's text. For example:: cell1 cell2 merge "RIGHT" +-----+ +------+ +-----+------+ | foo | | dog | | foo | dog | | | +------+ | +------+ | | | cat | | | cat | | | +------+ | +------+ | | | bird | | | bird | +-----+ +------+ +-----+------+ Parameters ---------- cell1 : dashtable.data2rst.Cell cell2 : dashtable.data2rst.Cell
entailment
def iterclass(cls): """Iterates over (valid) attributes of a class. Args: cls (object): the class to iterate over Yields: (str, obj) tuples: the class-level attributes. """ for field in dir(cls): if hasattr(cls, field): value = getattr(cls, field) yield field, value
Iterates over (valid) attributes of a class. Args: cls (object): the class to iterate over Yields: (str, obj) tuples: the class-level attributes.
entailment
def _mksocket(host, port, q, done, stop): """Returns a tcp socket to (host/port). Retries forever if connection fails""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(2) attempt = 0 while not stop.is_set(): try: s.connect((host, port)) return s except Exception as ex: # Simple exponential backoff: sleep for 1,2,4,8,16,30,30... time.sleep(min(30, 2 ** attempt)) attempt += 1
Returns a tcp socket to (host/port). Retries forever if connection fails
entailment
def _push(host, port, q, done, mps, stop, test_mode): """Worker thread. Connect to host/port, pull data from q until done is set""" sock = None retry_line = None while not ( stop.is_set() or ( done.is_set() and retry_line == None and q.empty()) ): stime = time.time() if sock == None and not test_mode: sock = _mksocket(host, port, q, done, stop) if sock == None: break if retry_line: line = retry_line retry_line = None else: try: line = q.get(True, 1) # blocking, with 1 second timeout except: if done.is_set(): # no items in queue, and parent finished break else: # no items in queue, but parent might send more continue if not test_mode: try: sock.sendall(line.encode('utf-8')) except: sock = None # notify that we need to make a new socket at start of loop retry_line = line # can't really put back in q, so remember to retry this line continue etime = time.time() - stime #time that actually elapsed #Expected value of wait_time is 1/MPS_LIMIT, ie. MPS_LIMIT per second. if mps > 0: wait_time = (2.0 * random.random()) / (mps) if wait_time > etime: #if we should wait time.sleep(wait_time - etime) #then wait if sock: sock.close()
Worker thread. Connect to host/port, pull data from q until done is set
entailment
def log(self, name, val, **tags): """Log metric name with value val. You must include at least one tag as a kwarg""" global _last_timestamp, _last_metrics # do not allow .log after closing assert not self.done.is_set(), "worker thread has been closed" # check if valid metric name assert all(c in _valid_metric_chars for c in name), "invalid metric name " + name val = float(val) #Duck type to float/int, if possible. if int(val) == val: val = int(val) if self.host_tag and 'host' not in tags: tags['host'] = self.host_tag # get timestamp from system time, unless it's supplied as a tag timestamp = int(tags.pop('timestamp', time.time())) assert not self.done.is_set(), "tsdb object has been closed" assert tags != {}, "Need at least one tag" tagvals = ' '.join(['%s=%s' % (k, v) for k, v in tags.items()]) # OpenTSDB has major problems if you insert a data point with the same # metric, timestamp and tags. So we keep a temporary set of what points # we have sent for the last timestamp value. If we encounter a duplicate, # it is dropped. unique_str = "%s, %s, %s, %s, %s" % (name, timestamp, tagvals, self.host, self.port) if timestamp == _last_timestamp or _last_timestamp == None: if unique_str in _last_metrics: return # discard duplicate metrics else: _last_metrics.add(unique_str) else: _last_timestamp = timestamp _last_metrics.clear() line = "put %s %d %s %s\n" % (name, timestamp, val, tagvals) try: self.q.put(line, False) self.queued += 1 except queue.Full: print("potsdb - Warning: dropping oldest metric because Queue is full. Size: %s" % self.q.qsize(), file=sys.stderr) self.q.get() #Drop the oldest metric to make room self.q.put(line, False) return line
Log metric name with value val. You must include at least one tag as a kwarg
entailment
def available_ports(): """ Scans COM1 through COM255 for available serial ports returns a list of available ports """ ports = [] for i in range(256): try: p = Serial('COM%d' % i) p.close() ports.append(p) except SerialException: pass return ports
Scans COM1 through COM255 for available serial ports returns a list of available ports
entailment
def get_current_response(self): """ reads the current response data from the object and returns it in a dict. Currently 'time' is reported as 0 until clock drift issues are resolved. """ response = {'port': 0, 'pressed': False, 'key': 0, 'time': 0} if len(self.__response_structs_queue) > 0: # make a copy just in case any other internal members of # XidConnection were tracking the structure response = self.__response_structs_queue[0].copy() # we will now hand over 'response' to the calling code, # so remove it from the internal queue self.__response_structs_queue.pop(0) return response
reads the current response data from the object and returns it in a dict. Currently 'time' is reported as 0 until clock drift issues are resolved.
entailment
def detect_xid_devices(self): """ For all of the com ports connected to the computer, send an XID command '_c1'. If the device response with '_xid', it is an xid device. """ self.__xid_cons = [] for c in self.__com_ports: device_found = False for b in [115200, 19200, 9600, 57600, 38400]: con = XidConnection(c, b) try: con.open() except SerialException: continue con.flush_input() con.flush_output() returnval = con.send_xid_command("_c1", 5).decode('ASCII') if returnval.startswith('_xid'): device_found = True self.__xid_cons.append(con) if(returnval != '_xid0'): # set the device into XID mode con.send_xid_command('c10') con.flush_input() con.flush_output() # be sure to reset the timer to avoid the 4.66 hours # problem. (refer to XidConnection.xid_input_found to # read about the 4.66 hours) con.send_xid_command('e1') con.send_xid_command('e5') con.close() if device_found: break
For all of the com ports connected to the computer, send an XID command '_c1'. If the device response with '_xid', it is an xid device.
entailment
def device_at_index(self, index): """ Returns the device at the specified index """ if index >= len(self.__xid_cons): raise ValueError("Invalid device index") return self.__xid_cons[index]
Returns the device at the specified index
entailment
def query_base_timer(self): """ gets the value from the device's base timer """ (_, _, time) = unpack('<ccI', self.con.send_xid_command("e3", 6)) return time
gets the value from the device's base timer
entailment
def poll_for_response(self): """ Polls the device for user input If there is a keymapping for the device, the key map is applied to the key reported from the device. If a response is waiting to be processed, the response is appended to the internal response_queue """ key_state = self.con.check_for_keypress() if key_state != NO_KEY_DETECTED: response = self.con.get_current_response() if self.keymap is not None: response['key'] = self.keymap[response['key']] else: response['key'] -= 1 self.response_queue.append(response)
Polls the device for user input If there is a keymapping for the device, the key map is applied to the key reported from the device. If a response is waiting to be processed, the response is appended to the internal response_queue
entailment
def set_pulse_duration(self, duration): """ Sets the pulse duration for events in miliseconds when activate_line is called """ if duration > 4294967295: raise ValueError('Duration is too long. Please choose a value ' 'less than 4294967296.') big_endian = hex(duration)[2:] if len(big_endian) % 2 != 0: big_endian = '0'+big_endian little_endian = [] for i in range(0, len(big_endian), 2): little_endian.insert(0, big_endian[i:i+2]) for i in range(0, 4-len(little_endian)): little_endian.append('00') command = 'mp' for i in little_endian: command += chr(int(i, 16)) self.con.send_xid_command(command, 0)
Sets the pulse duration for events in miliseconds when activate_line is called
entailment
def activate_line(self, lines=None, bitmask=None, leave_remaining_lines=False): """ Triggers an output line on StimTracker. There are 8 output lines on StimTracker that can be raised in any combination. To raise lines 1 and 7, for example, you pass in the list: activate_line(lines=[1, 7]). To raise a single line, pass in just an integer, or a list with a single element to the lines keyword argument: activate_line(lines=3) or activate_line(lines=[3]) The `lines` argument must either be an Integer, list of Integers, or None. If you'd rather specify a bitmask for setting the lines, you can use the bitmask keyword argument. Bitmask must be a Integer value between 0 and 255 where 0 specifies no lines, and 255 is all lines. For a mapping between lines and their bit values, see the `_lines` class variable. To use this, call the function as so to activate lines 1 and 6: activate_line(bitmask=33) leave_remaining_lines tells the function to only operate on the lines specified. For example, if lines 1 and 8 are active, and you make the following function call: activate_line(lines=4, leave_remaining_lines=True) This will result in lines 1, 4 and 8 being active. If you call activate_line(lines=4) with leave_remaining_lines=False (the default), if lines 1 and 8 were previously active, only line 4 will be active after the call. """ if lines is None and bitmask is None: raise ValueError('Must set one of lines or bitmask') if lines is not None and bitmask is not None: raise ValueError('Can only set one of lines or bitmask') if bitmask is not None: if bitmask not in range(0, 256): raise ValueError('bitmask must be an integer between ' '0 and 255') if lines is not None: if not isinstance(lines, list): lines = [lines] bitmask = 0 for l in lines: if l < 1 or l > 8: raise ValueError('Line numbers must be between 1 and 8 ' '(inclusive)') bitmask |= self._lines[l] self.con.set_digital_output_lines(bitmask, leave_remaining_lines)
Triggers an output line on StimTracker. There are 8 output lines on StimTracker that can be raised in any combination. To raise lines 1 and 7, for example, you pass in the list: activate_line(lines=[1, 7]). To raise a single line, pass in just an integer, or a list with a single element to the lines keyword argument: activate_line(lines=3) or activate_line(lines=[3]) The `lines` argument must either be an Integer, list of Integers, or None. If you'd rather specify a bitmask for setting the lines, you can use the bitmask keyword argument. Bitmask must be a Integer value between 0 and 255 where 0 specifies no lines, and 255 is all lines. For a mapping between lines and their bit values, see the `_lines` class variable. To use this, call the function as so to activate lines 1 and 6: activate_line(bitmask=33) leave_remaining_lines tells the function to only operate on the lines specified. For example, if lines 1 and 8 are active, and you make the following function call: activate_line(lines=4, leave_remaining_lines=True) This will result in lines 1, 4 and 8 being active. If you call activate_line(lines=4) with leave_remaining_lines=False (the default), if lines 1 and 8 were previously active, only line 4 will be active after the call.
entailment
def clear_line(self, lines=None, bitmask=None, leave_remaining_lines=False): """ The inverse of activate_line. If a line is active, it deactivates it. This has the same parameters as activate_line() """ if lines is None and bitmask is None: raise ValueError('Must set one of lines or bitmask') if lines is not None and bitmask is not None: raise ValueError('Can only set one of lines or bitmask') if bitmask is not None: if bitmask not in range(0, 256): raise ValueError('bitmask must be an integer between ' '0 and 255') if lines is not None: if not isinstance(lines, list): lines = [lines] bitmask = 0 for l in lines: if l < 1 or l > 8: raise ValueError('Line numbers must be between 1 and 8 ' '(inclusive)') bitmask |= self._lines[l] self.con.clear_digital_output_lines(bitmask, leave_remaining_lines)
The inverse of activate_line. If a line is active, it deactivates it. This has the same parameters as activate_line()
entailment
def init_device(self): """ Initializes the device with the proper keymaps and name """ try: product_id = int(self._send_command('_d2', 1)) except ValueError: product_id = self._send_command('_d2', 1) if product_id == 0: self._impl = ResponseDevice( self.con, 'Cedrus Lumina LP-400 Response Pad System', lumina_keymap) elif product_id == 1: self._impl = ResponseDevice( self.con, 'Cedrus SV-1 Voice Key', None, 'Voice Response') elif product_id == 2: model_id = int(self._send_command('_d3', 1)) if model_id == 1: self._impl = ResponseDevice( self.con, 'Cedrus RB-530', rb_530_keymap) elif model_id == 2: self._impl = ResponseDevice( self.con, 'Cedrus RB-730', rb_730_keymap) elif model_id == 3: self._impl = ResponseDevice( self.con, 'Cedrus RB-830', rb_830_keymap) elif model_id == 4: self._impl = ResponseDevice( self.con, 'Cedrus RB-834', rb_834_keymap) else: raise XidError('Unknown RB Device') elif product_id == 4: self._impl = StimTracker( self.con, 'Cedrus C-POD') elif product_id == b'S': self._impl = StimTracker( self.con, 'Cedrus StimTracker') elif product_id == -99: raise XidError('Invalid XID device')
Initializes the device with the proper keymaps and name
entailment
def _send_command(self, command, expected_bytes): """ Send an XID command to the device """ response = self.con.send_xid_command(command, expected_bytes) return response
Send an XID command to the device
entailment
def get_xid_devices(): """ Returns a list of all Xid devices connected to your computer. """ devices = [] scanner = XidScanner() for i in range(scanner.device_count()): com = scanner.device_at_index(i) com.open() device = XidDevice(com) devices.append(device) return devices
Returns a list of all Xid devices connected to your computer.
entailment
def get_xid_device(device_number): """ returns device at a given index. Raises ValueError if the device at the passed in index doesn't exist. """ scanner = XidScanner() com = scanner.device_at_index(device_number) com.open() return XidDevice(com)
returns device at a given index. Raises ValueError if the device at the passed in index doesn't exist.
entailment
def connect(self, receiver): """Append receiver.""" if not callable(receiver): raise ValueError('Invalid receiver: %s' % receiver) self.receivers.append(receiver)
Append receiver.
entailment
def disconnect(self, receiver): """Remove receiver.""" try: self.receivers.remove(receiver) except ValueError: raise ValueError('Unknown receiver: %s' % receiver)
Remove receiver.
entailment
def send(self, instance, *args, **kwargs): """Send signal.""" for receiver in self.receivers: receiver(instance, *args, **kwargs)
Send signal.
entailment
def select(cls, *args, **kwargs): """Support read slaves.""" query = super(Model, cls).select(*args, **kwargs) query.database = cls._get_read_database() return query
Support read slaves.
entailment
def save(self, force_insert=False, **kwargs): """Send signals.""" created = force_insert or not bool(self.pk) self.pre_save.send(self, created=created) super(Model, self).save(force_insert=force_insert, **kwargs) self.post_save.send(self, created=created)
Send signals.
entailment
def delete_instance(self, *args, **kwargs): """Send signals.""" self.pre_delete.send(self) super(Model, self).delete_instance(*args, **kwargs) self.post_delete.send(self)
Send signals.
entailment
def get_database(obj, **params): """Get database from given URI/Object.""" if isinstance(obj, string_types): return connect(obj, **params) return obj
Get database from given URI/Object.
entailment
def init_app(self, app, database=None): """Initialize application.""" # Register application if not app: raise RuntimeError('Invalid application.') self.app = app if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['peewee'] = self app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {}) app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite') app.config.setdefault('PEEWEE_MANUAL', False) app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations') app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory') app.config.setdefault('PEEWEE_MODELS_CLASS', Model) app.config.setdefault('PEEWEE_MODELS_IGNORE', []) app.config.setdefault('PEEWEE_MODELS_MODULE', '') app.config.setdefault('PEEWEE_READ_SLAVES', '') app.config.setdefault('PEEWEE_USE_READ_SLAVES', True) # Initialize database params = app.config['PEEWEE_CONNECTION_PARAMS'] database = database or app.config.get('PEEWEE_DATABASE_URI') if not database: raise RuntimeError('Invalid database.') database = get_database(database, **params) slaves = app.config['PEEWEE_READ_SLAVES'] if isinstance(slaves, string_types): slaves = slaves.split(',') self.slaves = [get_database(slave, **params) for slave in slaves if slave] self.database.initialize(database) if self.database.database == ':memory:': app.config['PEEWEE_MANUAL'] = True if not app.config['PEEWEE_MANUAL']: app.before_request(self.connect) app.teardown_request(self.close)
Initialize application.
entailment
def close(self, response): """Close connection to database.""" LOGGER.info('Closing [%s]', os.getpid()) if not self.database.is_closed(): self.database.close() return response
Close connection to database.
entailment
def Model(self): """Bind model to self database.""" Model_ = self.app.config['PEEWEE_MODELS_CLASS'] meta_params = {'database': self.database} if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']: meta_params['read_slaves'] = self.slaves Meta = type('Meta', (), meta_params) return type('Model', (Model_,), {'Meta': Meta})
Bind model to self database.
entailment
def models(self): """Return self.application models.""" Model_ = self.app.config['PEEWEE_MODELS_CLASS'] ignore = self.app.config['PEEWEE_MODELS_IGNORE'] models = [] if Model_ is not Model: try: mod = import_module(self.app.config['PEEWEE_MODELS_MODULE']) for model in dir(mod): models = getattr(mod, model) if not isinstance(model, pw.Model): continue models.append(models) except ImportError: return models elif isinstance(Model_, BaseSignalModel): models = BaseSignalModel.models return [m for m in models if m._meta.name not in ignore]
Return self.application models.
entailment
def cmd_create(self, name, auto=False): """Create a new migration.""" LOGGER.setLevel('INFO') LOGGER.propagate = 0 router = Router(self.database, migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'], migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE']) if auto: auto = self.models router.create(name, auto=auto)
Create a new migration.
entailment
def cmd_migrate(self, name=None, fake=False): """Run migrations.""" from peewee_migrate.router import Router, LOGGER LOGGER.setLevel('INFO') LOGGER.propagate = 0 router = Router(self.database, migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'], migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE']) migrations = router.run(name, fake=fake) if migrations: LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
Run migrations.
entailment
def cmd_rollback(self, name): """Rollback migrations.""" from peewee_migrate.router import Router, LOGGER LOGGER.setLevel('INFO') LOGGER.propagate = 0 router = Router(self.database, migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'], migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE']) router.rollback(name)
Rollback migrations.
entailment
def cmd_list(self): """List migrations.""" from peewee_migrate.router import Router, LOGGER LOGGER.setLevel('DEBUG') LOGGER.propagate = 0 router = Router(self.database, migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'], migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE']) LOGGER.info('Migrations are done:') LOGGER.info('\n'.join(router.done)) LOGGER.info('') LOGGER.info('Migrations are undone:') LOGGER.info('\n'.join(router.diff))
List migrations.
entailment
def cmd_merge(self): """Merge migrations.""" from peewee_migrate.router import Router, LOGGER LOGGER.setLevel('DEBUG') LOGGER.propagate = 0 router = Router(self.database, migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'], migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE']) router.merge()
Merge migrations.
entailment
def manager(self): """Integrate a Flask-Script.""" from flask_script import Manager, Command manager = Manager(usage="Migrate database.") manager.add_command('create', Command(self.cmd_create)) manager.add_command('migrate', Command(self.cmd_migrate)) manager.add_command('rollback', Command(self.cmd_rollback)) manager.add_command('list', Command(self.cmd_list)) manager.add_command('merge', Command(self.cmd_merge)) return manager
Integrate a Flask-Script.
entailment
def restart_program(): """ DOES NOT WORK WELL WITH MOPIDY Hack from https://www.daniweb.com/software-development/python/code/260268/restart-your-python-program to support updating the settings, since mopidy is not able to do that yet Restarts the current program Note: this function does not return. Any cleanup action (like saving data) must be done before calling this function """ python = sys.executable os.execl(python, python, * sys.argv)
DOES NOT WORK WELL WITH MOPIDY Hack from https://www.daniweb.com/software-development/python/code/260268/restart-your-python-program to support updating the settings, since mopidy is not able to do that yet Restarts the current program Note: this function does not return. Any cleanup action (like saving data) must be done before calling this function
entailment
def __load(arff): """ load liac-arff to pandas DataFrame :param dict arff:arff dict created liac-arff :rtype: DataFrame :return: pandas DataFrame """ attrs = arff['attributes'] attrs_t = [] for attr in attrs: if isinstance(attr[1], list): attrs_t.append("%s@{%s}" % (attr[0], ','.join(attr[1]))) else: attrs_t.append("%s@%s" % (attr[0], attr[1])) df = pd.DataFrame(data=arff['data'], columns=attrs_t) return df
load liac-arff to pandas DataFrame :param dict arff:arff dict created liac-arff :rtype: DataFrame :return: pandas DataFrame
entailment
def __dump(df,relation='data',description=''): """ dump DataFrame to liac-arff :param DataFrame df: :param str relation: :param str description: :rtype: dict :return: liac-arff dict """ attrs = [] for col in df.columns: attr = col.split('@') if attr[1].count('{')>0 and attr[1].count('}')>0: vals = attr[1].replace('{','').replace('}','').split(',') attrs.append((attr[0],vals)) else: attrs.append((attr[0],attr[1])) data = list(df.values) result = { 'attributes':attrs, 'data':data, 'description':description, 'relation':relation } return result
dump DataFrame to liac-arff :param DataFrame df: :param str relation: :param str description: :rtype: dict :return: liac-arff dict
entailment
def dump(df,fp): """ dump DataFrame to file :param DataFrame df: :param file fp: """ arff = __dump(df) liacarff.dump(arff,fp)
dump DataFrame to file :param DataFrame df: :param file fp:
entailment
def markup_line(text, offset, marker='>>!<<'): """Insert `marker` at `offset` into `text`, and return the marked line. .. code-block:: python >>> markup_line('0\\n1234\\n56', 3) 1>>!<<234 """ begin = text.rfind('\n', 0, offset) begin += 1 end = text.find('\n', offset) if end == -1: end = len(text) return text[begin:offset] + marker + text[offset:end]
Insert `marker` at `offset` into `text`, and return the marked line. .. code-block:: python >>> markup_line('0\\n1234\\n56', 3) 1>>!<<234
entailment
def tokenize_init(spec): """Initialize a tokenizer. Should only be called by the :func:`~textparser.Parser.tokenize` method in the parser. """ tokens = [Token('__SOF__', '__SOF__', 0)] re_token = '|'.join([ '(?P<{}>{})'.format(name, regex) for name, regex in spec ]) return tokens, re_token
Initialize a tokenizer. Should only be called by the :func:`~textparser.Parser.tokenize` method in the parser.
entailment
def tokenize(self, text): """Tokenize given string `text`, and return a list of tokens. Raises :class:`~textparser.TokenizeError` on failure. This method should only be called by :func:`~textparser.Parser.parse()`, but may very well be overridden if the default implementation does not match the parser needs. """ names, specs = self._unpack_token_specs() keywords = self.keywords() tokens, re_token = tokenize_init(specs) for mo in re.finditer(re_token, text, re.DOTALL): kind = mo.lastgroup if kind == 'SKIP': pass elif kind != 'MISMATCH': value = mo.group(kind) if value in keywords: kind = value if kind in names: kind = names[kind] tokens.append(Token(kind, value, mo.start())) else: raise TokenizeError(text, mo.start()) return tokens
Tokenize given string `text`, and return a list of tokens. Raises :class:`~textparser.TokenizeError` on failure. This method should only be called by :func:`~textparser.Parser.parse()`, but may very well be overridden if the default implementation does not match the parser needs.
entailment
def parse(self, text, token_tree=False, match_sof=False): """Parse given string `text` and return the parse tree. Raises :class:`~textparser.ParseError` on failure. Returns a parse tree of tokens if `token_tree` is ``True``. .. code-block:: python >>> MyParser().parse('Hello, World!') ['Hello', ',', 'World', '!'] >>> tree = MyParser().parse('Hello, World!', token_tree=True) >>> from pprint import pprint >>> pprint(tree) [Token(kind='WORD', value='Hello', offset=0), Token(kind=',', value=',', offset=5), Token(kind='WORD', value='World', offset=7), Token(kind='!', value='!', offset=12)] """ try: tokens = self.tokenize(text) if len(tokens) == 0 or tokens[-1].kind != '__EOF__': tokens.append(Token('__EOF__', '__EOF__', len(text))) if not match_sof: if len(tokens) > 0 and tokens[0].kind == '__SOF__': del tokens[0] return Grammar(self.grammar()).parse(tokens, token_tree) except (TokenizeError, GrammarError) as e: raise ParseError(text, e.offset)
Parse given string `text` and return the parse tree. Raises :class:`~textparser.ParseError` on failure. Returns a parse tree of tokens if `token_tree` is ``True``. .. code-block:: python >>> MyParser().parse('Hello, World!') ['Hello', ',', 'World', '!'] >>> tree = MyParser().parse('Hello, World!', token_tree=True) >>> from pprint import pprint >>> pprint(tree) [Token(kind='WORD', value='Hello', offset=0), Token(kind=',', value=',', offset=5), Token(kind='WORD', value='World', offset=7), Token(kind='!', value='!', offset=12)]
entailment
def x_at_y(self, y, reverse=False): """ Calculates inverse profile - for given y returns x such that f(x) = y If given y is not found in the self.y, then interpolation is used. By default returns first result looking from left, if reverse argument set to True, looks from right. If y is outside range of self.y then np.nan is returned. Use inverse lookup to get x-coordinate of first point: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(5.)) 0.0 Use inverse lookup to get x-coordinate of second point, looking from left: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(10.)) 0.1 Use inverse lookup to get x-coordinate of fourth point, looking from right: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(10., reverse=True)) 0.3 Use interpolation between first two points: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(7.5)) 0.05 Looking for y below self.y range: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(2.0)) nan Looking for y above self.y range: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(22.0)) nan :param y: reference value :param reverse: boolean value - direction of lookup :return: x value corresponding to given y or NaN if not found """ logger.info('Running %(name)s.y_at_x(y=%(y)s, reverse=%(rev)s)', {"name": self.__class__, "y": y, "rev": reverse}) # positive or negative direction handles x_handle, y_handle = self.x, self.y if reverse: x_handle, y_handle = self.x[::-1], self.y[::-1] # find the index of first value in self.y greater or equal than y cond = y_handle >= y ind = np.argmax(cond) # two boundary conditions where x cannot be found: # A) y > max(self.y) # B) y < min(self.y) # A) if y > max(self.y) then condition self.y >= y # will never be satisfied # np.argmax( cond ) will be equal 0 and cond[ind] will be False if not cond[ind]: return np.nan # B) if y < 
min(self.y) then condition self.y >= y # will be satisfied on first item # np.argmax(cond) will be equal 0, # to exclude situation that y_handle[0] = y # we also check if y < y_handle[0] if ind == 0 and y < y_handle[0]: return np.nan # use lookup if y in self.y: if cond[ind] and y_handle[ind] == y: return x_handle[ind] # alternatively - pure python implementation # return x_handle[ind] - \ # ((x_handle[ind] - x_handle[ind - 1]) / \ # (y_handle[ind] - y_handle[ind - 1])) * \ # (y_handle[ind] - y) # use interpolation sl = slice(ind - 1, ind + 1) return np.interp(y, y_handle[sl], x_handle[sl])
Calculates inverse profile - for given y returns x such that f(x) = y If given y is not found in the self.y, then interpolation is used. By default returns first result looking from left, if reverse argument set to True, looks from right. If y is outside range of self.y then np.nan is returned. Use inverse lookup to get x-coordinate of first point: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(5.)) 0.0 Use inverse lookup to get x-coordinate of second point, looking from left: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(10.)) 0.1 Use inverse lookup to get x-coordinate of fourth point, looking from right: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(10., reverse=True)) 0.3 Use interpolation between first two points: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(7.5)) 0.05 Looking for y below self.y range: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(2.0)) nan Looking for y above self.y range: >>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\ .x_at_y(22.0)) nan :param y: reference value :param reverse: boolean value - direction of lookup :return: x value corresponding to given y or NaN if not found
entailment
def width(self, level): """ Width at given level :param level: :return: """ return self.x_at_y(level, reverse=True) - self.x_at_y(level)
Width at given level :param level: :return:
entailment
def normalize(self, dt, allow_cast=True): """ Normalize to 1 over [-dt, +dt] area, if allow_cast is set to True, division not in place and casting may occur. If division in place is not possible and allow_cast is False an exception is raised. >>> a = Profile([[0, 0], [1, 5], [2, 10], [3, 5], [4, 0]]) >>> a.normalize(1, allow_cast=True) >>> print(a.y) [0. 2. 4. 2. 0.] :param dt: :param allow_cast: """ if dt <= 0: raise ValueError("Expected positive input") logger.info('Running %(name)s.normalize(dt=%(dt)s)', {"name": self.__class__, "dt": dt}) try: ave = np.average(self.y[np.fabs(self.x) <= dt]) except RuntimeWarning as e: logger.error('in normalize(). self class is %(name)s, dt=%(dt)s', {"name": self.__class__, "dt": dt}) raise Exception("Scaling factor error: {0}".format(e)) try: self.y /= ave except TypeError as e: logger.warning("Division in place is impossible: %s", e) if allow_cast: self.y = self.y / ave else: logger.error("Division in place impossible - allow_cast flag set to True should help") raise
Normalize to 1 over [-dt, +dt] area, if allow_cast is set to True, division not in place and casting may occur. If division in place is not possible and allow_cast is False an exception is raised. >>> a = Profile([[0, 0], [1, 5], [2, 10], [3, 5], [4, 0]]) >>> a.normalize(1, allow_cast=True) >>> print(a.y) [0. 2. 4. 2. 0.] :param dt: :param allow_cast:
entailment
def rescale(self, factor=1.0, allow_cast=True): """ Rescales self.y by given factor, if allow_cast is set to True and division in place is impossible - casting and not in place division may occur occur. If in place is impossible and allow_cast is set to False - an exception is raised. Check simple rescaling by 2 with no casting >>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.float) >>> c.rescale(2, allow_cast=False) >>> print(c.y) [0. 2.5 5. ] Check rescaling with floor division >>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.int) >>> c.rescale(1.5, allow_cast=True) >>> print(c.y) [0 3 6] >>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.int) >>> c.rescale(-1, allow_cast=True) >>> print(c.y) [ 0 -5 -10] :param factor: rescaling factor, should be a number :param allow_cast: bool - allow division not in place """ try: self.y /= factor except TypeError as e: logger.warning("Division in place is impossible: %s", e) if allow_cast: self.y = self.y / factor else: logger.error("allow_cast flag set to True should help") raise
Rescales self.y by given factor, if allow_cast is set to True and division in place is impossible - casting and not in place division may occur occur. If in place is impossible and allow_cast is set to False - an exception is raised. Check simple rescaling by 2 with no casting >>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.float) >>> c.rescale(2, allow_cast=False) >>> print(c.y) [0. 2.5 5. ] Check rescaling with floor division >>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.int) >>> c.rescale(1.5, allow_cast=True) >>> print(c.y) [0 3 6] >>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.int) >>> c.rescale(-1, allow_cast=True) >>> print(c.y) [ 0 -5 -10] :param factor: rescaling factor, should be a number :param allow_cast: bool - allow division not in place
entailment
def change_domain(self, domain): """ Creating new Curve object in memory with domain passed as a parameter. New domain must include in the original domain. Copies values from original curve and uses interpolation to calculate values for new points in domain. Calculate y - values of example curve with changed domain: >>> print(Curve([[0,0], [5, 5], [10, 0]])\ .change_domain([1, 2, 8, 9]).y) [1. 2. 2. 1.] :param domain: set of points representing new domain. Might be a list or np.array. :return: new Curve object with domain set by 'domain' parameter """ logger.info('Running %(name)s.change_domain() with new domain range:[%(ymin)s, %(ymax)s]', {"name": self.__class__, "ymin": np.min(domain), "ymax": np.max(domain)}) # check if new domain includes in the original domain if np.max(domain) > np.max(self.x) or np.min(domain) < np.min(self.x): logger.error('Old domain range: [%(xmin)s, %(xmax)s] does not include new domain range:' '[%(ymin)s, %(ymax)s]', {"xmin": np.min(self.x), "xmax": np.max(self.x), "ymin": np.min(domain), "ymax": np.max(domain)}) raise ValueError('in change_domain():' 'the old domain does not include the new one') y = np.interp(domain, self.x, self.y) # We need to join together domain and values (y) because we are recreating Curve object # (we pass it as argument to self.__class__) # np.dstack((arrays), axis=1) joins given arrays like np.dstack() but it also nests the result # in additional list and this is the reason why we use [0] to remove this extra layer of list like this: # np.dstack([[0, 5, 10], [0, 0, 0]]) gives [[[ 0, 0], [ 5, 0], [10, 0]]] so use dtack()[0] # to get this: [[0,0], [5, 5], [10, 0]] # which is a 2 dimensional array and can be used to create a new Curve object obj = self.__class__(np.dstack((domain, y))[0], **self.__dict__['metadata']) return obj
Creating new Curve object in memory with domain passed as a parameter. New domain must include in the original domain. Copies values from original curve and uses interpolation to calculate values for new points in domain. Calculate y - values of example curve with changed domain: >>> print(Curve([[0,0], [5, 5], [10, 0]])\ .change_domain([1, 2, 8, 9]).y) [1. 2. 2. 1.] :param domain: set of points representing new domain. Might be a list or np.array. :return: new Curve object with domain set by 'domain' parameter
entailment
def rebinned(self, step=0.1, fixp=0): """ Provides effective way to compute new domain basing on step and fixp parameters. Then using change_domain() method to create new object with calculated domain and returns it. fixp doesn't have to be inside original domain. Return domain of a new curve specified by fixp=0 and step=1 and another Curve object: >>> print(Curve([[0,0], [5, 5], [10, 0]]).rebinned(1, 0).x) [ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10.] :param step: step size of new domain :param fixp: fixed point one of the points in new domain :return: new Curve object with domain specified by step and fixp parameters """ logger.info('Running %(name)s.rebinned(step=%(st)s, fixp=%(fx)s)', {"name": self.__class__, "st": step, "fx": fixp}) a, b = (np.min(self.x), np.max(self.x)) count_start = abs(fixp - a) / step count_stop = abs(fixp - b) / step # depending on position of fixp with respect to the original domain # 3 cases may occur: if fixp < a: count_start = math.ceil(count_start) count_stop = math.floor(count_stop) elif fixp > b: count_start = -math.floor(count_start) count_stop = -math.ceil(count_stop) else: count_start = -count_start count_stop = count_stop domain = [fixp + n * step for n in range(int(count_start), int(count_stop) + 1)] return self.change_domain(domain)
Provides effective way to compute new domain basing on step and fixp parameters. Then using change_domain() method to create new object with calculated domain and returns it. fixp doesn't have to be inside original domain. Return domain of a new curve specified by fixp=0 and step=1 and another Curve object: >>> print(Curve([[0,0], [5, 5], [10, 0]]).rebinned(1, 0).x) [ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10.] :param step: step size of new domain :param fixp: fixed point one of the points in new domain :return: new Curve object with domain specified by step and fixp parameters
entailment
def evaluate_at_x(self, arg, def_val=0): """ Returns Y value at arg of self. Arg can be a scalar, but also might be np.array or other iterable (like list). If domain of self is not wide enough to interpolate the value of Y, method will return def_val for those arguments instead. Check the interpolation when arg in domain of self: >>> Curve([[0, 0], [2, 2], [4, 4]]).evaluate_at_x([1, 2 ,3]) array([1., 2., 3.]) Check if behavior of the method is correct when arg id partly outside the domain: >>> Curve([[0, 0], [2, 2], [4, 4]]).evaluate_at_x(\ [-1, 1, 2 ,3, 5], 100) array([100., 1., 2., 3., 100.]) :param arg: x-value to calculate Y (may be an array or list as well) :param def_val: default value to return if can't interpolate at arg :return: np.array of Y-values at arg. If arg is a scalar, will return scalar as well """ y = np.interp(arg, self.x, self.y, left=def_val, right=def_val) return y
Returns Y value at arg of self. Arg can be a scalar, but also might be np.array or other iterable (like list). If domain of self is not wide enough to interpolate the value of Y, method will return def_val for those arguments instead. Check the interpolation when arg in domain of self: >>> Curve([[0, 0], [2, 2], [4, 4]]).evaluate_at_x([1, 2 ,3]) array([1., 2., 3.]) Check if behavior of the method is correct when arg id partly outside the domain: >>> Curve([[0, 0], [2, 2], [4, 4]]).evaluate_at_x(\ [-1, 1, 2 ,3, 5], 100) array([100., 1., 2., 3., 100.]) :param arg: x-value to calculate Y (may be an array or list as well) :param def_val: default value to return if can't interpolate at arg :return: np.array of Y-values at arg. If arg is a scalar, will return scalar as well
entailment
def subtract(self, curve2, new_obj=False): """ Method that calculates difference between 2 curves (or subclasses of curves). Domain of self must be in domain of curve2 what means min(self.x) >= min(curve2.x) and max(self.x) <= max(curve2.x). Might modify self, and can return the result or None Use subtract as -= operator, check whether returned value is None: >>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\ Curve([[-1, 1], [5, 1]])) is None True Use subtract again but return a new object this time. >>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\ Curve([[-1, 1], [5, 1]]), new_obj=True).y DataSet([-1., 0., 1., 0.]) Try using wrong inputs to create a new object, and check whether it throws an exception: >>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\ Curve([[1, -1], [2, -1]]), new_obj=True) is None Traceback (most recent call last): ... Exception: curve2 does not include self domain :param curve2: second object to calculate difference :param new_obj: if True, method is creating new object instead of modifying self :return: None if new_obj is False (but will modify self) or type(self) object containing the result """ # domain1 = [a1, b1] # domain2 = [a2, b2] a1, b1 = np.min(self.x), np.max(self.x) a2, b2 = np.min(curve2.x), np.max(curve2.x) # check whether domain condition is satisfied if a2 > a1 or b2 < b1: logger.error("Domain of self must be in domain of given curve") raise Exception("curve2 does not include self domain") # if we want to create and return a new object # rather then modify existing one if new_obj: return functions.subtract(self, curve2.change_domain(self.x)) values = curve2.evaluate_at_x(self.x) self.y = self.y - values return None
Method that calculates difference between 2 curves (or subclasses of curves). Domain of self must be in domain of curve2 what means min(self.x) >= min(curve2.x) and max(self.x) <= max(curve2.x). Might modify self, and can return the result or None Use subtract as -= operator, check whether returned value is None: >>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\ Curve([[-1, 1], [5, 1]])) is None True Use subtract again but return a new object this time. >>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\ Curve([[-1, 1], [5, 1]]), new_obj=True).y DataSet([-1., 0., 1., 0.]) Try using wrong inputs to create a new object, and check whether it throws an exception: >>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\ Curve([[1, -1], [2, -1]]), new_obj=True) is None Traceback (most recent call last): ... Exception: curve2 does not include self domain :param curve2: second object to calculate difference :param new_obj: if True, method is creating new object instead of modifying self :return: None if new_obj is False (but will modify self) or type(self) object containing the result
entailment
def alert(text='', title='', button='OK'): """Displays a simple message box with text and a single OK button. Returns the text of the button clicked on.""" messageBoxFunc(0, text, title, MB_OK | MB_SETFOREGROUND | MB_TOPMOST) return button
Displays a simple message box with text and a single OK button. Returns the text of the button clicked on.
entailment
def confirm(text='', title='', buttons=['OK', 'Cancel']): """Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on.""" retVal = messageBoxFunc(0, text, title, MB_OKCANCEL | MB_ICONQUESTION | MB_SETFOREGROUND | MB_TOPMOST) if retVal == 1 or len(buttons) == 1: return buttons[0] elif retVal == 2: return buttons[1] else: assert False, 'Unexpected return value from MessageBox: %s' % (retVal)
Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on.
entailment
def subtract(curve1, curve2, def_val=0): """ Function calculates difference between curve1 and curve2 and returns new object which domain is an union of curve1 and curve2 domains Returned object is of type type(curve1) and has same metadata as curve1 object :param curve1: first curve to calculate the difference :param curve2: second curve to calculate the difference :param def_val: default value for points that cannot be interpolated :return: new object of type type(curve1) with element-wise difference (using interpolation if necessary) """ coord1 = np.union1d(curve1.x, curve2.x) y1 = curve1.evaluate_at_x(coord1, def_val) y2 = curve2.evaluate_at_x(coord1, def_val) coord2 = y1 - y2 # the below is explained at the end of curve.Curve.change_domain() obj = curve1.__class__(np.dstack((coord1, coord2))[0], **curve1.__dict__['metadata']) return obj
Function calculates difference between curve1 and curve2 and returns new object which domain is an union of curve1 and curve2 domains Returned object is of type type(curve1) and has same metadata as curve1 object :param curve1: first curve to calculate the difference :param curve2: second curve to calculate the difference :param def_val: default value for points that cannot be interpolated :return: new object of type type(curve1) with element-wise difference (using interpolation if necessary)
entailment
def medfilt(vector, window): """ Apply a window-length median filter to a 1D array vector. Should get rid of 'spike' value 15. >>> print(medfilt(np.array([1., 15., 1., 1., 1.]), 3)) [1. 1. 1. 1. 1.] The 'edge' case is a bit tricky... >>> print(medfilt(np.array([15., 1., 1., 1., 1.]), 3)) [15. 1. 1. 1. 1.] Inspired by: https://gist.github.com/bhawkins/3535131 """ if not window % 2 == 1: raise ValueError("Median filter length must be odd.") if not vector.ndim == 1: raise ValueError("Input must be one-dimensional.") k = (window - 1) // 2 # window movement result = np.zeros((len(vector), window), dtype=vector.dtype) result[:, k] = vector for i in range(k): j = k - i result[j:, i] = vector[:-j] result[:j, i] = vector[0] result[:-j, -(i + 1)] = vector[j:] result[-j:, -(i + 1)] = vector[-1] return np.median(result, axis=1)
Apply a window-length median filter to a 1D array vector. Should get rid of 'spike' value 15. >>> print(medfilt(np.array([1., 15., 1., 1., 1.]), 3)) [1. 1. 1. 1. 1.] The 'edge' case is a bit tricky... >>> print(medfilt(np.array([15., 1., 1., 1., 1.]), 3)) [15. 1. 1. 1. 1.] Inspired by: https://gist.github.com/bhawkins/3535131
entailment
def interpn(*args, **kw): """Interpolation on N-D. ai = interpn(x, y, z, ..., a, xi, yi, zi, ...) where the arrays x, y, z, ... define a rectangular grid and a.shape == (len(x), len(y), len(z), ...) are the values interpolate at xi, yi, zi, ... """ method = kw.pop('method', 'cubic') if kw: raise ValueError("Unknown arguments: " % kw.keys()) nd = (len(args)-1)//2 if len(args) != 2*nd+1: raise ValueError("Wrong number of arguments") q = args[:nd] qi = args[nd+1:] a = args[nd] for j in range(nd): #print q[j].shape, a.shape a = interp1d(q[j], a, axis=j, kind=method)(qi[j]) return a
Interpolation on N-D. ai = interpn(x, y, z, ..., a, xi, yi, zi, ...) where the arrays x, y, z, ... define a rectangular grid and a.shape == (len(x), len(y), len(z), ...) are the values interpolate at xi, yi, zi, ...
entailment
def npinterpn(*args, **kw): """Interpolation on N-D. ai = interpn(x, y, z, ..., a, xi, yi, zi, ...) where the arrays x, y, z, ... define a rectangular grid and a.shape == (len(x), len(y), len(z), ...) are the values interpolate at xi, yi, zi, ... """ method = kw.pop('method', 'cubic') if kw: raise ValueError("Unknown arguments: " % kw.keys()) nd = (len(args)-1)//2 if len(args) != 2*nd+1: raise ValueError("Wrong number of arguments") q = args[:nd] qi = args[nd+1:] a = args[nd] for j in range(nd): #print q[j].shape, a.shape a = interp(q[j], a, axis=j, kind=method)(qi[j]) return a
Interpolation on N-D. ai = interpn(x, y, z, ..., a, xi, yi, zi, ...) where the arrays x, y, z, ... define a rectangular grid and a.shape == (len(x), len(y), len(z), ...) are the values interpolate at xi, yi, zi, ...
entailment
def model_fields(model, allow_pk=False, only=None, exclude=None, field_args=None, converter=None): """ Generate a dictionary of fields for a given Peewee model. See `model_form` docstring for description of parameters. """ converter = converter or ModelConverter() field_args = field_args or {} model_fields = list(model._meta.sorted_fields) if not allow_pk: model_fields.pop(0) if only: model_fields = [x for x in model_fields if x.name in only] elif exclude: model_fields = [x for x in model_fields if x.name not in exclude] field_dict = {} for model_field in model_fields: name, field = converter.convert( model, model_field, field_args.get(model_field.name)) field_dict[name] = field return field_dict
Generate a dictionary of fields for a given Peewee model. See `model_form` docstring for description of parameters.
entailment
def model_form(model, base_class=Form, allow_pk=False, only=None, exclude=None, field_args=None, converter=None): """ Create a wtforms Form for a given Peewee model class:: from wtfpeewee.orm import model_form from myproject.myapp.models import User UserForm = model_form(User) :param model: A Peewee model class :param base_class: Base form class to extend from. Must be a ``wtforms.Form`` subclass. :param only: An optional iterable with the property names that should be included in the form. Only these properties will have fields. :param exclude: An optional iterable with the property names that should be excluded from the form. All other properties will have fields. :param field_args: An optional dictionary of field names mapping to keyword arguments used to construct each field object. :param converter: A converter to generate the fields based on the model properties. If not set, ``ModelConverter`` is used. """ field_dict = model_fields(model, allow_pk, only, exclude, field_args, converter) return type(model.__name__ + 'Form', (base_class, ), field_dict)
Create a wtforms Form for a given Peewee model class:: from wtfpeewee.orm import model_form from myproject.myapp.models import User UserForm = model_form(User) :param model: A Peewee model class :param base_class: Base form class to extend from. Must be a ``wtforms.Form`` subclass. :param only: An optional iterable with the property names that should be included in the form. Only these properties will have fields. :param exclude: An optional iterable with the property names that should be excluded from the form. All other properties will have fields. :param field_args: An optional dictionary of field names mapping to keyword arguments used to construct each field object. :param converter: A converter to generate the fields based on the model properties. If not set, ``ModelConverter`` is used.
entailment
def alert(text='', title='', button=OK_TEXT, root=None, timeout=None): """Displays a simple message box with text and a single OK button. Returns the text of the button clicked on.""" assert TKINTER_IMPORT_SUCCEEDED, 'Tkinter is required for pymsgbox' return _buttonbox(msg=text, title=title, choices=[str(button)], root=root, timeout=timeout)
Displays a simple message box with text and a single OK button. Returns the text of the button clicked on.
entailment
def confirm(text='', title='', buttons=[OK_TEXT, CANCEL_TEXT], root=None, timeout=None): """Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on.""" assert TKINTER_IMPORT_SUCCEEDED, 'Tkinter is required for pymsgbox' return _buttonbox(msg=text, title=title, choices=[str(b) for b in buttons], root=root, timeout=timeout)
Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on.
entailment
def prompt(text='', title='' , default='', root=None, timeout=None): """Displays a message box with text input, and OK & Cancel buttons. Returns the text entered, or None if Cancel was clicked.""" assert TKINTER_IMPORT_SUCCEEDED, 'Tkinter is required for pymsgbox' return __fillablebox(text, title, default=default, mask=None,root=root, timeout=timeout)
Displays a message box with text input, and OK & Cancel buttons. Returns the text entered, or None if Cancel was clicked.
entailment