Dataset schema (one record per function below):
  _id               string (length 2-7)
  title             string (length 1-88)
  partition         string (3 classes)
  text              string (length 75-19.8k)
  language          string (1 class)
  meta_information  dict
q24800
VerticalPyramid._get_separated_values
train
def _get_separated_values(self, secondary=False):
    """Separate values between odd and even series stacked"""
    series = self.secondary_series if secondary else self.series
    positive_vals = map(
        sum,
        zip(
            *[
                serie.safe_values
                for index, serie in enumerate(series)
                if index % 2
            ]
        )
    )
    negative_vals = map(
        sum,
        zip(
            *[
                serie.safe_values
                for index, serie in enumerate(series)
                if not index % 2
            ]
        )
    )
    return list(positive_vals), list(negative_vals)
python
{ "resource": "" }
q24801
VerticalPyramid._compute_box
train
def _compute_box(self, positive_vals, negative_vals):
    """Compute Y min and max"""
    max_ = max(
        max(positive_vals or [self.zero]),
        max(negative_vals or [self.zero])
    )
    if self.range and self.range[0] is not None:
        self._box.ymin = self.range[0]
    else:
        self._box.ymin = -max_
    if self.range and self.range[1] is not None:
        self._box.ymax = self.range[1]
    else:
        self._box.ymax = max_
python
{ "resource": "" }
q24802
BaseGraph.setup
train
def setup(self, **kwargs):
    """Set up the transient state prior to rendering"""
    # Keep labels in case of map
    if getattr(self, 'x_labels', None) is not None:
        self.x_labels = list(self.x_labels)
    if getattr(self, 'y_labels', None) is not None:
        self.y_labels = list(self.y_labels)
    self.state = State(self, **kwargs)
    if isinstance(self.style, type):
        self.style = self.style()
    self.series = self.prepare_values([
        rs for rs in self.raw_series if not rs[1].get('secondary')
    ]) or []
    self.secondary_series = self.prepare_values([
        rs for rs in self.raw_series if rs[1].get('secondary')
    ], len(self.series)) or []
    self.horizontal = getattr(self, 'horizontal', False)
    self.svg = Svg(self)
    self._x_labels = None
    self._y_labels = None
    self._x_2nd_labels = None
    self._y_2nd_labels = None
    self.nodes = {}
    self.margin_box = Margin(
        self.margin_top or self.margin,
        self.margin_right or self.margin,
        self.margin_bottom or self.margin,
        self.margin_left or self.margin
    )
    self._box = Box()
    self.view = None
    if self.logarithmic and self.zero == 0:
        # Explicit min to avoid interpolation dependency
        positive_values = list(
            filter(
                lambda x: x > 0,
                [
                    val[1] or 1 if self._dual else val
                    for serie in self.series
                    for val in serie.safe_values
                ]
            )
        )
        self.zero = min(positive_values or (1, )) or 1
    if self._len < 3:
        self.interpolate = None
    self._draw()
    self.svg.pre_render()
python
{ "resource": "" }
q24803
rgb_to_hsl
train
def rgb_to_hsl(r, g, b):
    """Convert a color in r, g, b to a color in h, s, l"""
    r = r or 0
    g = g or 0
    b = b or 0
    r /= 255
    g /= 255
    b /= 255
    max_ = max((r, g, b))
    min_ = min((r, g, b))
    d = max_ - min_
    if not d:
        h = 0
    elif r == max_:  # compare by value; `is` on floats relies on object identity
        h = 60 * (g - b) / d
    elif g == max_:
        h = 60 * (b - r) / d + 120
    else:
        h = 60 * (r - g) / d + 240
    l = .5 * (max_ + min_)
    if not d:
        s = 0
    elif l < 0.5:
        s = .5 * d / l
    else:
        s = .5 * d / (1 - l)
    return tuple(map(normalize_float, (h % 360, s * 100, l * 100)))
python
{ "resource": "" }
q24804
hsl_to_rgb
train
def hsl_to_rgb(h, s, l):
    """Convert a color in h, s, l to a color in r, g, b"""
    h /= 360
    s /= 100
    l /= 100

    m2 = l * (s + 1) if l <= .5 else l + s - l * s
    m1 = 2 * l - m2

    def h_to_rgb(h):
        h = h % 1
        if 6 * h < 1:
            return m1 + 6 * h * (m2 - m1)
        if 2 * h < 1:
            return m2
        if 3 * h < 2:
            return m1 + 6 * (2 / 3 - h) * (m2 - m1)
        return m1

    r, g, b = map(
        lambda x: round(x * 255),
        map(h_to_rgb, (h + 1 / 3, h, h - 1 / 3))
    )
    return r, g, b
python
{ "resource": "" }
q24805
unparse_color
train
def unparse_color(r, g, b, a, type):
    """
    Take the r, g, b, a color values and give back a type css color string.
    This is the inverse function of parse_color
    """
    if type == '#rgb':
        # Don't lose precision on rgb shortcut
        if r % 17 == 0 and g % 17 == 0 and b % 17 == 0:
            return '#%x%x%x' % (int(r / 17), int(g / 17), int(b / 17))
        type = '#rrggbb'

    if type == '#rgba':
        if r % 17 == 0 and g % 17 == 0 and b % 17 == 0:
            return '#%x%x%x%x' % (
                int(r / 17), int(g / 17), int(b / 17), int(a * 15)
            )
        type = '#rrggbbaa'

    if type == '#rrggbb':
        return '#%02x%02x%02x' % (r, g, b)

    if type == '#rrggbbaa':
        return '#%02x%02x%02x%02x' % (r, g, b, int(a * 255))

    if type == 'rgb':
        return 'rgb(%d, %d, %d)' % (r, g, b)

    if type == 'rgba':
        return 'rgba(%d, %d, %d, %g)' % (r, g, b, a)
python
{ "resource": "" }
q24806
_adjust
train
def _adjust(hsl, attribute, percent):
    """Internal adjust function"""
    hsl = list(hsl)
    if attribute > 0:
        hsl[attribute] = _clamp(hsl[attribute] + percent)
    else:
        hsl[attribute] += percent
    return hsl
python
{ "resource": "" }
q24807
adjust
train
def adjust(color, attribute, percent):
    """Adjust an attribute of color by a percent"""
    r, g, b, a, type = parse_color(color)
    r, g, b = hsl_to_rgb(*_adjust(rgb_to_hsl(r, g, b), attribute, percent))
    return unparse_color(r, g, b, a, type)
python
{ "resource": "" }
q24808
do_list
train
def do_list(lookup, term):
    """Matches term glob against short-name."""
    space = lookup.keys()
    matches = fnmatch.filter(space, term)
    return [(m, translate(lookup, m)) for m in matches]
python
{ "resource": "" }
q24809
do_find
train
def do_find(lookup, term):
    """Matches term glob against short-name, keywords and categories."""
    space = defaultdict(list)
    for name in lookup.keys():
        space[name].append(name)
    try:
        iter_lookup = lookup.iteritems()  # Python 2
    except AttributeError:
        iter_lookup = lookup.items()  # Python 3
    for name, definition in iter_lookup:
        for keyword in definition['keywords']:
            space[keyword].append(name)
        space[definition['category']].append(name)
    matches = fnmatch.filter(space.keys(), term)
    results = set()
    for match in matches:
        results.update(space[match])
    return [(r, translate(lookup, r)) for r in results]
python
{ "resource": "" }
q24810
compile
train
def compile(expr, params=None):
    """
    Force compilation of expression for the SQLite target
    """
    from ibis.sql.alchemy import to_sqlalchemy

    return to_sqlalchemy(expr, dialect.make_context(params=params))
python
{ "resource": "" }
q24811
HDFS.put_tarfile
train
def put_tarfile(
    self,
    hdfs_path,
    local_path,
    compression='gzip',
    verbose=None,
    overwrite=False,
):
    """
    Write contents of tar archive to HDFS directly without having to
    decompress it locally first

    Parameters
    ----------
    hdfs_path : string
    local_path : string
    compression : {'gzip', 'bz2', None}
    overwrite : boolean, default False
    verbose : boolean, default None (global default)
    """
    import tarfile

    modes = {None: 'r', 'gzip': 'r:gz', 'bz2': 'r:bz2'}

    if compression not in modes:
        raise ValueError(
            'Invalid compression type {0}'.format(compression)
        )
    mode = modes[compression]

    tf = tarfile.open(local_path, mode=mode)
    for info in tf:
        if not info.isfile():
            continue

        buf = tf.extractfile(info)
        abspath = posixpath.join(hdfs_path, info.path)
        self.put(abspath, buf, verbose=verbose, overwrite=overwrite)
python
{ "resource": "" }
q24812
WebHDFS.delete
train
def delete(self, hdfs_path, recursive=False):
    """Delete a file located at `hdfs_path`."""
    return self.client.delete(hdfs_path, recursive=recursive)
python
{ "resource": "" }
q24813
_build_option_description
train
def _build_option_description(k):
    """Builds a formatted description of a registered option and returns it."""
    o = _get_registered_option(k)
    d = _get_deprecated_option(k)

    buf = ['{} '.format(k)]

    if o.doc:
        doc = '\n'.join(o.doc.strip().splitlines())
    else:
        doc = 'No description available.'

    buf.append(doc)

    if o:
        buf.append(
            '\n [default: {}] [currently: {}]'.format(
                o.defval, _get_option(k, True)
            )
        )

    if d:
        buf.append(
            '\n (Deprecated{})'.format(
                ', use `{}` instead.'.format(d.rkey) if d.rkey else ''
            )
        )

    buf.append('\n\n')
    return ''.join(buf)
python
{ "resource": "" }
q24814
pp_options_list
train
def pp_options_list(keys, width=80, _print=False):
    """ Builds a concise listing of available options, grouped by prefix """
    from textwrap import wrap
    from itertools import groupby

    def pp(name, ks):
        pfx = '- ' + name + '.[' if name else ''
        ls = wrap(
            ', '.join(ks),
            width,
            initial_indent=pfx,
            subsequent_indent=' ',
            break_long_words=False,
        )
        if ls and ls[-1] and name:
            ls[-1] = ls[-1] + ']'
        return ls

    ls = []
    singles = [x for x in sorted(keys) if x.find('.') < 0]
    if singles:
        ls += pp('', singles)
    keys = [x for x in keys if x.find('.') >= 0]

    for k, g in groupby(sorted(keys), lambda x: x[: x.rfind('.')]):
        ks = [x[len(k) + 1 :] for x in list(g)]
        ls += pp(k, ks)
    s = '\n'.join(ls)
    if _print:
        print(s)
    else:
        return s
python
{ "resource": "" }
q24815
all_equal
train
def all_equal(left, right, cache=None):
    """Check whether two objects `left` and `right` are equal.

    Parameters
    ----------
    left : Union[object, Expr, Node]
    right : Union[object, Expr, Node]
    cache : Optional[Dict[Tuple[Node, Node], bool]]
        A dictionary indicating whether two Nodes are equal
    """
    if cache is None:
        cache = {}

    if util.is_iterable(left):
        # check that left and right are equal length iterables and that all
        # of their elements are equal
        return (
            util.is_iterable(right)
            and len(left) == len(right)
            and all(
                itertools.starmap(
                    functools.partial(all_equal, cache=cache),
                    zip(left, right)
                )
            )
        )

    if hasattr(left, 'equals'):
        return left.equals(right, cache=cache)
    return left == right
python
{ "resource": "" }
q24816
SQLClient.table
train
def table(self, name, database=None):
    """
    Create a table expression that references a particular table in the
    database

    Parameters
    ----------
    name : string
    database : string, optional

    Returns
    -------
    table : TableExpr
    """
    qualified_name = self._fully_qualified_name(name, database)
    schema = self._get_table_schema(qualified_name)
    node = self.table_class(qualified_name, schema, self)
    return self.table_expr_class(node)
python
{ "resource": "" }
q24817
SQLClient.sql
train
def sql(self, query):
    """
    Convert a SQL query to an Ibis table expression

    Parameters
    ----------
    query : string

    Returns
    -------
    table : TableExpr
    """
    # Get the schema by adding a LIMIT 0 on to the end of the query. If
    # there is already a limit in the query, we find and remove it
    limited_query = 'SELECT * FROM ({}) t0 LIMIT 0'.format(query)
    schema = self._get_schema_using_query(limited_query)
    return ops.SQLQueryResult(query, schema, self).to_expr()
python
{ "resource": "" }
q24818
SQLClient.raw_sql
train
def raw_sql(self, query, results=False):
    """
    Execute a given query string. Could have unexpected results if the
    query modifies the behavior of the session in a way unknown to Ibis;
    be careful.

    Parameters
    ----------
    query : string
        DML or DDL statement
    results : boolean, default False
        Pass True if the query has a result set

    Returns
    -------
    cur : ImpalaCursor if results=True, None otherwise
        You must call cur.release() after you are finished using the
        cursor.
    """
    return self._execute(query, results=results)
python
{ "resource": "" }
q24819
SQLClient.execute
train
def execute(self, expr, params=None, limit='default', **kwargs):
    """
    Compile and execute Ibis expression using this backend client
    interface, returning results in-memory in the appropriate object type

    Parameters
    ----------
    expr : Expr
    limit : int, default None
        For expressions yielding result sets; retrieve at most this number
        of values/rows. Overrides any limit already set on the expression.
    params : not yet implemented

    Returns
    -------
    output : input type dependent
        Table expressions: pandas.DataFrame
        Array expressions: pandas.Series
        Scalar expressions: Python scalar value
    """
    query_ast = self._build_ast_ensure_limit(expr, limit, params=params)
    result = self._execute_query(query_ast, **kwargs)
    return result
python
{ "resource": "" }
q24820
SQLClient.compile
train
def compile(self, expr, params=None, limit=None):
    """
    Translate expression to one or more queries according to backend target

    Returns
    -------
    output : single query or list of queries
    """
    query_ast = self._build_ast_ensure_limit(expr, limit, params=params)
    return query_ast.compile()
python
{ "resource": "" }
q24821
SQLClient.explain
train
def explain(self, expr, params=None):
    """
    Query for and return the query plan associated with the indicated
    expression or SQL query.

    Returns
    -------
    plan : string
    """
    if isinstance(expr, ir.Expr):
        context = self.dialect.make_context(params=params)
        query_ast = self._build_ast(expr, context)
        if len(query_ast.queries) > 1:
            raise Exception('Multi-query expression')

        query = query_ast.queries[0].compile()
    else:
        query = expr

    statement = 'EXPLAIN {0}'.format(query)

    with self._execute(statement, results=True) as cur:
        result = self._get_list(cur)

    return 'Query:\n{0}\n\n{1}'.format(
        util.indent(query, 2), '\n'.join(result)
    )
python
{ "resource": "" }
q24822
Database.table
train
def table(self, name):
    """
    Return a table expression referencing a table in this database

    Returns
    -------
    table : TableExpr
    """
    qualified_name = self._qualify(name)
    return self.client.table(qualified_name, self.name)
python
{ "resource": "" }
q24823
cleanup
train
def cleanup(test_data, udfs, tmp_data, tmp_db):
    """Cleanup Ibis test data and UDFs"""
    con = make_ibis_client(ENV)

    if udfs:
        # this comes before test_data because the latter clobbers this too
        con.hdfs.rmdir(os.path.join(ENV.test_data_dir, 'udf'))

    if test_data:
        con.drop_database(ENV.test_data_db, force=True)
        con.hdfs.rmdir(ENV.test_data_dir)

    if tmp_data:
        con.hdfs.rmdir(ENV.tmp_dir)

    if tmp_db:
        con.drop_database(ENV.tmp_db, force=True)
python
{ "resource": "" }
q24824
sub_for
train
def sub_for(expr, substitutions):
    """Substitute subexpressions in `expr` with expression to expression
    mapping `substitutions`.

    Parameters
    ----------
    expr : ibis.expr.types.Expr
        An Ibis expression
    substitutions : List[Tuple[ibis.expr.types.Expr, ibis.expr.types.Expr]]
        A mapping from expression to expression. If any subexpression of
        `expr` is equal to any of the keys in `substitutions`, the value
        for that key will replace the corresponding expression in `expr`.

    Returns
    -------
    ibis.expr.types.Expr
        An Ibis expression
    """
    mapping = {k.op(): v for k, v in substitutions}
    substitutor = Substitutor()
    return substitutor.substitute(expr, mapping)
python
{ "resource": "" }
q24825
has_reduction
train
def has_reduction(expr):
    """Does `expr` contain a reduction?

    Parameters
    ----------
    expr : ibis.expr.types.Expr
        An ibis expression

    Returns
    -------
    truth_value : bool
        Whether or not there's at least one reduction in `expr`

    Notes
    -----
    The ``isinstance(op, ops.TableNode)`` check in this function implies
    that we only examine every non-table expression that precedes the
    first table expression.
    """

    def fn(expr):
        op = expr.op()
        if isinstance(op, ops.TableNode):  # don't go below any table nodes
            return lin.halt, None
        if isinstance(op, ops.Reduction):
            return lin.halt, True
        return lin.proceed, None

    reduction_status = lin.traverse(fn, expr)
    return any(reduction_status)
python
{ "resource": "" }
q24826
find_source_table
train
def find_source_table(expr):
    """Find the first table expression observed for each argument that the
    expression depends on

    Parameters
    ----------
    expr : ir.Expr

    Returns
    -------
    table_expr : ir.TableExpr

    Examples
    --------
    >>> import ibis
    >>> t = ibis.table([('a', 'double'), ('b', 'string')], name='t')
    >>> expr = t.mutate(c=t.a + 42.0)
    >>> expr  # doctest: +NORMALIZE_WHITESPACE
    ref_0
    UnboundTable[table]
      name: t
      schema:
        a : float64
        b : string
    Selection[table]
      table:
        Table: ref_0
      selections:
        Table: ref_0
        c = Add[float64*]
          left:
            a = Column[float64*] 'a' from table
              ref_0
          right:
            Literal[float64]
              42.0
    >>> find_source_table(expr)
    UnboundTable[table]
      name: t
      schema:
        a : float64
        b : string
    >>> left = ibis.table([('a', 'int64'), ('b', 'string')])
    >>> right = ibis.table([('c', 'int64'), ('d', 'string')])
    >>> result = left.inner_join(right, left.a == right.c)
    >>> find_source_table(result)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    NotImplementedError: More than one base table not implemented
    """

    def finder(expr):
        if isinstance(expr, ir.TableExpr):
            return lin.halt, expr
        else:
            return lin.proceed, None

    first_tables = lin.traverse(finder, expr.op().flat_args())
    options = list(toolz.unique(first_tables, key=methodcaller('op')))

    if len(options) > 1:
        raise NotImplementedError('More than one base table not implemented')

    return options[0]
python
{ "resource": "" }
q24827
flatten_predicate
train
def flatten_predicate(expr):
    """Yield the expressions corresponding to the `And` nodes of a
    predicate.

    Parameters
    ----------
    expr : ir.BooleanColumn

    Returns
    -------
    exprs : List[ir.BooleanColumn]

    Examples
    --------
    >>> import ibis
    >>> t = ibis.table([('a', 'int64'), ('b', 'string')], name='t')
    >>> filt = (t.a == 1) & (t.b == 'foo')
    >>> predicates = flatten_predicate(filt)
    >>> len(predicates)
    2
    >>> predicates[0]  # doctest: +NORMALIZE_WHITESPACE
    ref_0
    UnboundTable[table]
      name: t
      schema:
        a : int64
        b : string
    Equals[boolean*]
      left:
        a = Column[int64*] 'a' from table
          ref_0
      right:
        Literal[int64]
          1
    >>> predicates[1]  # doctest: +NORMALIZE_WHITESPACE
    ref_0
    UnboundTable[table]
      name: t
      schema:
        a : int64
        b : string
    Equals[boolean*]
      left:
        b = Column[string*] 'b' from table
          ref_0
      right:
        Literal[string]
          foo
    """

    def predicate(expr):
        if isinstance(expr.op(), ops.And):
            return lin.proceed, None
        else:
            return lin.halt, expr

    return list(lin.traverse(predicate, expr, type=ir.BooleanColumn))
python
{ "resource": "" }
q24828
is_reduction
train
def is_reduction(expr):
    """Check whether an expression is a reduction or not

    Aggregations yield typed scalar expressions, since the result of an
    aggregation is a single value. When creating a table expression
    containing a GROUP BY equivalent, we need to be able to easily check
    that we are looking at the result of an aggregation.

    As an example, the expression we are looking at might be something
    like: foo.sum().log10() + bar.sum().log10()

    We examine the operator DAG in the expression to determine if there
    are aggregations present.

    A bound aggregation referencing a separate table is a "false
    aggregation" in a GROUP BY-type expression and should be treated as a
    literal, and must be computed as a separate query and stored in a
    temporary variable (or joined, for bound aggregations with keys)

    Parameters
    ----------
    expr : ir.Expr

    Returns
    -------
    check output : bool
    """

    def has_reduction(op):
        if getattr(op, '_reduction', False):
            return True

        for arg in op.args:
            if isinstance(arg, ir.ScalarExpr) and has_reduction(arg.op()):
                return True

        return False

    return has_reduction(expr.op() if isinstance(expr, ir.Expr) else expr)
python
{ "resource": "" }
q24829
Substitutor._substitute
train
def _substitute(self, expr, mapping):
    """Substitute expressions with other expressions.

    Parameters
    ----------
    expr : ibis.expr.types.Expr
    mapping : Mapping[ibis.expr.operations.Node, ibis.expr.types.Expr]

    Returns
    -------
    ibis.expr.types.Expr
    """
    node = expr.op()
    try:
        return mapping[node]
    except KeyError:
        if node.blocks():
            return expr

        new_args = list(node.args)
        unchanged = True
        for i, arg in enumerate(new_args):
            if isinstance(arg, ir.Expr):
                new_arg = self.substitute(arg, mapping)
                unchanged = unchanged and new_arg is arg
                new_args[i] = new_arg
        if unchanged:
            return expr
        try:
            new_node = type(node)(*new_args)
        except IbisTypeError:
            return expr

        try:
            name = expr.get_name()
        except ExpressionError:
            name = None
        return expr._factory(new_node, name=name)
python
{ "resource": "" }
q24830
literal
train
def literal(value, type=None):
    """Create a scalar expression from a Python value.

    Parameters
    ----------
    value : some Python basic type
        A Python value
    type : ibis type or string, optional
        An instance of :class:`ibis.expr.datatypes.DataType` or a string
        indicating the ibis type of `value`. This parameter should only
        be used in cases where ibis's type inference isn't sufficient for
        discovering the type of `value`.

    Returns
    -------
    literal_value : Literal
        An expression representing a literal value

    Examples
    --------
    >>> import ibis
    >>> x = ibis.literal(42)
    >>> x.type()
    int8
    >>> y = ibis.literal(42, type='double')
    >>> y.type()
    float64
    >>> ibis.literal('foobar', type='int64')  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    TypeError: Value 'foobar' cannot be safely coerced to int64
    """
    import ibis.expr.datatypes as dt
    import ibis.expr.operations as ops

    if hasattr(value, 'op') and isinstance(value.op(), ops.Literal):
        return value

    try:
        inferred_dtype = dt.infer(value)
    except com.InputTypeError:
        has_inferred = False
    else:
        has_inferred = True

    if type is None:
        has_explicit = False
    else:
        has_explicit = True
        explicit_dtype = dt.dtype(type)

    if has_explicit and has_inferred:
        try:
            # ensure type correctness: check that the inferred dtype is
            # implicitly castable to the explicitly given dtype and value
            dtype = inferred_dtype.cast(explicit_dtype, value=value)
        except com.IbisTypeError:
            raise TypeError(
                'Value {!r} cannot be safely coerced to {}'.format(
                    value, type
                )
            )
    elif has_explicit:
        dtype = explicit_dtype
    elif has_inferred:
        dtype = inferred_dtype
    else:
        raise TypeError(
            'The datatype of value {!r} cannot be inferred, try '
            'passing it explicitly with the `type` keyword.'.format(value)
        )

    if dtype is dt.null:
        return null().cast(dtype)
    else:
        return ops.Literal(value, dtype=dtype).to_expr()
python
{ "resource": "" }
q24831
sequence
train
def sequence(values):
    """
    Wrap a list of Python values as an Ibis sequence type

    Parameters
    ----------
    values : list
        Should all be None or the same type

    Returns
    -------
    seq : Sequence
    """
    import ibis.expr.operations as ops

    return ops.ValueList(values).to_expr()
python
{ "resource": "" }
q24832
param
train
def param(type):
    """Create a parameter of a particular type to be defined just before
    execution.

    Parameters
    ----------
    type : dt.DataType
        The type of the unbound parameter, e.g., double, int64, date, etc.

    Returns
    -------
    ScalarExpr

    Examples
    --------
    >>> import ibis
    >>> import ibis.expr.datatypes as dt
    >>> start = ibis.param(dt.date)
    >>> end = ibis.param(dt.date)
    >>> schema = [('timestamp_col', 'timestamp'), ('value', 'double')]
    >>> t = ibis.table(schema)
    >>> predicates = [t.timestamp_col >= start, t.timestamp_col <= end]
    >>> expr = t.filter(predicates).value.sum()
    """
    import ibis.expr.datatypes as dt
    import ibis.expr.operations as ops

    return ops.ScalarParameter(dt.dtype(type)).to_expr()
python
{ "resource": "" }
q24833
Expr.visualize
train
def visualize(self, format='svg'):
    """Visualize an expression in the browser as an SVG image.

    Parameters
    ----------
    format : str, optional
        Defaults to ``'svg'``. Some additional formats are ``'jpeg'`` and
        ``'png'``. These are specified by the ``graphviz`` Python library.

    Notes
    -----
    This method opens a web browser tab showing the image of the
    expression graph created by the code in :module:`ibis.expr.visualize`.

    Raises
    ------
    ImportError
        If ``graphviz`` is not installed.
    """
    import ibis.expr.visualize as viz

    path = viz.draw(viz.to_graph(self), format=format)
    webbrowser.open('file://{}'.format(os.path.abspath(path)))
python
{ "resource": "" }
q24834
Expr.pipe
train
def pipe(self, f, *args, **kwargs):
    """Generic composition function to enable expression pipelining.

    Parameters
    ----------
    f : function or (function, arg_name) tuple
        If the expression needs to be passed as anything other than the
        first argument to the function, pass a tuple with the argument
        name. For example, (f, 'data') if the function f expects a 'data'
        keyword
    args : positional arguments
    kwargs : keyword arguments

    Examples
    --------
    >>> import ibis
    >>> t = ibis.table([('a', 'int64'), ('b', 'string')], name='t')
    >>> f = lambda a: (a + 1).name('a')
    >>> g = lambda a: (a * 2).name('a')
    >>> result1 = t.a.pipe(f).pipe(g)
    >>> result1  # doctest: +NORMALIZE_WHITESPACE
    ref_0
    UnboundTable[table]
      name: t
      schema:
        a : int64
        b : string
    a = Multiply[int64*]
      left:
        a = Add[int64*]
          left:
            a = Column[int64*] 'a' from table
              ref_0
          right:
            Literal[int8]
              1
      right:
        Literal[int8]
          2
    >>> result2 = g(f(t.a))  # equivalent to the above
    >>> result1.equals(result2)
    True

    Returns
    -------
    result : result type of passed function
    """
    if isinstance(f, tuple):
        f, data_keyword = f
        kwargs = kwargs.copy()
        kwargs[data_keyword] = self
        return f(*args, **kwargs)
    else:
        return f(self, *args, **kwargs)
python
{ "resource": "" }
q24835
Expr.execute
train
def execute(self, limit='default', params=None, **kwargs):
    """
    If this expression is based on physical tables in a database backend,
    execute it against that backend.

    Parameters
    ----------
    limit : integer or None, default 'default'
        Pass an integer to effect a specific row limit. limit=None means
        "no limit". The default is whatever is in ibis.options.

    Returns
    -------
    result : expression-dependent
        Result of compiling expression and executing in backend
    """
    from ibis.client import execute

    return execute(self, limit=limit, params=params, **kwargs)
python
{ "resource": "" }
q24836
Expr.compile
train
def compile(self, limit=None, params=None):
    """
    Compile expression to whatever execution target, to verify

    Returns
    -------
    compiled : value or list
        query representation or list thereof
    """
    from ibis.client import compile

    return compile(self, limit=limit, params=params)
python
{ "resource": "" }
q24837
ExprList.concat
train
def concat(self, *others):
    """
    Concatenate expression lists

    Returns
    -------
    combined : ExprList
    """
    import ibis.expr.operations as ops

    exprs = list(self.exprs())
    for o in others:
        if not isinstance(o, ExprList):
            raise TypeError(o)
        exprs.extend(o.exprs())
    return ops.ExpressionList(exprs).to_expr()
python
{ "resource": "" }
q24838
ColumnExpr.to_projection
train
def to_projection(self):
    """
    Promote this column expression to a table projection
    """
    roots = self._root_tables()
    if len(roots) > 1:
        raise com.RelationError(
            'Cannot convert array expression '
            'involving multiple base table references '
            'to a projection'
        )

    table = TableExpr(roots[0])
    return table.projection([self])
python
{ "resource": "" }
q24839
TableExpr.get_column
train
def get_column(self, name):
    """
    Get a reference to a single column from the table

    Returns
    -------
    column : array expression
    """
    import ibis.expr.operations as ops

    ref = ops.TableColumn(name, self)
    return ref.to_expr()
python
{ "resource": "" }
q24840
TableExpr.group_by
train
def group_by(self, by=None, **additional_grouping_expressions):
    """
    Create an intermediate grouped table expression, pending some group
    operation to be applied with it.

    Examples
    --------
    >>> import ibis
    >>> pairs = [('a', 'int32'), ('b', 'timestamp'), ('c', 'double')]
    >>> t = ibis.table(pairs)
    >>> b1, b2 = t.a, t.b
    >>> result = t.group_by([b1, b2]).aggregate(sum_of_c=t.c.sum())

    Notes
    -----
    group_by and groupby are equivalent, with `groupby` being provided
    for ease-of-use for pandas users.

    Returns
    -------
    grouped_expr : GroupedTableExpr
    """
    from ibis.expr.groupby import GroupedTableExpr

    return GroupedTableExpr(self, by, **additional_grouping_expressions)
python
{ "resource": "" }
q24841
TopKExpr.to_aggregation
train
def to_aggregation(
    self, metric_name=None, parent_table=None, backup_metric_name=None
):
    """
    Convert the TopK operation to a table aggregation
    """
    op = self.op()

    arg_table = find_base_table(op.arg)

    by = op.by
    if not isinstance(by, Expr):
        by = by(arg_table)
        by_table = arg_table
    else:
        by_table = find_base_table(op.by)

    if metric_name is None:
        if by.get_name() == op.arg.get_name():
            by = by.name(backup_metric_name)
    else:
        by = by.name(metric_name)

    if arg_table.equals(by_table):
        agg = arg_table.aggregate(by, by=[op.arg])
    elif parent_table is not None:
        agg = parent_table.aggregate(by, by=[op.arg])
    else:
        raise com.IbisError(
            'Cross-table TopK; must provide a parent joined table'
        )

    return agg.sort_by([(by.get_name(), False)]).limit(op.k)
python
{ "resource": "" }
q24842
DayOfWeek.index
train
def index(self):
    """Get the index of the day of the week.

    Returns
    -------
    IntegerValue
        The index of the day of the week. Ibis follows pandas conventions,
        where **Monday = 0 and Sunday = 6**.
    """
    import ibis.expr.operations as ops

    return ops.DayOfWeekIndex(self.op().arg).to_expr()
python
{ "resource": "" }
q24843
DayOfWeek.full_name
train
def full_name(self):
    """Get the name of the day of the week.

    Returns
    -------
    StringValue
        The name of the day of the week
    """
    import ibis.expr.operations as ops

    return ops.DayOfWeekName(self.op().arg).to_expr()
python
{ "resource": "" }
q24844
indent
train
def indent(lines, spaces=4):
    """Indent `lines` by `spaces` spaces.

    Parameters
    ----------
    lines : Union[str, List[str]]
        A string or list of strings to indent
    spaces : int
        The number of spaces to indent `lines`

    Returns
    -------
    indented_lines : str
    """
    if isinstance(lines, str):
        # wrap a bare string in a list so the join below doesn't split it
        # into individual characters
        lines = [lines]
    text = '\n'.join(lines)
    return textwrap.indent(text, ' ' * spaces)
python
{ "resource": "" }
q24845
PythonToJavaScriptTranslator.local_scope
train
def local_scope(self):
    """Assign symbols to local variables.
    """
    self.scope = self.scope.new_child()
    try:
        yield self.scope
    finally:
        self.scope = self.scope.parents
python
{ "resource": "" }
q24846
PythonToJavaScriptTranslator.visit_ListComp
train
def visit_ListComp(self, node):
    """Generate a curried lambda function

    [x + y for x, y in [[1, 4], [2, 5], [3, 6]]]

    becomes

    [[1, 4], [2, 5], [3, 6]].map(([x, y]) => x + y)
    """
    try:
        generator, = node.generators
    except ValueError:
        raise NotImplementedError(
            'Only single loop comprehensions are allowed'
        )

    names = find_names(generator.target)
    argslist = [ast.arg(arg=name.id, annotation=None) for name in names]
    if len(names) <= 1:
        signature = ast.arguments(
            args=argslist,
            vararg=None,
            kwonlyargs=[],
            kw_defaults=[],
            kwarg=None,
            defaults=[],
        )
    else:
        signature = ast.List(elts=argslist, ctx=ast.Load())

    array = generator.iter
    lam_sig = functools.partial(ast.Lambda, args=signature)

    filters = generator.ifs
    if filters:
        filt = ast.BoolOp(op=ast.And(), values=filters)
        # array.filter
        method = ast.Attribute(value=array, attr='filter', ctx=ast.Load())
        # array.filter(func)
        array = ast.Call(
            func=method, args=[lam_sig(body=filt)], keywords=[]
        )

    method = ast.Attribute(value=array, attr='map', ctx=ast.Load())
    mapped = ast.Call(
        func=method, args=[lam_sig(body=node.elt)], keywords=[]
    )
    result = self.visit(mapped)
    return result
python
{ "resource": "" }
q24847
convert_timezone
train
def convert_timezone(obj, timezone):
    """Convert `obj` to the timezone `timezone`.

    Parameters
    ----------
    obj : datetime.date or datetime.datetime
    timezone : str or None
        An IANA timezone name, or None to strip timezone information

    Returns
    -------
    type(obj)
    """
    if timezone is None:
        return obj.replace(tzinfo=None)
    return pytz.timezone(timezone).localize(obj)
python
{ "resource": "" }
q24848
ibis_schema_apply_to
train
def ibis_schema_apply_to(schema, df):
    """Applies the Ibis schema to a pandas DataFrame

    Parameters
    ----------
    schema : ibis.schema.Schema
    df : pandas.DataFrame

    Returns
    -------
    df : pandas.DataFrame

    Notes
    -----
    Mutates `df`
    """
    for column, dtype in schema.items():
        pandas_dtype = dtype.to_pandas()
        col = df[column]
        col_dtype = col.dtype

        try:
            not_equal = pandas_dtype != col_dtype
        except TypeError:
            # ugh, we can't compare dtypes coming from pandas,
            # assume not equal
            not_equal = True

        if not_equal or dtype == dt.string:
            df[column] = convert(col_dtype, dtype, col)

    return df
python
{ "resource": "" }
q24849
MySQLClient.table
train
def table(self, name, database=None, schema=None):
    """Create a table expression that references a particular table called
    `name` in a MySQL database called `database`.

    Parameters
    ----------
    name : str
        The name of the table to retrieve.
    database : str, optional
        The database in which the table referred to by `name` resides. If
        ``None`` then the ``current_database`` is used.
    schema : str, optional
        The schema in which the table resides. If ``None`` then the
        `public` schema is assumed.

    Returns
    -------
    table : TableExpr
        A table expression.
    """
    if database is not None and database != self.current_database:
        return self.database(name=database).table(name=name, schema=schema)
    else:
        alch_table = self._get_sqla_table(name, schema=schema)
        node = self.table_class(alch_table, self, self._schemas.get(name))
        return self.table_expr_class(node)
python
{ "resource": "" }
q24850
_generate_tokens
train
def _generate_tokens(pat: GenericAny, text: str) -> Iterator[Token]:
    """Generate a sequence of tokens from `text` that match `pat`

    Parameters
    ----------
    pat : compiled regex
        The pattern to use for tokenization
    text : str
        The text to tokenize
    """
    rules = _TYPE_RULES
    keys = _TYPE_KEYS
    groupindex = pat.groupindex
    scanner = pat.scanner(text)
    for m in iter(scanner.match, None):
        lastgroup = m.lastgroup
        func = rules[keys[groupindex[lastgroup] - 1]]
        if func is not None:
            yield func(m.group(lastgroup))
python
{ "resource": "" }
q24851
cast
train
def cast(
    source: Union[DataType, str], target: Union[DataType, str], **kwargs
) -> DataType:
    """Attempts to implicitly cast from source dtype to target dtype"""
    source, result_target = dtype(source), dtype(target)

    if not castable(source, result_target, **kwargs):
        raise com.IbisTypeError(
            'Datatype {} cannot be implicitly '
            'casted to {}'.format(source, result_target)
        )
    return result_target
python
{ "resource": "" }
q24852
verify
train
def verify(expr, params=None):
    """
    Determine if expression can be successfully translated to execute on
    MapD
    """
    try:
        compile(expr, params=params)
        return True
    except com.TranslationError:
        return False
python
{ "resource": "" }
q24853
connect
train
def connect(
    uri=None,
    user=None,
    password=None,
    host=None,
    port=9091,
    database=None,
    protocol='binary',
    execution_type=EXECUTION_TYPE_CURSOR,
):
    """Create a MapDClient for use with Ibis

    Parameters
    ----------
    uri : str
    user : str
    password : str
    host : str
    port : int
    database : str
    protocol : str
    execution_type : int

    Returns
    -------
    MapDClient
    """
    client = MapDClient(
        uri=uri,
        user=user,
        password=password,
        host=host,
        port=port,
        database=database,
        protocol=protocol,
        execution_type=execution_type,
    )

    if options.default_backend is None:
        options.default_backend = client

    return client
python
{ "resource": "" }
q24854
create_udf_node
train
def create_udf_node(name, fields):
    """Create a new UDF node type.

    Parameters
    ----------
    name : str
        The name of the UDF node
    fields : OrderedDict
        Mapping of class member name to definition

    Returns
    -------
    result : type
        A new BigQueryUDFNode subclass
    """
    definition = next(_udf_name_cache[name])
    external_name = '{}_{:d}'.format(name, definition)
    return type(external_name, (BigQueryUDFNode,), fields)
python
{ "resource": "" }
q24855
execute_cross_join
train
def execute_cross_join(op, left, right, **kwargs):
    """Execute a cross join in pandas.

    Notes
    -----
    We create a dummy column of all :data:`True` instances and use that as
    the join key. This results in the desired Cartesian product behavior
    guaranteed by cross join.
    """
    # generate a unique name for the temporary join key
    key = "cross_join_{}".format(ibis.util.guid())
    join_key = {key: True}
    new_left = left.assign(**join_key)
    new_right = right.assign(**join_key)

    # inner/outer doesn't matter because every row matches every other row
    result = pd.merge(
        new_left,
        new_right,
        how='inner',
        on=key,
        copy=False,
        suffixes=constants.JOIN_SUFFIXES,
    )

    # remove the generated key
    del result[key]

    return result
python
{ "resource": "" }
q24856
merge_pr
train
def merge_pr(
    pr_num: int,
    base_ref: str,
    target_ref: str,
    commit_title: str,
    body: str,
    pr_repo_desc: str,
    original_head: str,
    remote: str,
    merge_method: str,
    github_user: str,
    password: str,
) -> None:
    """Merge a pull request."""
    git_log = git[
        "log",
        "{remote}/{target_ref}..{base_ref}".format(
            remote=remote, target_ref=target_ref, base_ref=base_ref
        ),
    ]
    commit_authors = git_log["--pretty=format:%an <%ae>"]().splitlines()
    author_count = collections.Counter(commit_authors)
    distinct_authors = [author for author, _ in author_count.most_common()]
    commits = git_log["--pretty=format:%h [%an] %s"]().splitlines()

    merge_message_pieces = []

    if body:
        merge_message_pieces.append("\n".join(textwrap.wrap(body)))

    merge_message_pieces.extend(map("Author: {}".format, distinct_authors))

    # The string "Closes #{pull_request_number:d}" is required for GitHub
    # to correctly close the PR
    merge_message_pieces.append(
        (
            "\nCloses #{pr_num:d} from {pr_repo_desc} and squashes the "
            "following commits:\n"
        ).format(pr_num=pr_num, pr_repo_desc=pr_repo_desc)
    )
    merge_message_pieces += commits

    commit_message = "\n".join(merge_message_pieces)

    # PUT /repos/:owner/:repo/pulls/:number/merge
    resp = requests.put(
        "{GITHUB_API_BASE}/pulls/{pr_num:d}/merge".format(
            GITHUB_API_BASE=GITHUB_API_BASE, pr_num=pr_num
        ),
        json=dict(
            commit_title=commit_title,
            commit_message=commit_message,
            merge_method=merge_method,
        ),
        auth=(github_user, password),
    )
    resp.raise_for_status()

    if resp.status_code == 200:
        resp_json = resp.json()
        merged = resp_json["merged"]
        assert merged is True, merged
        click.echo(
            "Pull request #{pr_num:d} successfully merged.".format(
                pr_num=pr_num
            )
        )
python
{ "resource": "" }
q24857
execute_series_lead_lag_timedelta
train
def execute_series_lead_lag_timedelta(
    op, data, offset, default, aggcontext=None, **kwargs
):
    """An implementation of shifting a column relative to another one that
    is in units of time rather than rows.
    """
    # lagging adds time (delayed), leading subtracts time (moved up)
    func = operator.add if isinstance(op, ops.Lag) else operator.sub
    group_by = aggcontext.group_by
    order_by = aggcontext.order_by

    # get the parent object from which `data` originated
    parent = aggcontext.parent

    # get the DataFrame from the parent object, handling the
    # DataFrameGroupBy case
    parent_df = getattr(parent, 'obj', parent)

    # index our parent df by grouping and ordering keys
    indexed_original_df = parent_df.set_index(group_by + order_by)

    # perform the time shift
    adjusted_parent_df = parent_df.assign(
        **{k: func(parent_df[k], offset) for k in order_by}
    )

    # index the parent *after* adjustment
    adjusted_indexed_parent = adjusted_parent_df.set_index(
        group_by + order_by
    )

    # get the column we care about
    result = adjusted_indexed_parent[getattr(data, 'obj', data).name]

    # reindex the shifted data by the original frame's index
    result = result.reindex(indexed_original_df.index)

    # add a default if necessary
    return post_lead_lag(result, default)
python
{ "resource": "" }
q24858
MapDTable.load_data
train
def load_data(self, df):
    """
    Wraps the LOAD DATA DDL statement. Loads data into a MapD table from
    pandas.DataFrame or pyarrow.Table

    Parameters
    ----------
    df : pandas.DataFrame or pyarrow.Table

    Returns
    -------
    query : MapDQuery
    """
    stmt = ddl.LoadData(self._qualified_name, df)
    return self._execute(stmt)
python
{ "resource": "" }
q24859
MapDTable.rename
train
def rename(self, new_name, database=None):
    """
    Rename table inside MapD. References to the old table are no longer
    valid.

    Parameters
    ----------
    new_name : string
    database : string

    Returns
    -------
    renamed : MapDTable
    """
    m = ddl.fully_qualified_re.match(new_name)
    if not m and database is None:
        database = self._database
    statement = ddl.RenameTable(
        self._qualified_name, new_name, new_database=database
    )
    self._client._execute(statement)

    op = self.op().change_name(statement.new_qualified_name)
    return type(self)(op)
python
{ "resource": "" }
q24860
MapDClient.create_database
train
def create_database(self, name, owner=None):
    """
    Create a new MapD database

    Parameters
    ----------
    name : string
        Database name
    """
    statement = ddl.CreateDatabase(name, owner=owner)
    self._execute(statement)
python
{ "resource": "" }
q24861
MapDClient.drop_database
train
def drop_database(self, name, force=False):
    """
    Drop a MapD database

    Parameters
    ----------
    name : string
        Database name
    force : boolean, default False
        If False and there are any tables in this database, raises an
        IntegrityError
    """
    tables = []

    if not force or self.database(name):
        tables = self.list_tables(database=name)

    if not force and len(tables):
        raise com.IntegrityError(
            'Database {0} must be empty before being dropped, or set '
            'force=True'.format(name)
        )
    statement = ddl.DropDatabase(name)
    self._execute(statement)
python
{ "resource": "" }
q24862
MapDClient.create_user
train
def create_user(self, name, password, is_super=False):
    """
    Create a new MapD user

    Parameters
    ----------
    name : string
        User name
    password : string
        Password
    is_super : bool
        if user is a superuser
    """
    statement = ddl.CreateUser(
        name=name, password=password, is_super=is_super
    )
    self._execute(statement)
python
{ "resource": "" }
q24863
MapDClient.alter_user
train
def alter_user(
    self, name, password=None, is_super=None, insert_access=None
):
    """
    Alter MapD user parameters

    Parameters
    ----------
    name : string
        User name
    password : string
        Password
    is_super : bool
        If user is a superuser
    insert_access : string
        If users need to insert records to a database they do not own,
        use insert_access property to give them the required privileges.
    """
    statement = ddl.AlterUser(
        name=name,
        password=password,
        is_super=is_super,
        insert_access=insert_access,
    )
    self._execute(statement)
python
{ "resource": "" }
q24864
MapDClient.drop_user
train
def drop_user(self, name):
    """
    Drop a MapD user

    Parameters
    ----------
    name : string
        User name
    """
    statement = ddl.DropUser(name)
    self._execute(statement)
python
{ "resource": "" }
q24865
MapDClient.create_view
train
def create_view(self, name, expr, database=None):
    """
    Create a MapD view from a table expression

    Parameters
    ----------
    name : string
    expr : ibis TableExpr
    database : string, default None
    """
    ast = self._build_ast(expr, MapDDialect.make_context())
    select = ast.queries[0]
    statement = ddl.CreateView(name, select, database=database)
    self._execute(statement)
python
{ "resource": "" }
q24866
MapDClient.drop_view
train
def drop_view(self, name, database=None):
    """
    Drop a MapD view

    Parameters
    ----------
    name : string
    database : string, default None
    """
    statement = ddl.DropView(name, database=database)
    self._execute(statement, False)
python
{ "resource": "" }
q24867
MapDClient.create_table
train
def create_table(
    self, table_name, obj=None, schema=None, database=None, max_rows=None
):
    """
    Create a new table in MapD using an Ibis table expression.

    Parameters
    ----------
    table_name : string
    obj : TableExpr or pandas.DataFrame, optional
        If passed, creates table from select statement results
    schema : ibis.Schema, optional
        Mutually exclusive with expr, creates an empty table with a
        particular schema
    database : string, default None (optional)
    max_rows : int, Default None
        Set the maximum number of rows allowed in a table to create a
        capped collection. When this limit is reached, the oldest fragment
        is removed. Default = 2^62.

    Examples
    --------
    >>> con.create_table('new_table_name', table_expr)  # doctest: +SKIP
    """
    _database = self.db_name
    self.set_database(database)

    if obj is not None:
        if isinstance(obj, pd.DataFrame):
            raise NotImplementedError(
                'Pandas Data Frame input not implemented.'
            )
        else:
            to_insert = obj
        ast = self._build_ast(to_insert, MapDDialect.make_context())
        select = ast.queries[0]

        statement = ddl.CTAS(table_name, select, database=database)
    elif schema is not None:
        statement = ddl.CreateTableWithSchema(
            table_name, schema, database=database, max_rows=max_rows
        )
    else:
        raise com.IbisError('Must pass expr or schema')

    result = self._execute(statement, False)
    self.set_database(_database)
    return result
python
{ "resource": "" }
q24868
MapDClient.drop_table
train
def drop_table(self, table_name, database=None, force=False):
    """
    Drop a MapD table

    Parameters
    ----------
    table_name : string
    database : string, default None (optional)
    force : boolean, default False
        Database may throw exception if table does not exist

    Examples
    --------
    >>> table = 'my_table'
    >>> db = 'operations'
    >>> con.drop_table(table, database=db, force=True)  # doctest: +SKIP
    """
    _database = self.db_name
    self.set_database(database)

    statement = ddl.DropTable(
        table_name, database=database, must_exist=not force
    )
    self._execute(statement, False)
    self.set_database(_database)
python
{ "resource": "" }
q24869
MapDClient.truncate_table
train
def truncate_table(self, table_name, database=None):
    """
    Delete all rows from, but do not drop, an existing table

    Parameters
    ----------
    table_name : string
    database : string, default None (optional)
    """
    statement = ddl.TruncateTable(table_name, database=database)
    self._execute(statement, False)
python
{ "resource": "" }
q24870
MapDClient.drop_table_or_view
train
def drop_table_or_view(self, name, database=None, force=False):
    """
    Attempt to drop a relation that may be a view or table
    """
    try:
        self.drop_table(name, database=database)
    except Exception as e:
        try:
            self.drop_view(name, database=database)
        except Exception:
            raise e
python
{ "resource": "" }
q24871
MapDClient.load_data
train
def load_data(self, table_name, obj, database=None, **kwargs):
    """
    Wraps the LOAD DATA DDL statement. Loads data into a MapD table by
    physically moving data files.

    Parameters
    ----------
    table_name : string
    obj : pandas.DataFrame or pyarrow.Table
    database : string, default None (optional)
    """
    _database = self.db_name
    self.set_database(database)
    self.con.load_table(table_name, obj, **kwargs)
    self.set_database(_database)
python
{ "resource": "" }
q24872
connect
train
def connect(
    project_id: Optional[str] = None,
    dataset_id: Optional[str] = None,
    credentials: Optional[google.auth.credentials.Credentials] = None,
) -> BigQueryClient:
    """Create a BigQueryClient for use with Ibis.

    Parameters
    ----------
    project_id : str
        A BigQuery project id.
    dataset_id : str
        A dataset id that lives inside of the project indicated by
        `project_id`.
    credentials : google.auth.credentials.Credentials

    Returns
    -------
    BigQueryClient
    """
    if credentials is None:
        credentials_cache = pydata_google_auth.cache.ReadWriteCredentialsCache(
            filename="ibis.json"
        )
        credentials, project_id = pydata_google_auth.default(
            SCOPES,
            client_id=CLIENT_ID,
            client_secret=CLIENT_SECRET,
            credentials_cache=credentials_cache,
        )

    return BigQueryClient(
        project_id, dataset_id=dataset_id, credentials=credentials
    )
python
{ "resource": "" }
q24873
udf
train
def udf(f):
    """Create a SQLite scalar UDF from `f`

    Parameters
    ----------
    f
        A callable object

    Returns
    -------
    callable
        A callable object that returns ``None`` if any of its inputs are
        ``None``.
    """

    @functools.wraps(f)
    def wrapper(*args):
        if any(arg is None for arg in args):
            return None
        return f(*args)

    _SQLITE_UDF_REGISTRY.add(wrapper)
    return wrapper
python
{ "resource": "" }
q24874
_ibis_sqlite_regex_extract
train
def _ibis_sqlite_regex_extract(string, pattern, index):
    """Extract match of regular expression `pattern` from `string` at
    `index`.

    Parameters
    ----------
    string : str
    pattern : str
    index : int

    Returns
    -------
    result : str or None
    """
    result = re.search(pattern, string)
    if result is not None and 0 <= index <= (result.lastindex or -1):
        return result.group(index)
    return None
python
{ "resource": "" }
q24875
_register_function
train
def _register_function(func, con):
    """Register a Python callable with a SQLite connection `con`.

    Parameters
    ----------
    func : callable
    con : sqlalchemy.Connection
    """
    nargs = number_of_arguments(func)
    con.connection.connection.create_function(func.__name__, nargs, func)
python
{ "resource": "" }
q24876
_register_aggregate
train
def _register_aggregate(agg, con):
    """Register a Python class that performs aggregation in SQLite.

    Parameters
    ----------
    agg : type
    con : sqlalchemy.Connection
    """
    nargs = number_of_arguments(agg.step) - 1  # because self
    con.connection.connection.create_aggregate(agg.__name__, nargs, agg)
python
{ "resource": "" }
q24877
SQLiteClient.attach
train
def attach(self, name, path, create=False):
    """Connect another SQLite database file

    Parameters
    ----------
    name : string
        Database name within SQLite
    path : string
        Path to sqlite3 file
    create : boolean, optional
        If True, create the file if it does not exist; otherwise raise an
        Exception
    """
    if not os.path.exists(path) and not create:
        raise com.IbisError('File {!r} does not exist'.format(path))

    self.raw_sql(
        "ATTACH DATABASE {path!r} AS {name}".format(
            path=path,
            name=self.con.dialect.identifier_preparer.quote(name),
        )
    )
python
{ "resource": "" }
q24878
SQLiteClient.table
train
def table(self, name, database=None):
    """
    Create a table expression that references a particular table in the
    SQLite database

    Parameters
    ----------
    name : string
    database : string, optional
        name of the attached database that the table is located in.

    Returns
    -------
    table : TableExpr
    """
    alch_table = self._get_sqla_table(name, schema=database)
    node = self.table_class(alch_table, self)
    return self.table_expr_class(node)
python
{ "resource": "" }
q24879
_sign
train
def _sign(translator, expr):
    """Workaround for missing sign function"""
    op = expr.op()
    arg, = op.args
    arg_ = translator.translate(arg)
    return 'intDivOrZero({0}, abs({0}))'.format(arg_)
python
{ "resource": "" }
q24880
hdfs_connect
train
def hdfs_connect(
    host='localhost',
    port=50070,
    protocol='webhdfs',
    use_https='default',
    auth_mechanism='NOSASL',
    verify=True,
    session=None,
    **kwds
):
    """Connect to HDFS.

    Parameters
    ----------
    host : str
        Host name of the HDFS NameNode
    port : int
        NameNode's WebHDFS port
    protocol : str
        The protocol used to communicate with HDFS. The only valid value
        is ``'webhdfs'``.
    use_https : bool
        Connect to WebHDFS with HTTPS, otherwise plain HTTP. For secure
        authentication, the default for this is True, otherwise False.
    auth_mechanism : str
        Set to NOSASL or PLAIN for non-secure clusters. Set to GSSAPI or
        LDAP for Kerberos-secured clusters.
    verify : bool
        Set to :data:`False` to turn off verifying SSL certificates.
    session : Optional[requests.Session]
        A custom :class:`requests.Session` object.

    Notes
    -----
    Other keywords are forwarded to HDFS library classes.

    Returns
    -------
    WebHDFS
    """
    import requests

    if session is None:
        session = requests.Session()
    session.verify = verify
    if auth_mechanism in ('GSSAPI', 'LDAP'):
        if use_https == 'default':
            prefix = 'https'
        else:
            prefix = 'https' if use_https else 'http'
        try:
            import requests_kerberos  # noqa: F401
        except ImportError:
            raise IbisError(
                "Unable to import requests-kerberos, which is required "
                "for Kerberos HDFS support. Install it by executing `pip "
                "install requests-kerberos` or `pip install "
                "hdfs[kerberos]`."
            )
        from hdfs.ext.kerberos import KerberosClient

        # note SSL
        url = '{0}://{1}:{2}'.format(prefix, host, port)
        kwds.setdefault('mutual_auth', 'OPTIONAL')
        hdfs_client = KerberosClient(url, session=session, **kwds)
    else:
        if use_https == 'default':
            prefix = 'http'
        else:
            prefix = 'https' if use_https else 'http'
        from hdfs.client import InsecureClient

        url = '{}://{}:{}'.format(prefix, host, port)
        hdfs_client = InsecureClient(url, session=session, **kwds)
    return WebHDFS(hdfs_client)
python
{ "resource": "" }
q24881
_array_repeat
train
def _array_repeat(t, expr):
    """Is this really that useful? Repeat an array like a Python list
    using modular arithmetic, scalar subqueries, and PostgreSQL's ARRAY
    function.

    This is inefficient if PostgreSQL allocates memory for the entire
    sequence and the output column. A quick glance at PostgreSQL's C code
    shows the sequence is evaluated stepwise, which suggests that it's
    roughly constant memory for the sequence generation.
    """
    raw, times = map(t.translate, expr.op().args)

    # SQLAlchemy uses our column's table in the FROM clause. We need a
    # simpler expression to workaround this.
    array = sa.column(raw.name, type_=raw.type)

    # We still need to prefix the table name to the column name in the
    # final query, so make sure the column knows its origin
    array.table = raw.table

    array_length = _cardinality(array)

    # sequence from 1 to the total number of elements desired in steps of
    # 1. the call to greatest isn't necessary, but it provides clearer
    # intent rather than depending on the implicit postgres
    # generate_series behavior
    start = step = 1
    stop = sa.func.greatest(times, 0) * array_length
    series = sa.func.generate_series(start, stop, step).alias()
    series_column = sa.column(series.name, type_=sa.INTEGER)

    # if our current index modulo the array's length is a multiple of the
    # array's length, then the index is the array's length
    index_expression = series_column % array_length
    index = sa.func.coalesce(
        sa.func.nullif(index_expression, 0), array_length
    )

    # tie it all together in a scalar subquery and collapse that into an
    # ARRAY
    selected = sa.select([array[index]]).select_from(series)
    return sa.func.array(selected.as_scalar())
python
{ "resource": "" }
q24882
_replace_interval_with_scalar
train
def _replace_interval_with_scalar(expr):
    """
    Good old Depth-First Search to identify the Interval and IntervalValue
    components of the expression and return a comparable scalar
    expression.

    Parameters
    ----------
    expr : float or expression of intervals
        For example, ``ibis.interval(days=1) + ibis.interval(hours=5)``

    Returns
    -------
    preceding : float or ir.FloatingScalar, depending upon the expr
    """
    try:
        expr_op = expr.op()
    except AttributeError:
        expr_op = None

    if not isinstance(expr, (dt.Interval, ir.IntervalValue)):
        # Literal expressions have op method but native types do not.
        if isinstance(expr_op, ops.Literal):
            return expr_op.value
        else:
            return expr
    elif isinstance(expr, dt.Interval):
        try:
            microseconds = _map_interval_to_microseconds[expr.unit]
            return microseconds
        except KeyError:
            raise ValueError(
                "Expected preceding values of week(), "
                + "day(), hour(), minute(), second(), millisecond(), "
                + "microseconds(), nanoseconds(); got {}".format(expr)
            )
    elif expr_op.args and isinstance(expr, ir.IntervalValue):
        if len(expr_op.args) > 2:
            raise com.NotImplementedError(
                "'preceding' argument cannot be parsed."
            )
        left_arg = _replace_interval_with_scalar(expr_op.args[0])
        right_arg = _replace_interval_with_scalar(expr_op.args[1])
        method = _map_interval_op_to_op[type(expr_op)]
        return method(left_arg, right_arg)
python
{ "resource": "" }
q24883
find_nodes
train
def find_nodes(expr, node_types):
    """Depth-first search of the expression tree yielding nodes of a given
    type or set of types.

    Parameters
    ----------
    expr : ibis.expr.types.Expr
    node_types : type or tuple of types

    Yields
    ------
    op : type
        A node of given node_types
    """

    def extender(op):
        return (arg for arg in op.args if isinstance(arg, ir.Expr))

    return _search_for_nodes([expr], extender, node_types)
python
{ "resource": "" }
q24884
roots
train
def roots(expr, types=(ops.PhysicalTable,)):
    """Yield every node of a particular type on which an expression
    depends.

    Parameters
    ----------
    expr : Expr
        The expression to analyze
    types : tuple(type), optional, default
        (:mod:`ibis.expr.operations.PhysicalTable`,)
        The node types to traverse

    Yields
    ------
    table : Expr
        Unique node types on which an expression depends

    Notes
    -----
    If your question is: "What nodes of type T does `expr` depend on?",
    then you've come to the right place. By default, we yield the physical
    tables that an expression depends on.
    """
    stack = [
        arg.to_expr()
        for arg in reversed(expr.op().root_tables())
        if isinstance(arg, types)
    ]

    def extender(op):
        return reversed(
            list(
                itertools.chain.from_iterable(
                    arg.op().root_tables()
                    for arg in op.flat_args()
                    if isinstance(arg, types)
                )
            )
        )

    return _search_for_nodes(stack, extender, types)
python
{ "resource": "" }
q24885
_get_args
train
def _get_args(op, name):
    """Hack to get relevant arguments for lineage computation.

    We need a better way to determine the relevant arguments of an
    expression.
    """
    # Could use multipledispatch here to avoid the pasta
    if isinstance(op, ops.Selection):
        assert name is not None, 'name is None'
        result = op.selections

        # if Selection.selections is always columnar, could use an
        # OrderedDict to prevent scanning the whole thing
        return [col for col in result if col._name == name]
    elif isinstance(op, ops.Aggregation):
        assert name is not None, 'name is None'
        return [
            col
            for col in itertools.chain(op.by, op.metrics)
            if col._name == name
        ]
    else:
        return op.args
python
{ "resource": "" }
q24886
lineage
train
def lineage(expr, container=Stack):
    """Yield the path of the expression tree that comprises a column
    expression.

    Parameters
    ----------
    expr : Expr
        An ibis expression. It must be an instance of
        :class:`ibis.expr.types.ColumnExpr`.
    container : Container, {Stack, Queue}
        Stack for depth-first traversal, and Queue for breadth-first.
        Depth-first will reach root table nodes before continuing on to
        other columns in a column that is derived from multiple columns.
        Breadth-first will traverse all columns at each level before
        reaching root tables.

    Yields
    ------
    node : Expr
        A column and its dependencies
    """
    if not isinstance(expr, ir.ColumnExpr):
        raise TypeError('Input expression must be an instance of ColumnExpr')

    c = container([(expr, expr._name)])
    seen = set()

    # while we haven't visited everything
    while c:
        node, name = c.get()

        if node not in seen:
            seen.add(node)
            yield node

        # add our dependencies to the container if they match our name
        # and are ibis expressions
        c.extend(
            (arg, getattr(arg, '_name', name))
            for arg in c.visitor(_get_args(node.op(), name))
            if isinstance(arg, ir.Expr)
        )
python
{ "resource": "" }
q24887
traverse
train
def traverse(fn, expr, type=ir.Expr, container=Stack):
    """Utility for generic expression tree traversal

    Parameters
    ----------
    fn : Callable[[ir.Expr], Tuple[Union[Boolean, Iterable], Any]]
        This function will be applied on each expression, and it must
        return a tuple. The first element of the tuple controls the
        traversal, and the second is the result if it's not None.
    expr : ir.Expr
        The traversable expression or a list of expressions.
    type : Type
        Only instances of this type are traversed.
    container : Union[Stack, Queue], default Stack
        Defines the traversing order.
    """
    args = expr if isinstance(expr, collections.abc.Iterable) else [expr]
    todo = container(arg for arg in args if isinstance(arg, type))

    seen = set()

    while todo:
        expr = todo.get()
        op = expr.op()

        if op in seen:
            continue
        else:
            seen.add(op)

        control, result = fn(expr)
        if result is not None:
            yield result

        if control is not halt:
            if control is proceed:
                args = op.flat_args()
            elif isinstance(control, collections.abc.Iterable):
                args = control
            else:
                raise TypeError(
                    'First item of the returned tuple must be '
                    'an instance of boolean or iterable'
                )

            todo.extend(
                arg for arg in todo.visitor(args) if isinstance(arg, type)
            )
python
{ "resource": "" }
q24888
adjoin
train
def adjoin(space: int, *lists: Sequence[str]) -> str:
    """Glue together the given columns of strings, padding every column
    except the last to its maximum width plus `space` trailing spaces."""
    # pad every column except the last one
    lengths = [max(map(len, x)) + space for x in lists[:-1]]
    lengths.append(max(map(len, lists[-1])))
    max_len = max(map(len, lists))
    chains = (
        itertools.chain(
            (x.ljust(length) for x in lst),
            itertools.repeat(' ' * length, max_len - len(lst)),
        )
        for lst, length in zip(lists, lengths)
    )
    return '\n'.join(map(''.join, zip(*chains)))
python
{ "resource": "" }
q24889
approx_equal
train
def approx_equal(a: Real, b: Real, eps: Real):
    """Assert that the difference between `a` and `b` is less than `eps`.

    Parameters
    ----------
    a
    b
    eps

    Raises
    ------
    AssertionError
        If ``abs(a - b)`` is not less than `eps`.
    """
    assert abs(a - b) < eps
python
{ "resource": "" }
q24890
safe_index
train
def safe_index(elements: Sequence[T], value: T):
    """Find the location of `value` in `elements`, returning -1 instead of
    raising ``ValueError`` if `value` is not found.

    Parameters
    ----------
    elements
    value

    Returns
    -------
    int

    Examples
    --------
    >>> sequence = [1, 2, 3]
    >>> safe_index(sequence, 2)
    1
    >>> safe_index(sequence, 4)
    -1
    """
    try:
        return elements.index(value)
    except ValueError:
        return -1
python
{ "resource": "" }
q24891
convert_unit
train
def convert_unit(value, unit, to):
    """Convert `value`, assumed to be in units of `unit`, to units of `to`.

    Parameters
    ----------
    value : Union[numbers.Real, ibis.expr.types.NumericValue]
    unit : str
        The unit `value` is currently in.
    to : str
        The unit to convert to.

    Returns
    -------
    Union[numbers.Integral, ibis.expr.types.NumericValue]

    Examples
    --------
    >>> one_second = 1000
    >>> x = convert_unit(one_second, 'ms', 's')
    >>> x
    1
    >>> one_second = 1
    >>> x = convert_unit(one_second, 's', 'ms')
    >>> x
    1000
    >>> x = convert_unit(one_second, 's', 's')
    >>> x
    1
    >>> x = convert_unit(one_second, 's', 'M')
    Traceback (most recent call last):
        ...
    ValueError: Cannot convert to or from variable length interval
    """
    # Don't do anything if from and to units are equivalent
    if unit == to:
        return value

    units = ('W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns')
    factors = (7, 24, 60, 60, 1000, 1000, 1000)

    monthly_units = ('Y', 'Q', 'M')
    monthly_factors = (4, 3)

    try:
        i, j = units.index(unit), units.index(to)
    except ValueError:
        try:
            i, j = monthly_units.index(unit), monthly_units.index(to)
            factors = monthly_factors
        except ValueError:
            raise ValueError(
                'Cannot convert to or from variable length interval'
            )

    factor = functools.reduce(operator.mul, factors[min(i, j) : max(i, j)], 1)
    assert factor > 1

    if i < j:
        return value * factor

    assert i > j
    return value // factor
python
{ "resource": "" }
q24892
consume
train
def consume(iterator: Iterator[T], n: Optional[int] = None) -> None: """Advance the iterator n-steps ahead. If n is None, consume entirely.""" # Use functions that consume iterators at C speed. if n is None: # feed the entire iterator into a zero-length deque collections.deque(iterator, maxlen=0) else: # advance to the empty slice starting at position n next(itertools.islice(iterator, n, n), None)
python
{ "resource": "" }
q24893
arguments_from_signature
train
def arguments_from_signature(signature, *args, **kwargs):
    """Validate a signature against `args` and `kwargs`, returning the
    positional arguments and the keyword arguments asked for in the
    signature.

    Parameters
    ----------
    signature : inspect.Signature
    args : Tuple[object...]
    kwargs : Dict[str, object]

    Returns
    -------
    Tuple[Tuple, Dict[str, Any]]

    Examples
    --------
    >>> from inspect import signature
    >>> def foo(a, b=1):
    ...     return a + b
    >>> foo_sig = signature(foo)
    >>> args, kwargs = arguments_from_signature(foo_sig, 1, b=2)
    >>> args
    (1,)
    >>> kwargs
    {'b': 2}
    >>> def bar(a):
    ...     return a + 1
    >>> bar_sig = signature(bar)
    >>> args, kwargs = arguments_from_signature(bar_sig, 1, b=2)
    >>> args
    (1,)
    >>> kwargs
    {}
    """
    bound = signature.bind_partial(*args)
    meta_kwargs = toolz.merge({'kwargs': kwargs}, kwargs)
    remaining_parameters = signature.parameters.keys() - bound.arguments.keys()
    new_kwargs = {
        k: meta_kwargs[k]
        for k in remaining_parameters
        if k in signature.parameters
        if signature.parameters[k].kind
        in {
            Parameter.KEYWORD_ONLY,
            Parameter.POSITIONAL_OR_KEYWORD,
            Parameter.VAR_KEYWORD,
        }
    }
    return args, new_kwargs
python
{ "resource": "" }
q24894
parameter_count
train
def parameter_count(funcsig):
    """Get the number of positional-or-keyword or positional-only parameters
    in a function signature.

    Parameters
    ----------
    funcsig : inspect.Signature
        A UDF signature

    Returns
    -------
    int
        The number of parameters
    """
    return sum(
        param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
        for param in funcsig.parameters.values()
        if param.default is Parameter.empty
    )
python
{ "resource": "" }
q24895
udf.reduction
train
def reduction(input_type, output_type): """Define a user-defined reduction function that takes N pandas Series or scalar values as inputs and produces one row of output. Parameters ---------- input_type : List[ibis.expr.datatypes.DataType] A list of the types found in :mod:`~ibis.expr.datatypes`. The length of this list must match the number of arguments to the function. Variadic arguments are not yet supported. output_type : ibis.expr.datatypes.DataType The return type of the function. Examples -------- >>> import ibis >>> import ibis.expr.datatypes as dt >>> from ibis.pandas.udf import udf >>> @udf.reduction(input_type=[dt.string], output_type=dt.int64) ... def my_string_length_agg(series, **kwargs): ... return (series.str.len() * 2).sum() """ return udf._grouped( input_type, output_type, base_class=ops.Reduction, output_type_method=operator.attrgetter('scalar_type'), )
python
{ "resource": "" }
q24896
udf._grouped
train
def _grouped(input_type, output_type, base_class, output_type_method): """Define a user-defined function that is applied per group. Parameters ---------- input_type : List[ibis.expr.datatypes.DataType] A list of the types found in :mod:`~ibis.expr.datatypes`. The length of this list must match the number of arguments to the function. Variadic arguments are not yet supported. output_type : ibis.expr.datatypes.DataType The return type of the function. base_class : Type[T] The base class of the generated Node output_type_method : Callable A callable that determines the method to call to get the expression type of the UDF See Also -------- ibis.pandas.udf.reduction ibis.pandas.udf.analytic """ def wrapper(func): funcsig = valid_function_signature(input_type, func) UDAFNode = type( func.__name__, (base_class,), { 'signature': sig.TypeSignature.from_dtypes(input_type), 'output_type': output_type_method(output_type), }, ) # An execution rule for a simple aggregate node @execute_node.register( UDAFNode, *udf_signature(input_type, pin=None, klass=pd.Series) ) def execute_udaf_node(op, *args, **kwargs): args, kwargs = arguments_from_signature( funcsig, *args, **kwargs ) return func(*args, **kwargs) # An execution rule for a grouped aggregation node. This # includes aggregates applied over a window. nargs = len(input_type) group_by_signatures = [ udf_signature(input_type, pin=pin, klass=SeriesGroupBy) for pin in range(nargs) ] @toolz.compose( *( execute_node.register(UDAFNode, *types) for types in group_by_signatures ) ) def execute_udaf_node_groupby(op, *args, **kwargs): # construct a generator that yields the next group of data # for every argument excluding the first (pandas performs # the iteration for the first argument) for each argument # that is a SeriesGroupBy. # # If the argument is not a SeriesGroupBy then keep # repeating it until all groups are exhausted. aggcontext = kwargs.pop('aggcontext', None) assert aggcontext is not None, 'aggcontext is None' iters = ( (data for _, data in arg) if isinstance(arg, SeriesGroupBy) else itertools.repeat(arg) for arg in args[1:] ) funcsig = signature(func) def aggregator(first, *rest, **kwargs): # map(next, *rest) gets the inputs for the next group # TODO: might be inefficient to do this on every call args, kwargs = arguments_from_signature( funcsig, first, *map(next, rest), **kwargs ) return func(*args, **kwargs) result = aggcontext.agg(args[0], aggregator, *iters, **kwargs) return result @functools.wraps(func) def wrapped(*args): return UDAFNode(*args).to_expr() return wrapped return wrapper
python
{ "resource": "" }
q24897
schema_from_table
train
def schema_from_table(table, schema=None):
    """Retrieve an ibis schema from a SQLAlchemy ``Table``.

    Parameters
    ----------
    table : sa.Table
    schema : Mapping[str, ibis.expr.datatypes.DataType], optional
        Explicit column type overrides, keyed by column name.

    Returns
    -------
    schema : ibis.expr.datatypes.Schema
        An ibis schema corresponding to the types of the columns in `table`.
    """
    schema = schema if schema is not None else {}
    pairs = []
    for name, column in table.columns.items():
        if name in schema:
            dtype = dt.dtype(schema[name])
        else:
            dtype = dt.dtype(
                getattr(table.bind, 'dialect', SQLAlchemyDialect()),
                column.type,
                nullable=column.nullable,
            )
        pairs.append((name, dtype))
    return sch.schema(pairs)
python
{ "resource": "" }
q24898
invalidates_reflection_cache
train
def invalidates_reflection_cache(f):
    """Invalidate the SQLAlchemy reflection cache if `f` performs an
    operation that mutates database or table metadata such as ``CREATE
    TABLE``, ``DROP TABLE``, etc.

    Parameters
    ----------
    f : callable
        A method on :class:`ibis.sql.alchemy.AlchemyClient`
    """

    @functools.wraps(f)
    def wrapped(self, *args, **kwargs):
        result = f(self, *args, **kwargs)

        # only invalidate the cache after we've successfully called the
        # wrapped function
        self._reflection_cache_is_dirty = True
        return result

    return wrapped
python
{ "resource": "" }
q24899
AlchemyDatabaseSchema.table
train
def table(self, name):
    """Return a table expression referencing a table in this schema.

    Parameters
    ----------
    name : str
        The name of the table to access.

    Returns
    -------
    table : TableExpr
    """
    qualified_name = self._qualify(name)
    return self.database.table(qualified_name, self.name)
python
{ "resource": "" }