| _id (string, lengths 2-7) | title (string, lengths 1-88) | partition (string, 3 classes) | text (string, lengths 75-19.8k) | language (string, 1 value) | meta_information (dict) |
|---|---|---|---|---|---|
q24900
|
convert_to_database_compatible_value
|
train
|
def convert_to_database_compatible_value(value):
"""Pandas 0.23 broke DataFrame.to_sql, so we workaround it by rolling our
own extremely low-tech conversion routine
"""
if pd.isnull(value):
return None
if isinstance(value, pd.Timestamp):
return value.to_pydatetime()
try:
return value.item()
except AttributeError:
return value
|
python
|
{
"resource": ""
}
|
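As a quick aside on the record above: a minimal usage sketch of convert_to_database_compatible_value, assuming the function and pandas/numpy are importable; the sample inputs are invented for illustration.
# Hedged sketch: exercising convert_to_database_compatible_value (above)
# on a few typical pandas/numpy scalars.
import numpy as np
import pandas as pd

print(convert_to_database_compatible_value(np.nan))                      # None (null check)
print(convert_to_database_compatible_value(pd.Timestamp('2019-01-01')))  # plain datetime.datetime
print(convert_to_database_compatible_value(np.int64(7)))                 # 7, unboxed via .item()
print(convert_to_database_compatible_value('abc'))                       # 'abc', returned unchanged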
q24901
|
matches
|
train
|
def matches(value, pattern):
"""Check whether `value` matches `pattern`.
Parameters
----------
value : ast.AST
pattern : ast.AST
Returns
-------
matched : bool
"""
# types must match exactly
if type(value) != type(pattern):
return False
# primitive value, such as None, True, False etc
if not isinstance(value, ast.AST) and not isinstance(pattern, ast.AST):
return value == pattern
fields = [
(field, getattr(pattern, field))
for field in pattern._fields
if hasattr(pattern, field)
]
for field_name, field_value in fields:
if not matches(getattr(value, field_name), field_value):
return False
return True
|
python
|
{
"resource": ""
}
|
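A small illustration of the AST matcher above, using only the standard library ast module; it assumes matches is importable, and the expressions compared are invented for demonstration.
# Hedged sketch: structural comparison of parsed expressions with matches().
import ast

node_a = ast.parse('x + 1', mode='eval').body
node_b = ast.parse('x + 1', mode='eval').body
node_c = ast.parse('x + 2', mode='eval').body

print(matches(node_a, node_b))  # True: same node types and primitive fields
print(matches(node_a, node_c))  # False: the constant operands differ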
q24902
|
highest_precedence_dtype
|
train
|
def highest_precedence_dtype(exprs):
"""Return the highest precedence type from the passed expressions
Also verifies that there are valid implicit casts between any of the types
and the selected highest precedence type.
This is a thin wrapper around datatypes highest precedence check.
Parameters
----------
exprs : Iterable[ir.ValueExpr]
A sequence of Expressions
Returns
-------
dtype: DataType
The highest precedence datatype
"""
if not exprs:
raise ValueError('Must pass at least one expression')
return dt.highest_precedence(expr.type() for expr in exprs)
|
python
|
{
"resource": ""
}
|
q24903
|
castable
|
train
|
def castable(source, target):
"""Return whether source ir type is implicitly castable to target
Based on the underlying datatypes and the value in case of Literals
"""
op = source.op()
value = getattr(op, 'value', None)
return dt.castable(source.type(), target.type(), value=value)
|
python
|
{
"resource": ""
}
|
q24904
|
one_of
|
train
|
def one_of(inners, arg):
"""At least one of the inner validators must pass"""
for inner in inners:
with suppress(com.IbisTypeError, ValueError):
return inner(arg)
rules_formatted = ', '.join(map(repr, inners))
raise com.IbisTypeError(
'Arg passes neither of the following rules: {}'.format(rules_formatted)
)
|
python
|
{
"resource": ""
}
|
q24905
|
instance_of
|
train
|
def instance_of(klass, arg):
"""Require that a value has a particular Python type."""
if not isinstance(arg, klass):
raise com.IbisTypeError(
'Given argument with type {} is not an instance of {}'.format(
type(arg), klass
)
)
return arg
|
python
|
{
"resource": ""
}
|
q24906
|
value
|
train
|
def value(dtype, arg):
"""Validates that the given argument is a Value with a particular datatype
Parameters
----------
dtype : DataType subclass or DataType instance
arg : python literal or an ibis expression
If a python literal is given the validator tries to coerce it to an ibis
literal.
Returns
-------
arg : AnyValue
An ibis value expression with the specified datatype
"""
if not isinstance(arg, ir.Expr):
# coerce python literal to ibis literal
arg = ir.literal(arg)
if not isinstance(arg, ir.AnyValue):
raise com.IbisTypeError(
'Given argument with type {} is not a value '
'expression'.format(type(arg))
)
# retrieve literal values for implicit cast check
value = getattr(arg.op(), 'value', None)
if isinstance(dtype, type) and isinstance(arg.type(), dtype):
# dtype class has been specified like dt.Interval or dt.Decimal
return arg
elif dt.castable(arg.type(), dt.dtype(dtype), value=value):
# dtype instance or string has been specified and arg's dtype is
# implicitly castable to it, like dt.int8 is castable to dt.int64
return arg
else:
raise com.IbisTypeError(
'Given argument with datatype {} is not '
'subtype of {} nor implicitly castable to '
'it'.format(arg.type(), dtype)
)
|
python
|
{
"resource": ""
}
|
q24907
|
table
|
train
|
def table(schema, arg):
"""A table argument.
Parameters
----------
schema : Union[sch.Schema, List[Tuple[str, dt.DataType]]]
A validator for the table's columns. Only column subset validators are
currently supported. Accepts any arguments that `sch.schema` accepts.
See the example for usage.
arg : The validatable argument.
Examples
--------
The following op will accept an argument named ``'table'``. Note that the
``schema`` argument specifies rules for columns that are required to be in
the table: ``time``, ``group`` and ``value1``. These must match the types
specified in the column rules. Column ``value2`` is optional, but if
present it must be of the specified type. The table may have extra columns
not specified in the schema.
"""
assert isinstance(arg, ir.TableExpr)
if arg.schema() >= sch.schema(schema):
return arg
raise com.IbisTypeError(
'Argument is not a table with column subset of {}'.format(schema)
)
|
python
|
{
"resource": ""
}
|
q24908
|
wrap_uda
|
train
|
def wrap_uda(
hdfs_file,
inputs,
output,
update_fn,
init_fn=None,
merge_fn=None,
finalize_fn=None,
serialize_fn=None,
close_fn=None,
name=None,
):
"""
Creates a callable aggregation function object. Must be created in Impala
to be used
Parameters
----------
hdfs_file: .so file that contains relevant UDA
inputs: list of strings denoting ibis datatypes
output: string denoting ibis datatype
update_fn: string
Library symbol name for update function
init_fn: string, optional
Library symbol name for initialization function
merge_fn: string, optional
Library symbol name for merge function
finalize_fn: string, optional
Library symbol name for finalize function
serialize_fn : string, optional
Library symbol name for serialize UDA API function. Not required for all
UDAs; see documentation for more.
close_fn : string, optional
name: string, optional
Used internally to track function
Returns
-------
container : UDA object
"""
func = ImpalaUDA(
inputs,
output,
update_fn,
init_fn,
merge_fn,
finalize_fn,
serialize_fn=serialize_fn,
name=name,
lib_path=hdfs_file,
)
return func
|
python
|
{
"resource": ""
}
|
q24909
|
wrap_udf
|
train
|
def wrap_udf(hdfs_file, inputs, output, so_symbol, name=None):
"""
Creates a callable scalar function object. Must be created in Impala to be
used
Parameters
----------
hdfs_file: .so file that contains relevant UDF
inputs: list of strings or sig.TypeSignature
Input types to UDF
output: string
Ibis data type
so_symbol: string, C++ function name for relevant UDF
name: string (optional). Used internally to track function
Returns
-------
container : UDF object
"""
func = ImpalaUDF(inputs, output, so_symbol, name=name, lib_path=hdfs_file)
return func
|
python
|
{
"resource": ""
}
|
q24910
|
add_operation
|
train
|
def add_operation(op, func_name, db):
"""
Registers the given operation within the Ibis SQL translation toolchain
Parameters
----------
op: operator class
func_name: name used in issuing statements to the SQL engine
db: database the relevant operator is registered to
"""
full_name = '{0}.{1}'.format(db, func_name)
# TODO
# if op.input_type is rlz.listof:
# translator = comp.varargs(full_name)
# else:
arity = len(op.signature)
translator = comp.fixed_arity(full_name, arity)
comp._operation_registry[op] = translator
|
python
|
{
"resource": ""
}
|
q24911
|
connect
|
train
|
def connect(
host='localhost',
port=21050,
database='default',
timeout=45,
use_ssl=False,
ca_cert=None,
user=None,
password=None,
auth_mechanism='NOSASL',
kerberos_service_name='impala',
pool_size=8,
hdfs_client=None,
):
"""Create an ImpalaClient for use with Ibis.
Parameters
----------
host : str, optional
Host name of the impalad or HiveServer2 in Hive
port : int, optional
Impala's HiveServer2 port
database : str, optional
Default database when obtaining new cursors
timeout : int, optional
Connection timeout in seconds when communicating with HiveServer2
use_ssl : bool, optional
Use SSL when connecting to HiveServer2
ca_cert : str, optional
Local path to 3rd party CA certificate or copy of server certificate
for self-signed certificates. If SSL is enabled, but this argument is
``None``, then certificate validation is skipped.
user : str, optional
LDAP user to authenticate
password : str, optional
LDAP password to authenticate
auth_mechanism : str, optional
{'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}.
Use NOSASL for non-secured Impala connections. Use PLAIN for
non-secured Hive clusters. Use LDAP for LDAP authenticated
connections. Use GSSAPI for Kerberos-secured clusters.
kerberos_service_name : str, optional
Specify particular impalad service principal.
Examples
--------
>>> import ibis
>>> import os
>>> hdfs_host = os.environ.get('IBIS_TEST_NN_HOST', 'localhost')
>>> hdfs_port = int(os.environ.get('IBIS_TEST_NN_PORT', 50070))
>>> impala_host = os.environ.get('IBIS_TEST_IMPALA_HOST', 'localhost')
>>> impala_port = int(os.environ.get('IBIS_TEST_IMPALA_PORT', 21050))
>>> hdfs = ibis.hdfs_connect(host=hdfs_host, port=hdfs_port)
>>> hdfs # doctest: +ELLIPSIS
<ibis.filesystems.WebHDFS object at 0x...>
>>> client = ibis.impala.connect(
... host=impala_host,
... port=impala_port,
... hdfs_client=hdfs,
... )
>>> client # doctest: +ELLIPSIS
<ibis.impala.client.ImpalaClient object at 0x...>
Returns
-------
ImpalaClient
"""
params = {
'host': host,
'port': port,
'database': database,
'timeout': timeout,
'use_ssl': use_ssl,
'ca_cert': ca_cert,
'user': user,
'password': password,
'auth_mechanism': auth_mechanism,
'kerberos_service_name': kerberos_service_name,
}
con = ImpalaConnection(pool_size=pool_size, **params)
try:
client = ImpalaClient(con, hdfs_client=hdfs_client)
except Exception:
con.close()
raise
else:
if options.default_backend is None:
options.default_backend = client
return client
|
python
|
{
"resource": ""
}
|
q24912
|
execute_with_scope
|
train
|
def execute_with_scope(expr, scope, aggcontext=None, clients=None, **kwargs):
"""Execute an expression `expr`, with data provided in `scope`.
Parameters
----------
expr : ibis.expr.types.Expr
The expression to execute.
scope : collections.Mapping
A dictionary mapping :class:`~ibis.expr.operations.Node` subclass
instances to concrete data such as a pandas DataFrame.
aggcontext : Optional[ibis.pandas.aggcontext.AggregationContext]
Returns
-------
result : scalar, pd.Series, pd.DataFrame
"""
op = expr.op()
# Call pre_execute, to allow clients to intercept the expression before
# computing anything *and* before associating leaf nodes with data. This
# allows clients to provide their own data for each leaf.
if clients is None:
clients = list(find_backends(expr))
if aggcontext is None:
aggcontext = agg_ctx.Summarize()
pre_executed_scope = pre_execute(
op, *clients, scope=scope, aggcontext=aggcontext, **kwargs
)
new_scope = toolz.merge(scope, pre_executed_scope)
result = execute_until_in_scope(
expr,
new_scope,
aggcontext=aggcontext,
clients=clients,
# XXX: we *explicitly* pass in scope and not new_scope here so that
# post_execute sees the scope of execute_with_scope, not the scope of
# execute_until_in_scope
post_execute_=functools.partial(
post_execute,
scope=scope,
aggcontext=aggcontext,
clients=clients,
**kwargs,
),
**kwargs,
)
return result
|
python
|
{
"resource": ""
}
|
q24913
|
execute_until_in_scope
|
train
|
def execute_until_in_scope(
expr, scope, aggcontext=None, clients=None, post_execute_=None, **kwargs
):
"""Execute until our op is in `scope`.
Parameters
----------
expr : ibis.expr.types.Expr
scope : Mapping
aggcontext : Optional[AggregationContext]
clients : List[ibis.client.Client]
kwargs : Mapping
"""
# these should never be None
assert aggcontext is not None, 'aggcontext is None'
assert clients is not None, 'clients is None'
assert post_execute_ is not None, 'post_execute_ is None'
# base case: our op has been computed (or is a leaf data node), so
# return the corresponding value
op = expr.op()
if op in scope:
return scope[op]
new_scope = execute_bottom_up(
expr,
scope,
aggcontext=aggcontext,
post_execute_=post_execute_,
clients=clients,
**kwargs,
)
new_scope = toolz.merge(
new_scope, pre_execute(op, *clients, scope=scope, **kwargs)
)
return execute_until_in_scope(
expr,
new_scope,
aggcontext=aggcontext,
clients=clients,
post_execute_=post_execute_,
**kwargs,
)
|
python
|
{
"resource": ""
}
|
q24914
|
execute_bottom_up
|
train
|
def execute_bottom_up(
expr, scope, aggcontext=None, post_execute_=None, clients=None, **kwargs
):
"""Execute `expr` bottom-up.
Parameters
----------
expr : ibis.expr.types.Expr
scope : Mapping[ibis.expr.operations.Node, object]
aggcontext : Optional[ibis.pandas.aggcontext.AggregationContext]
kwargs : Dict[str, object]
Returns
-------
result : Mapping[
ibis.expr.operations.Node,
Union[pandas.Series, pandas.DataFrame, scalar_types]
]
A mapping from node to the computed result of that Node
"""
assert post_execute_ is not None, 'post_execute_ is None'
op = expr.op()
# if we're in scope then return the scope, this will then be passed back
# into execute_bottom_up, which will then terminate
if op in scope:
return scope
elif isinstance(op, ops.Literal):
# special case literals to avoid the overhead of dispatching
# execute_node
return {
op: execute_literal(
op, op.value, expr.type(), aggcontext=aggcontext, **kwargs
)
}
# figure out what arguments we're able to compute on based on the
# expressions inputs. things like expressions, None, and scalar types are
# computable whereas ``list``s are not
computable_args = [arg for arg in op.inputs if is_computable_input(arg)]
# recursively compute each node's arguments until we've changed type
scopes = [
execute_bottom_up(
arg,
scope,
aggcontext=aggcontext,
post_execute_=post_execute_,
clients=clients,
**kwargs,
)
if hasattr(arg, 'op')
else {arg: arg}
for arg in computable_args
]
# if we're unable to find data then raise an exception
if not scopes:
raise com.UnboundExpressionError(
'Unable to find data for expression:\n{}'.format(repr(expr))
)
# there should be exactly one dictionary per computable argument
assert len(computable_args) == len(scopes)
new_scope = toolz.merge(scopes)
# pass our computed arguments to this node's execute_node implementation
data = [
new_scope[arg.op()] if hasattr(arg, 'op') else arg
for arg in computable_args
]
result = execute_node(
op,
*data,
scope=scope,
aggcontext=aggcontext,
clients=clients,
**kwargs,
)
computed = post_execute_(op, result)
return {op: computed}
|
python
|
{
"resource": ""
}
|
q24915
|
KuduImpalaInterface.connect
|
train
|
def connect(
self,
host_or_hosts,
port_or_ports=7051,
rpc_timeout=None,
admin_timeout=None,
):
"""
Pass-through connection interface to the Kudu client
Parameters
----------
host_or_hosts : string or list of strings
If you have multiple Kudu masters for HA, pass a list
port_or_ports : int or list of int, default 7051
If you pass multiple host names, pass multiple ports
rpc_timeout : kudu.TimeDelta
See Kudu client documentation for details
admin_timeout : kudu.TimeDelta
See Kudu client documentation for details
Returns
-------
None
"""
self.client = kudu.connect(
host_or_hosts,
port_or_ports,
rpc_timeout_ms=rpc_timeout,
admin_timeout_ms=admin_timeout,
)
|
python
|
{
"resource": ""
}
|
q24916
|
KuduImpalaInterface.create_table
|
train
|
def create_table(
self,
impala_name,
kudu_name,
primary_keys=None,
obj=None,
schema=None,
database=None,
external=False,
force=False,
):
"""
Create a Kudu-backed table in the connected Impala cluster. For
non-external tables, this will create a Kudu table with a compatible
storage schema.
This function is patterned after the ImpalaClient.create_table function
designed for physical filesystems (like HDFS).
Parameters
----------
impala_name : string
Name of the created Impala table
kudu_name : string
Name of the backing Kudu table. Will be created if external=False
primary_keys : list of column names
List of column names forming the table's primary key
obj : TableExpr or pandas.DataFrame, optional
If passed, creates table from select statement results
schema : ibis.Schema, optional
Mutually exclusive with expr, creates an empty table with a
particular schema
database : string, default None (optional)
external : boolean, default False
If False, a new Kudu table will be created. Otherwise, the Kudu table
must already exist.
"""
self._check_connected()
if not external and (primary_keys is None or len(primary_keys) == 0):
raise ValueError(
'Must specify primary keys when DDL creates a '
'new Kudu table'
)
if obj is not None:
if external:
raise ValueError(
'Cannot create an external Kudu-Impala table '
'from an expression or DataFrame'
)
if isinstance(obj, pd.DataFrame):
from ibis.impala.pandas_interop import write_temp_dataframe
writer, to_insert = write_temp_dataframe(
self.impala_client, obj
)
else:
to_insert = obj
# XXX: exposing a lot of internals
ast = self.impala_client._build_ast(to_insert)
select = ast.queries[0]
stmt = CTASKudu(
impala_name,
kudu_name,
self.client.master_addrs,
select,
primary_keys,
database=database,
)
else:
if external:
ktable = self.client.table(kudu_name)
kschema = ktable.schema
schema = schema_kudu_to_ibis(kschema)
primary_keys = kschema.primary_keys()
elif schema is None:
raise ValueError(
'Must specify schema for new empty ' 'Kudu-backed table'
)
stmt = CreateTableKudu(
impala_name,
kudu_name,
self.client.master_addrs,
schema,
primary_keys,
external=external,
database=database,
can_exist=False,
)
self.impala_client._execute(stmt)
|
python
|
{
"resource": ""
}
|
q24917
|
connect
|
train
|
def connect(
host='localhost',
port=9000,
database='default',
user='default',
password='',
client_name='ibis',
compression=_default_compression,
):
"""Create an ClickhouseClient for use with Ibis.
Parameters
----------
host : str, optional
Host name of the clickhouse server
port : int, optional
Clickhouse server's port
database : str, optional
Default database when executing queries
user : str, optional
User to authenticate with
password : str, optional
Password to authenticate with
client_name: str, optional
This will appear in clickhouse server logs
compression: str, optional
Whether or not to use compression.
Default is lz4 if installed else False.
Possible choices: lz4, lz4hc, quicklz, zstd, True, False
True is equivalent to 'lz4'.
Examples
--------
>>> import ibis
>>> import os
>>> clickhouse_host = os.environ.get('IBIS_TEST_CLICKHOUSE_HOST',
... 'localhost')
>>> clickhouse_port = int(os.environ.get('IBIS_TEST_CLICKHOUSE_PORT',
... 9000))
>>> client = ibis.clickhouse.connect(
... host=clickhouse_host,
... port=clickhouse_port
... )
>>> client # doctest: +ELLIPSIS
<ibis.clickhouse.client.ClickhouseClient object at 0x...>
Returns
-------
ClickhouseClient
"""
client = ClickhouseClient(
host,
port=port,
database=database,
user=user,
password=password,
client_name=client_name,
compression=compression,
)
if options.default_backend is None:
options.default_backend = client
return client
|
python
|
{
"resource": ""
}
|
q24918
|
ClickhouseClient.list_databases
|
train
|
def list_databases(self, like=None):
"""
List databases in the Clickhouse cluster.
Like the SHOW DATABASES command in the clickhouse-shell.
Parameters
----------
like : string, default None
e.g. 'foo*' to match all databases starting with 'foo'
Returns
-------
databases : list of strings
"""
statement = 'SELECT name FROM system.databases'
if like:
statement += " WHERE name LIKE '{0}'".format(like)
data, _, _ = self.raw_sql(statement, results=True)
return data[0]
|
python
|
{
"resource": ""
}
|
q24919
|
remap_overlapping_column_names
|
train
|
def remap_overlapping_column_names(table_op, root_table, data_columns):
"""Return an ``OrderedDict`` mapping possibly suffixed column names to
column names without suffixes.
Parameters
----------
table_op : TableNode
The ``TableNode`` we're selecting from.
root_table : TableNode
The root table of the expression we're selecting from.
data_columns : set or frozenset
The available columns to select from
Returns
-------
mapping : OrderedDict[str, str]
A map from possibly-suffixed column names to column names without
suffixes.
"""
if not isinstance(table_op, ops.Join):
return None
left_root, right_root = ops.distinct_roots(table_op.left, table_op.right)
suffixes = {
left_root: constants.LEFT_JOIN_SUFFIX,
right_root: constants.RIGHT_JOIN_SUFFIX,
}
column_names = [
({name, name + suffixes[root_table]} & data_columns, name)
for name in root_table.schema.names
]
mapping = OrderedDict(
(first(col_name), final_name)
for col_name, final_name in column_names
if col_name
)
return mapping
|
python
|
{
"resource": ""
}
|
q24920
|
_compute_predicates
|
train
|
def _compute_predicates(table_op, predicates, data, scope, **kwargs):
"""Compute the predicates for a table operation.
Parameters
----------
table_op : TableNode
predicates : List[ir.ColumnExpr]
data : pd.DataFrame
scope : dict
kwargs : dict
Returns
-------
computed_predicate : pd.Series[bool]
Notes
-----
This handles the cases where the predicates are computed columns, in
addition to the simple case of named columns coming directly from the input
table.
"""
for predicate in predicates:
# Map each root table of the predicate to the data so that we compute
# predicates on the result instead of any left or right tables if the
Selection is on a Join. Project data to only include columns from
# the root table.
root_tables = predicate.op().root_tables()
# handle suffixes
additional_scope = {}
data_columns = frozenset(data.columns)
for root_table in root_tables:
mapping = remap_overlapping_column_names(
table_op, root_table, data_columns
)
if mapping is not None:
new_data = data.loc[:, mapping.keys()].rename(columns=mapping)
else:
new_data = data
additional_scope[root_table] = new_data
new_scope = toolz.merge(scope, additional_scope)
yield execute(predicate, scope=new_scope, **kwargs)
|
python
|
{
"resource": ""
}
|
q24921
|
flatten_union
|
train
|
def flatten_union(table):
"""Extract all union queries from `table`.
Parameters
----------
table : TableExpr
Returns
-------
Iterable[Union[TableExpr, bool]]
"""
op = table.op()
if isinstance(op, ops.Union):
return toolz.concatv(
flatten_union(op.left), [op.distinct], flatten_union(op.right)
)
return [table]
|
python
|
{
"resource": ""
}
|
q24922
|
ExprTranslator.get_result
|
train
|
def get_result(self):
"""
Build compiled SQL expression from the bottom up and return as a string
"""
translated = self.translate(self.expr)
if self._needs_name(self.expr):
# TODO: this could fail in various ways
name = self.expr.get_name()
translated = self.name(translated, name)
return translated
|
python
|
{
"resource": ""
}
|
q24923
|
Select.compile
|
train
|
def compile(self):
"""
This method isn't yet idempotent; calling multiple times may yield
unexpected results
"""
# Can't tell if this is a hack or not. Revisit later
self.context.set_query(self)
# If any subqueries, translate them and add to beginning of query as
# part of the WITH section
with_frag = self.format_subqueries()
# SELECT
select_frag = self.format_select_set()
# FROM, JOIN, UNION
from_frag = self.format_table_set()
# WHERE
where_frag = self.format_where()
# GROUP BY and HAVING
groupby_frag = self.format_group_by()
# ORDER BY
order_frag = self.format_order_by()
# LIMIT
limit_frag = self.format_limit()
# Glue together the query fragments and return
query = '\n'.join(
filter(
None,
[
with_frag,
select_frag,
from_frag,
where_frag,
groupby_frag,
order_frag,
limit_frag,
],
)
)
return query
|
python
|
{
"resource": ""
}
|
q24924
|
table
|
train
|
def table(schema, name=None):
"""
Create an unbound Ibis table for creating expressions. Cannot be executed
without being bound to some physical table.
Useful for testing
Parameters
----------
schema : ibis Schema
name : string, default None
Name for table
Returns
-------
table : TableExpr
"""
if not isinstance(schema, Schema):
if isinstance(schema, dict):
schema = Schema.from_dict(schema)
else:
schema = Schema.from_tuples(schema)
node = ops.UnboundTable(schema, name=name)
return node.to_expr()
|
python
|
{
"resource": ""
}
|
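A brief usage sketch for the unbound table constructor above (exposed as ibis.table); the schema and expression are illustrative only.
# Hedged sketch: building an unbound table and a reduction over it.
import ibis

t = ibis.table([('user_id', 'int64'), ('amount', 'double')], name='payments')
total = t.amount.sum().name('total_amount')  # expression only; nothing is executed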
q24925
|
timestamp
|
train
|
def timestamp(value, timezone=None):
"""
Returns a timestamp literal if value is likely coercible to a timestamp
Parameters
----------
value : timestamp value as string
timezone: timezone as string
defaults to None
Returns
--------
result : TimestampScalar
"""
if isinstance(value, str):
try:
value = pd.Timestamp(value, tz=timezone)
except pd.errors.OutOfBoundsDatetime:
value = dateutil.parser.parse(value)
if isinstance(value, numbers.Integral):
raise TypeError(
(
"Passing an integer to ibis.timestamp is not supported. Use "
"ibis.literal({value:d}).to_timestamp() to create a timestamp "
"expression from an integer."
).format(value=value)
)
return literal(value, type=dt.Timestamp(timezone=timezone))
|
python
|
{
"resource": ""
}
|
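A short, hedged sketch of the timestamp literal helper above; the values and timezone are illustrative.
# Hedged sketch: timestamp literals with and without a timezone.
import ibis

ts = ibis.timestamp('2017-02-06 12:30:00')
ts_utc = ibis.timestamp('2017-02-06 12:30:00', timezone='UTC')
# both are TimestampScalar expressions; the second carries timezone metadata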
q24926
|
date
|
train
|
def date(value):
"""
Returns a date literal if value is likely coercible to a date
Parameters
----------
value : date value as string
Returns
--------
result : DateScalar
"""
if isinstance(value, str):
value = to_date(value)
return literal(value, type=dt.date)
|
python
|
{
"resource": ""
}
|
q24927
|
time
|
train
|
def time(value):
"""
Returns a time literal if value is likely coercible to a time
Parameters
----------
value : time value as string
Returns
--------
result : TimeScalar
"""
if isinstance(value, str):
value = to_time(value)
return literal(value, type=dt.time)
|
python
|
{
"resource": ""
}
|
q24928
|
interval
|
train
|
def interval(
value=None,
unit='s',
years=None,
quarters=None,
months=None,
weeks=None,
days=None,
hours=None,
minutes=None,
seconds=None,
milliseconds=None,
microseconds=None,
nanoseconds=None,
):
"""
Returns an interval literal
Parameters
----------
value : int or datetime.timedelta, default None
years : int, default None
quarters : int, default None
months : int, default None
days : int, default None
weeks : int, default None
hours : int, default None
minutes : int, default None
seconds : int, default None
milliseconds : int, default None
microseconds : int, default None
nanoseconds : int, default None
Returns
--------
result : IntervalScalar
"""
if value is not None:
if isinstance(value, datetime.timedelta):
unit = 's'
value = int(value.total_seconds())
elif not isinstance(value, int):
raise ValueError('Interval value must be an integer')
else:
kwds = [
('Y', years),
('Q', quarters),
('M', months),
('W', weeks),
('D', days),
('h', hours),
('m', minutes),
('s', seconds),
('ms', milliseconds),
('us', microseconds),
('ns', nanoseconds),
]
defined_units = [(k, v) for k, v in kwds if v is not None]
if len(defined_units) != 1:
raise ValueError('Exactly one argument is required')
unit, value = defined_units[0]
value_type = literal(value).type()
type = dt.Interval(unit, value_type)
return literal(value, type=type).op().to_expr()
|
python
|
{
"resource": ""
}
|
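A hedged sketch of the interval constructor above, showing the three input styles it accepts; the specific durations are illustrative.
# Hedged sketch: interval literals from a keyword, a value/unit pair,
# and a datetime.timedelta (converted to whole seconds).
import datetime
import ibis

one_day = ibis.interval(days=1)
ninety_seconds = ibis.interval(90, unit='s')
five_minutes = ibis.interval(datetime.timedelta(minutes=5))
combined = one_day + ibis.interval(hours=5)  # intervals compose arithmetically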
q24929
|
negate
|
train
|
def negate(arg):
"""
Negate a numeric expression
Parameters
----------
arg : numeric value expression
Returns
-------
negated : type of caller
"""
op = arg.op()
if hasattr(op, 'negate'):
result = op.negate()
else:
result = ops.Negate(arg)
return result.to_expr()
|
python
|
{
"resource": ""
}
|
q24930
|
over
|
train
|
def over(expr, window):
"""
Turn an aggregation or full-sample analytic operation into a windowed
operation. See ibis.window for more details on window configuration
Parameters
----------
expr : value expression
window : ibis.Window
Returns
-------
expr : type of input
"""
prior_op = expr.op()
if isinstance(prior_op, ops.WindowOp):
op = prior_op.over(window)
else:
op = ops.WindowOp(expr, window)
result = op.to_expr()
try:
name = expr.get_name()
except com.ExpressionError:
pass
else:
result = result.name(name)
return result
|
python
|
{
"resource": ""
}
|
q24931
|
value_counts
|
train
|
def value_counts(arg, metric_name='count'):
"""
Compute a frequency table for this value expression
Parameters
----------
metric_name : string, default 'count'
Name to use for the frequency (count) column
Returns
-------
counts : TableExpr
Aggregated table
"""
base = ir.find_base_table(arg)
metric = base.count().name(metric_name)
try:
arg.get_name()
except com.ExpressionError:
arg = arg.name('unnamed')
return base.group_by(arg).aggregate(metric)
|
python
|
{
"resource": ""
}
|
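A minimal sketch of value_counts as exposed on column expressions, assuming the standard ibis column method; the table is invented for illustration.
# Hedged sketch: frequency table for a string column of an unbound table.
import ibis

t = ibis.table([('color', 'string')], name='t')
freq = t.color.value_counts(metric_name='n')
# freq is a TableExpr: one row per distinct color, with a count column 'n'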
q24932
|
isin
|
train
|
def isin(arg, values):
"""
Check whether the value expression is contained within the indicated
list of values.
Parameters
----------
values : list, tuple, or array expression
The values can be scalar or array-like. Each of them must be
comparable with the calling expression, or None (NULL).
Examples
--------
>>> import ibis
>>> table = ibis.table([('string_col', 'string')])
>>> table2 = ibis.table([('other_string_col', 'string')])
>>> expr = table.string_col.isin(['foo', 'bar', 'baz'])
>>> expr2 = table.string_col.isin(table2.other_string_col)
Returns
-------
contains : BooleanValue
"""
op = ops.Contains(arg, values)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24933
|
cases
|
train
|
def cases(arg, case_result_pairs, default=None):
"""
Create a case expression in one shot.
Returns
-------
case_expr : SimpleCase
"""
builder = arg.case()
for case, result in case_result_pairs:
builder = builder.when(case, result)
if default is not None:
builder = builder.else_(default)
return builder.end()
|
python
|
{
"resource": ""
}
|
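A hedged sketch of the cases helper above as a column method; the mapping is illustrative.
# Hedged sketch: a simple CASE expression built in one shot.
import ibis

t = ibis.table([('status', 'string')], name='t')
label = t.status.cases(
    [('OPEN', 'active'), ('CLOSED', 'inactive')],
    default='unknown',
)
# equivalent to t.status.case().when(...).when(...).else_('unknown').end()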
q24934
|
_generic_summary
|
train
|
def _generic_summary(arg, exact_nunique=False, prefix=None):
"""
Compute a set of summary metrics from the input value expression
Parameters
----------
arg : value expression
exact_nunique : boolean, default False
Compute the exact number of distinct values (slower)
prefix : string, default None
String prefix for metric names
Returns
-------
summary : (count, # nulls, nunique)
"""
metrics = [arg.count(), arg.isnull().sum().name('nulls')]
if exact_nunique:
unique_metric = arg.nunique().name('uniques')
else:
unique_metric = arg.approx_nunique().name('uniques')
metrics.append(unique_metric)
return _wrap_summary_metrics(metrics, prefix)
|
python
|
{
"resource": ""
}
|
q24935
|
_numeric_summary
|
train
|
def _numeric_summary(arg, exact_nunique=False, prefix=None):
"""
Compute a set of summary metrics from the input numeric value expression
Parameters
----------
arg : numeric value expression
exact_nunique : boolean, default False
prefix : string, default None
String prefix for metric names
Returns
-------
summary : (count, # nulls, min, max, sum, mean, nunique)
"""
metrics = [
arg.count(),
arg.isnull().sum().name('nulls'),
arg.min(),
arg.max(),
arg.sum(),
arg.mean(),
]
if exact_nunique:
unique_metric = arg.nunique().name('nunique')
else:
unique_metric = arg.approx_nunique().name('approx_nunique')
metrics.append(unique_metric)
return _wrap_summary_metrics(metrics, prefix)
|
python
|
{
"resource": ""
}
|
q24936
|
round
|
train
|
def round(arg, digits=None):
"""
Round values either to integer or indicated number of decimal places.
Returns
-------
rounded : type depending on digits argument
digits None or 0
decimal types: decimal
other numeric types: bigint
digits nonzero
decimal types: decimal
other numeric types: double
"""
op = ops.Round(arg, digits)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24937
|
log
|
train
|
def log(arg, base=None):
"""
Perform the logarithm using a specified base
Parameters
----------
base : number, default None
If None, base e is used
Returns
-------
logarithm : double type
"""
op = ops.Log(arg, base)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24938
|
quantile
|
train
|
def quantile(arg, quantile, interpolation='linear'):
"""
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
quantile : float/int or array-like
0 <= quantile <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile
if scalar input, scalar type, same as input
if array input, list of scalar type
"""
if isinstance(quantile, collections.abc.Sequence):
op = ops.MultiQuantile(arg, quantile, interpolation)
else:
op = ops.Quantile(arg, quantile, interpolation)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
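A short sketch of the quantile reduction above, covering the scalar and list forms; the column and probabilities are illustrative.
# Hedged sketch: scalar and multi-quantile reductions on a numeric column.
import ibis

t = ibis.table([('x', 'double')], name='t')
median = t.x.quantile(0.5)                   # scalar, same type as x
quartiles = t.x.quantile([0.25, 0.5, 0.75])  # list of scalars (MultiQuantile)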
q24939
|
_integer_to_interval
|
train
|
def _integer_to_interval(arg, unit='s'):
"""
Convert an integer to an interval with the same inner type
Parameters
----------
unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
Returns
-------
interval : interval value expression
"""
op = ops.IntervalFromInteger(arg, unit)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24940
|
correlation
|
train
|
def correlation(left, right, where=None, how='sample'):
"""
Compute the correlation of two numeric arrays
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
corr : double scalar
"""
expr = ops.Correlation(left, right, how, where).to_expr()
return expr
|
python
|
{
"resource": ""
}
|
q24941
|
covariance
|
train
|
def covariance(left, right, where=None, how='sample'):
"""
Compute the covariance of two numeric arrays
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
cov : double scalar
"""
expr = ops.Covariance(left, right, how, where).to_expr()
return expr
|
python
|
{
"resource": ""
}
|
q24942
|
geo_area
|
train
|
def geo_area(arg, use_spheroid=None):
"""
Compute the area of a geospatial value
Parameters
----------
arg : geometry or geography
use_spheroid: default None
Returns
-------
area : double scalar
"""
op = ops.GeoArea(arg, use_spheroid)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24943
|
geo_contains
|
train
|
def geo_contains(left, right):
"""
Check if the first geometry contains the second one
Parameters
----------
left : geometry
right : geometry
Returns
-------
contains : bool scalar
"""
op = ops.GeoContains(left, right)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24944
|
geo_distance
|
train
|
def geo_distance(left, right, use_spheroid=None):
"""
Compute the distance between two geospatial values
Parameters
----------
left : geometry or geography
right : geometry or geography
use_spheroid : default None
Returns
-------
distance : double scalar
"""
op = ops.GeoDistance(left, right, use_spheroid)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24945
|
geo_length
|
train
|
def geo_length(arg, use_spheroid=None):
"""
Compute the length of a geospatial value
Parameters
----------
arg : geometry or geography
use_spheroid : default None
Returns
-------
length : double scalar
"""
op = ops.GeoLength(arg, use_spheroid)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24946
|
geo_perimeter
|
train
|
def geo_perimeter(arg, use_spheroid=None):
"""
Compute the perimeter of a geospatial value
Parameters
----------
arg : geometry or geography
use_spheroid : default None
Returns
-------
perimeter : double scalar
"""
op = ops.GeoPerimeter(arg, use_spheroid)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24947
|
geo_max_distance
|
train
|
def geo_max_distance(left, right):
"""Returns the 2-dimensional maximum distance between two geometries in
projected units. If g1 and g2 are the same geometry, the function returns
the distance between the two vertices in that geometry that are farthest
from each other
Parameters
----------
left : geometry
right : geometry
Returns
-------
MaxDistance : double scalar
"""
op = ops.GeoMaxDistance(left, right)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24948
|
geo_point_n
|
train
|
def geo_point_n(arg, n):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
Parameters
----------
arg : geometry
n : integer
Returns
-------
PointN : geometry scalar
"""
op = ops.GeoPointN(arg, n)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24949
|
ifelse
|
train
|
def ifelse(arg, true_expr, false_expr):
"""
Shorthand for implementing ternary expressions
bool_expr.ifelse(0, 1)
e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END
"""
# Result will be the result of promotion of true/false exprs. These
# might be conflicting types; same type resolution as case expressions
# must be used.
case = ops.SearchedCaseBuilder()
return case.when(arg, true_expr).else_(false_expr).end()
|
python
|
{
"resource": ""
}
|
q24950
|
_string_substr
|
train
|
def _string_substr(self, start, length=None):
"""
Pull substrings out of each string value by position and maximum
length.
Parameters
----------
start : int
Position of the first character to include, with indices starting at 0
(like Python)
length : int, optional
Maximum length of each substring. If not supplied, splits each string
to the end
Returns
-------
substrings : type of caller
"""
op = ops.Substring(self, start, length)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24951
|
regex_extract
|
train
|
def regex_extract(arg, pattern, index):
"""
Returns the match group at the specified index (0-indexed) from the
string, based on the given regex pattern
Parameters
----------
pattern : string (regular expression string)
index : int, 0 indexed
Returns
-------
extracted : string
"""
return ops.RegexExtract(arg, pattern, index).to_expr()
|
python
|
{
"resource": ""
}
|
q24952
|
regex_replace
|
train
|
def regex_replace(arg, pattern, replacement):
"""
Replaces match found by regex with replacement string.
Replacement string can also be a regex
Parameters
----------
pattern : string (regular expression string)
replacement : string (can be regular expression string)
Examples
--------
>>> import ibis
>>> table = ibis.table([('strings', 'string')])
>>> result = table.strings.replace('(b+)', r'<\1>') # 'aaabbbaaa' becomes 'aaa<bbb>aaa' # noqa: E501
Returns
-------
modified : string
"""
return ops.RegexReplace(arg, pattern, replacement).to_expr()
|
python
|
{
"resource": ""
}
|
q24953
|
_string_replace
|
train
|
def _string_replace(arg, pattern, replacement):
"""
Replaces each exact occurrence of pattern with the given replacement
string. Like the Python built-in str.replace
Parameters
----------
pattern : string
replacement : string
Examples
--------
>>> import ibis
>>> table = ibis.table([('strings', 'string')])
>>> result = table.strings.replace('aaa', 'foo') # 'aaabbbaaa' becomes 'foobbbfoo' # noqa: E501
Returns
-------
replaced : string
"""
return ops.StringReplace(arg, pattern, replacement).to_expr()
|
python
|
{
"resource": ""
}
|
q24954
|
to_timestamp
|
train
|
def to_timestamp(arg, format_str, timezone=None):
"""
Parses a string and returns a timestamp.
Parameters
----------
format_str : A format string potentially of the type '%Y-%m-%d'
timezone : An optional string indicating the timezone,
i.e. 'America/New_York'
Examples
--------
>>> import ibis
>>> date_as_str = ibis.literal('20170206')
>>> result = date_as_str.to_timestamp('%Y%m%d')
Returns
-------
parsed : TimestampValue
"""
return ops.StringToTimestamp(arg, format_str, timezone).to_expr()
|
python
|
{
"resource": ""
}
|
q24955
|
parse_url
|
train
|
def parse_url(arg, extract, key=None):
"""
Returns the portion of a URL corresponding to the part specified
by 'extract'.
A key can optionally be given to retrieve an associated value
when the extract parameter is 'QUERY'
Parameters
----------
extract : one of {'PROTOCOL', 'HOST', 'PATH', 'REF',
'AUTHORITY', 'FILE', 'USERINFO', 'QUERY'}
key : string (optional)
Examples
--------
>>> url = "https://www.youtube.com/watch?v=kEuEcWfewf8&t=10"
>>> parse_url(url, 'QUERY', 'v') # doctest: +SKIP
'kEuEcWfewf8'
Returns
-------
extracted : string
"""
return ops.ParseURL(arg, extract, key).to_expr()
|
python
|
{
"resource": ""
}
|
q24956
|
_array_slice
|
train
|
def _array_slice(array, index):
"""Slice or index `array` at `index`.
Parameters
----------
index : int or ibis.expr.types.IntegerValue or slice
Returns
-------
sliced_array : ibis.expr.types.ValueExpr
If `index` is an ``int`` or :class:`~ibis.expr.types.IntegerValue` then
the return type is the element type of `array`. If `index` is a
``slice`` then the return type is the same type as the input.
"""
if isinstance(index, slice):
start = index.start
stop = index.stop
if (start is not None and start < 0) or (
stop is not None and stop < 0
):
raise ValueError('negative slicing not yet supported')
step = index.step
if step is not None and step != 1:
raise NotImplementedError('step can only be 1')
op = ops.ArraySlice(array, start if start is not None else 0, stop)
else:
op = ops.ArrayIndex(array, index)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24957
|
get
|
train
|
def get(expr, key, default=None):
"""
Return the mapped value for this key, or the default
if the key does not exist
Parameters
----------
key : any
default : any
"""
return ops.MapValueOrDefaultForKey(expr, key, default).to_expr()
|
python
|
{
"resource": ""
}
|
q24958
|
_struct_get_field
|
train
|
def _struct_get_field(expr, field_name):
"""Get the `field_name` field from the ``Struct`` expression `expr`.
Parameters
----------
field_name : str
The name of the field to access from the ``Struct`` typed expression
`expr`. Must be a Python ``str`` type; programmatic struct field
access is not yet supported.
Returns
-------
value_expr : ibis.expr.types.ValueExpr
An expression with the type of the field being accessed.
"""
return ops.StructField(expr, field_name).to_expr().name(field_name)
|
python
|
{
"resource": ""
}
|
q24959
|
join
|
train
|
def join(left, right, predicates=(), how='inner'):
"""Perform a relational join between two tables. Does not resolve resulting
table schema.
Parameters
----------
left : TableExpr
right : TableExpr
predicates : join expression(s)
how : string, default 'inner'
- 'inner': inner join
- 'left': left join
- 'outer': full outer join
- 'right': right outer join
- 'semi' or 'left_semi': left semi join
- 'anti': anti join
Returns
-------
joined : TableExpr
Note that the schema is not materialized yet
"""
klass = _join_classes[how.lower()]
if isinstance(predicates, Expr):
predicates = _L.flatten_predicate(predicates)
op = klass(left, right, predicates)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24960
|
asof_join
|
train
|
def asof_join(left, right, predicates=(), by=(), tolerance=None):
"""Perform an asof join between two tables. Similar to a left join
except that the match is done on nearest key rather than equal keys.
Optionally, match keys with 'by' before joining with predicates.
Parameters
----------
left : TableExpr
right : TableExpr
predicates : join expression(s)
by : string
column to group by before joining
tolerance : interval
Amount of time to look behind when joining
Returns
-------
joined : TableExpr
Note that the schema is not materialized yet
"""
return ops.AsOfJoin(left, right, predicates, by, tolerance).to_expr()
|
python
|
{
"resource": ""
}
|
q24961
|
_table_info
|
train
|
def _table_info(self, buf=None):
"""
Similar to pandas DataFrame.info. Show column names, types, and null
counts. Output to stdout by default
"""
metrics = [self.count().name('nrows')]
for col in self.columns:
metrics.append(self[col].count().name(col))
metrics = self.aggregate(metrics).execute().loc[0]
names = ['Column', '------'] + self.columns
types = ['Type', '----'] + [repr(x) for x in self.schema().types]
counts = ['Non-null #', '----------'] + [str(x) for x in metrics[1:]]
col_metrics = util.adjoin(2, names, types, counts)
result = 'Table rows: {}\n\n{}'.format(metrics[0], col_metrics)
print(result, file=buf)
|
python
|
{
"resource": ""
}
|
q24962
|
_table_set_column
|
train
|
def _table_set_column(table, name, expr):
"""
Replace an existing column with a new expression
Parameters
----------
name : string
Column name to replace
expr : value expression
New data for column
Returns
-------
set_table : TableExpr
New table expression
"""
expr = table._ensure_expr(expr)
if expr._name != name:
expr = expr.name(name)
if name not in table:
raise KeyError('{0} is not in the table'.format(name))
# TODO: This assumes that projection is required; may be backend-dependent
proj_exprs = []
for key in table.columns:
if key == name:
proj_exprs.append(expr)
else:
proj_exprs.append(table[key])
return table.projection(proj_exprs)
|
python
|
{
"resource": ""
}
|
q24963
|
filter
|
train
|
def filter(table, predicates):
"""
Select rows from table based on boolean expressions
Parameters
----------
predicates : boolean array expressions, or list thereof
Returns
-------
filtered_expr : TableExpr
"""
resolved_predicates = _resolve_predicates(table, predicates)
return _L.apply_filter(table, resolved_predicates)
|
python
|
{
"resource": ""
}
|
q24964
|
aggregate
|
train
|
def aggregate(table, metrics=None, by=None, having=None, **kwds):
"""
Aggregate a table with a given set of reductions, with grouping
expressions, and post-aggregation filters.
Parameters
----------
table : table expression
metrics : expression or expression list
by : optional, default None
Grouping expressions
having : optional, default None
Post-aggregation filters
Returns
-------
agg_expr : TableExpr
"""
if metrics is None:
metrics = []
for k, v in sorted(kwds.items()):
v = table._ensure_expr(v)
metrics.append(v.name(k))
op = table.op().aggregate(table, metrics, by=by, having=having)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24965
|
_table_union
|
train
|
def _table_union(left, right, distinct=False):
"""
Form the table set union of two table expressions having identical
schemas.
Parameters
----------
right : TableExpr
distinct : boolean, default False
If True, perform a UNION DISTINCT, returning only distinct rows in the
combined result (this can be very expensive, be careful)
Returns
-------
union : TableExpr
"""
op = ops.Union(left, right, distinct=distinct)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24966
|
_table_materialize
|
train
|
def _table_materialize(table):
"""
Force schema resolution for a joined table, selecting all fields from
all tables.
"""
if table._is_materialized():
return table
op = ops.MaterializedJoin(table)
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24967
|
mutate
|
train
|
def mutate(table, exprs=None, **mutations):
"""
Convenience function for table projections involving adding columns
Parameters
----------
exprs : list, default None
List of named expressions to add as columns
mutations : keywords for new columns
Returns
-------
mutated : TableExpr
Examples
--------
Using keywords arguments to name the new columns
>>> import ibis
>>> table = ibis.table([('foo', 'double'), ('bar', 'double')], name='t')
>>> expr = table.mutate(qux=table.foo + table.bar, baz=5)
>>> expr # doctest: +NORMALIZE_WHITESPACE
ref_0
UnboundTable[table]
name: t
schema:
foo : float64
bar : float64
<BLANKLINE>
Selection[table]
table:
Table: ref_0
selections:
Table: ref_0
baz = Literal[int8]
5
qux = Add[float64*]
left:
foo = Column[float64*] 'foo' from table
ref_0
right:
bar = Column[float64*] 'bar' from table
ref_0
Using the :meth:`ibis.expr.types.Expr.name` method to name the new columns
>>> new_columns = [ibis.literal(5).name('baz',),
... (table.foo + table.bar).name('qux')]
>>> expr2 = table.mutate(new_columns)
>>> expr.equals(expr2)
True
"""
if exprs is None:
exprs = []
else:
exprs = util.promote_list(exprs)
for k, v in sorted(mutations.items(), key=operator.itemgetter(0)):
if util.is_function(v):
v = v(table)
else:
v = as_value_expr(v)
exprs.append(v.name(k))
has_replacement = False
for expr in exprs:
if expr.get_name() in table:
has_replacement = True
if has_replacement:
by_name = dict((x.get_name(), x) for x in exprs)
used = set()
proj_exprs = []
for c in table.columns:
if c in by_name:
proj_exprs.append(by_name[c])
used.add(c)
else:
proj_exprs.append(c)
for x in exprs:
if x.get_name() not in used:
proj_exprs.append(x)
return table.projection(proj_exprs)
else:
return table.projection([table] + exprs)
|
python
|
{
"resource": ""
}
|
q24968
|
projection
|
train
|
def projection(table, exprs):
"""
Compute new table expression with the indicated column expressions from
this table.
Parameters
----------
exprs : column expression, or string, or list of column expressions and
strings. If strings passed, must be columns in the table already
Returns
-------
projection : TableExpr
Notes
-----
Passing an aggregate function to this method will broadcast the aggregate's
value over the number of rows in the table. See the examples section for
more details.
Examples
--------
Simple projection
>>> import ibis
>>> fields = [('a', 'int64'), ('b', 'double')]
>>> t = ibis.table(fields, name='t')
>>> proj = t.projection([t.a, (t.b + 1).name('b_plus_1')])
>>> proj # doctest: +NORMALIZE_WHITESPACE
ref_0
UnboundTable[table]
name: t
schema:
a : int64
b : float64
<BLANKLINE>
Selection[table]
table:
Table: ref_0
selections:
a = Column[int64*] 'a' from table
ref_0
b_plus_1 = Add[float64*]
left:
b = Column[float64*] 'b' from table
ref_0
right:
Literal[int8]
1
>>> proj2 = t[t.a, (t.b + 1).name('b_plus_1')]
>>> proj.equals(proj2)
True
Aggregate projection
>>> agg_proj = t[t.a.sum().name('sum_a'), t.b.mean().name('mean_b')]
>>> agg_proj # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
ref_0
UnboundTable[table]
name: t
schema:
a : int64
b : float64
<BLANKLINE>
Selection[table]
table:
Table: ref_0
selections:
sum_a = WindowOp[int64*]
sum_a = Sum[int64]
a = Column[int64*] 'a' from table
ref_0
where:
None
<ibis.expr.window.Window object at 0x...>
mean_b = WindowOp[float64*]
mean_b = Mean[float64]
b = Column[float64*] 'b' from table
ref_0
where:
None
<ibis.expr.window.Window object at 0x...>
Note the ``<ibis.expr.window.Window>`` objects here, their existence means
that the result of the aggregation will be broadcast across the number of
rows in the input column. The purpose of this expression rewrite is to make
it easy to write column/scalar-aggregate operations like
.. code-block:: python
t[(t.a - t.a.mean()).name('demeaned_a')]
"""
import ibis.expr.analysis as L
if isinstance(exprs, (Expr, str)):
exprs = [exprs]
projector = L.Projector(table, exprs)
op = projector.get_result()
return op.to_expr()
|
python
|
{
"resource": ""
}
|
q24969
|
_table_relabel
|
train
|
def _table_relabel(table, substitutions, replacements=None):
"""
Change table column names, otherwise leaving table unaltered
Parameters
----------
substitutions : dict
Mapping from existing column name to new column name
Returns
-------
relabeled : TableExpr
"""
if replacements is not None:
raise NotImplementedError
observed = set()
exprs = []
for c in table.columns:
expr = table[c]
if c in substitutions:
expr = expr.name(substitutions[c])
observed.add(c)
exprs.append(expr)
for c in substitutions:
if c not in observed:
raise KeyError('{0!r} is not an existing column'.format(c))
return table.projection(exprs)
|
python
|
{
"resource": ""
}
|
q24970
|
prevent_rewrite
|
train
|
def prevent_rewrite(expr, client=None):
"""Prevent optimization from happening below `expr`.
Parameters
----------
expr : ir.TableExpr
Any table expression whose optimization you want to prevent
client : ibis.client.Client, optional, default None
A client to use to create the SQLQueryResult operation. This is useful
if you're compiling an expression that derives from an
:class:`~ibis.expr.operations.UnboundTable` operation.
Returns
-------
sql_query_result : ir.TableExpr
"""
if client is None:
client, = ibis.client.find_backends(expr)
query = client.compile(expr)
return ops.SQLQueryResult(query, expr.schema(), client).to_expr()
|
python
|
{
"resource": ""
}
|
q24971
|
from_dataframe
|
train
|
def from_dataframe(df, name='df', client=None):
"""
Convenience function to construct an ibis table from a DataFrame.
EXPERIMENTAL API
Parameters
----------
df : DataFrame
name : str, default 'df'
client : Client, default new PandasClient
the client's dictionary will be mutated to hold
the DataFrame under the given name
Returns
-------
Table
"""
if client is None:
return connect({name: df}).table(name)
client.dictionary[name] = df
return client.table(name)
|
python
|
{
"resource": ""
}
|
q24972
|
_flatten_subclass_tree
|
train
|
def _flatten_subclass_tree(cls):
"""Return the set of all child classes of `cls`.
Parameters
----------
cls : Type
Returns
-------
frozenset[Type]
"""
subclasses = frozenset(cls.__subclasses__())
children = frozenset(toolz.concat(map(_flatten_subclass_tree, subclasses)))
return frozenset({cls}) | subclasses | children
|
python
|
{
"resource": ""
}
|
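A tiny, hedged illustration of _flatten_subclass_tree above on an invented class hierarchy; it assumes the function (and its toolz dependency) is importable.
# Hedged sketch: collecting a class plus all of its descendants.
class Base:
    pass

class Child(Base):
    pass

class Grandchild(Child):
    pass

print(_flatten_subclass_tree(Base) == frozenset({Base, Child, Grandchild}))  # True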
q24973
|
bigquery_field_to_ibis_dtype
|
train
|
def bigquery_field_to_ibis_dtype(field):
"""Convert BigQuery `field` to an ibis type."""
typ = field.field_type
if typ == 'RECORD':
fields = field.fields
assert fields, 'RECORD fields are empty'
names = [el.name for el in fields]
ibis_types = list(map(dt.dtype, fields))
ibis_type = dt.Struct(names, ibis_types)
else:
ibis_type = _LEGACY_TO_STANDARD.get(typ, typ)
ibis_type = _DTYPE_TO_IBIS_TYPE.get(ibis_type, ibis_type)
if field.mode == 'REPEATED':
ibis_type = dt.Array(ibis_type)
return ibis_type
|
python
|
{
"resource": ""
}
|
q24974
|
bigquery_schema
|
train
|
def bigquery_schema(table):
"""Infer the schema of a BigQuery `table` object."""
fields = OrderedDict((el.name, dt.dtype(el)) for el in table.schema)
partition_info = table._properties.get('timePartitioning', None)
# We have a partitioned table
if partition_info is not None:
partition_field = partition_info.get('field', NATIVE_PARTITION_COL)
# Only add a new column if it's not already a column in the schema
fields.setdefault(partition_field, dt.timestamp)
return sch.schema(fields)
|
python
|
{
"resource": ""
}
|
q24975
|
parse_project_and_dataset
|
train
|
def parse_project_and_dataset(
project: str, dataset: Optional[str] = None
) -> Tuple[str, str, Optional[str]]:
"""Compute the billing project, data project, and dataset if available.
This function figures out the project id under which queries will run versus
the project where the data lives, as well as which dataset to use.
Parameters
----------
project : str
A project name
dataset : Optional[str]
A ``<project>.<dataset>`` string or just a dataset name
Examples
--------
>>> data_project, billing_project, dataset = parse_project_and_dataset(
... 'ibis-gbq',
... 'foo-bar.my_dataset'
... )
>>> data_project
'foo-bar'
>>> billing_project
'ibis-gbq'
>>> dataset
'my_dataset'
>>> data_project, billing_project, dataset = parse_project_and_dataset(
... 'ibis-gbq',
... 'my_dataset'
... )
>>> data_project
'ibis-gbq'
>>> billing_project
'ibis-gbq'
>>> dataset
'my_dataset'
>>> data_project, billing_project, dataset = parse_project_and_dataset(
... 'ibis-gbq'
... )
>>> data_project
'ibis-gbq'
>>> print(dataset)
None
"""
try:
data_project, dataset = dataset.split('.')
except (ValueError, AttributeError):
billing_project = data_project = project
else:
billing_project = project
return data_project, billing_project, dataset
|
python
|
{
"resource": ""
}
|
q24976
|
BigQueryCursor.fetchall
|
train
|
def fetchall(self):
"""Fetch all rows."""
result = self.query.result()
return [row.values() for row in result]
|
python
|
{
"resource": ""
}
|
q24977
|
BigQueryCursor.columns
|
train
|
def columns(self):
"""Return the columns of the result set."""
result = self.query.result()
return [field.name for field in result.schema]
|
python
|
{
"resource": ""
}
|
q24978
|
BigQueryCursor.description
|
train
|
def description(self):
"""Get the fields of the result set's schema."""
result = self.query.result()
return [field for field in result.schema]
|
python
|
{
"resource": ""
}
|
q24979
|
execute_cast_simple_literal_to_timestamp
|
train
|
def execute_cast_simple_literal_to_timestamp(op, data, type, **kwargs):
"""Cast integer and strings to timestamps"""
return pd.Timestamp(data, tz=type.timezone)
|
python
|
{
"resource": ""
}
|
q24980
|
execute_cast_timestamp_to_timestamp
|
train
|
def execute_cast_timestamp_to_timestamp(op, data, type, **kwargs):
"""Cast timestamps to other timestamps including timezone if necessary"""
input_timezone = data.tz
target_timezone = type.timezone
if input_timezone == target_timezone:
return data
if input_timezone is None or target_timezone is None:
return data.tz_localize(target_timezone)
return data.tz_convert(target_timezone)
|
python
|
{
"resource": ""
}
|
q24981
|
wrap_case_result
|
train
|
def wrap_case_result(raw, expr):
"""Wrap a CASE statement result in a Series and handle returning scalars.
Parameters
----------
raw : ndarray[T]
The raw results of executing the ``CASE`` expression
expr : ValueExpr
The expression from the which `raw` was computed
Returns
-------
Union[scalar, Series]
"""
raw_1d = np.atleast_1d(raw)
if np.any(pd.isnull(raw_1d)):
result = pd.Series(raw_1d)
else:
result = pd.Series(
raw_1d, dtype=constants.IBIS_TYPE_TO_PANDAS_TYPE[expr.type()]
)
if result.size == 1 and isinstance(expr, ir.ScalarExpr):
return result.item()
return result
|
python
|
{
"resource": ""
}
|
q24982
|
window
|
train
|
def window(preceding=None, following=None, group_by=None, order_by=None):
"""Create a window clause for use with window functions.
This ROW window clause aggregates adjacent rows based on differences in row
number.
All window frames / ranges are inclusive.
Parameters
----------
preceding : int, tuple, or None, default None
Specify None for unbounded, 0 to include the current row, or a tuple for
an off-center window
following : int, tuple, or None, default None
Specify None for unbounded, 0 to include the current row, or a tuple for
an off-center window
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
"""
return Window(
preceding=preceding,
following=following,
group_by=group_by,
order_by=order_by,
how='rows',
)
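# Hedged usage sketch; the table `t`, its columns, and the aggregation are
# placeholders rather than anything from the source:
#
#   w = window(preceding=5, following=0, group_by=t.g, order_by=t.ts)
#   expr = t.value.mean().over(w)   # mean over the current row and the
#                                   # 5 preceding rows, per group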
|
python
|
{
"resource": ""
}
|
q24983
|
range_window
|
train
|
def range_window(preceding=None, following=None, group_by=None, order_by=None):
"""Create a range-based window clause for use with window functions.
This RANGE window clause aggregates rows based upon differences in the
value of the order-by expression.
All window frames / ranges are inclusive.
Parameters
----------
preceding : int, tuple, or None, default None
Specify None for unbounded, 0 to include the current row, or a tuple for
an off-center window
following : int, tuple, or None, default None
Specify None for unbounded, 0 to include the current row, or a tuple for
an off-center window
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
"""
return Window(
preceding=preceding,
following=following,
group_by=group_by,
order_by=order_by,
how='range',
)
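# Hedged usage sketch (placeholder table/columns); with how='range' the
# bounds are interpreted against the order_by values, not row counts:
#
#   w = range_window(preceding=10, following=0, order_by=t.x)
#   expr = t.value.sum().over(w)    # rows whose x lies within 10 of the
#                                   # current row's x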
|
python
|
{
"resource": ""
}
|
q24984
|
cumulative_window
|
train
|
def cumulative_window(group_by=None, order_by=None):
"""Create a cumulative window for use with aggregate window functions.
All window frames / ranges are inclusive.
Parameters
----------
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
"""
return Window(
preceding=None, following=0, group_by=group_by, order_by=order_by
)
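# Hedged usage sketch (placeholder table/columns):
#
#   w = cumulative_window(group_by=t.g, order_by=t.ts)
#   expr = t.value.sum().over(w)    # running total within each group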
|
python
|
{
"resource": ""
}
|
q24985
|
trailing_window
|
train
|
def trailing_window(rows, group_by=None, order_by=None):
"""Create a trailing window for use with aggregate window functions.
Parameters
----------
rows : int
Number of trailing rows to include. 0 includes only the current row
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
"""
return Window(
preceding=rows, following=0, group_by=group_by, order_by=order_by
)
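# Hedged usage sketch (placeholder table/columns):
#
#   w = trailing_window(3, group_by=t.g, order_by=t.ts)
#   expr = t.value.mean().over(w)   # mean over the current row plus the
#                                   # 3 rows before it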
|
python
|
{
"resource": ""
}
|
q24986
|
trailing_range_window
|
train
|
def trailing_range_window(preceding, order_by, group_by=None):
"""Create a trailing time window for use with aggregate window functions.
Parameters
----------
preceding : float or interval expression, e.g.
ibis.interval(days=1) + ibis.interval(hours=5)
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
group_by : expressions, default None
Either specify here or with TableExpr.group_by
Returns
-------
Window
"""
return Window(
preceding=preceding,
following=0,
group_by=group_by,
order_by=order_by,
how='range',
)
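# Hedged usage sketch (placeholder table/columns; assumes `import ibis`):
#
#   w = trailing_range_window(
#       preceding=ibis.interval(days=7), order_by=t.ts
#   )
#   expr = t.value.mean().over(w)   # mean over the trailing 7 days of data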
|
python
|
{
"resource": ""
}
|
q24987
|
ImpalaDatabase.create_table
|
train
|
def create_table(self, table_name, obj=None, **kwargs):
"""
Dispatch to ImpalaClient.create_table. See that function's docstring
for more details.
"""
return self.client.create_table(
table_name, obj=obj, database=self.name, **kwargs
)
|
python
|
{
"resource": ""
}
|
q24988
|
ImpalaConnection.close
|
train
|
def close(self):
"""
Close all open Impyla sessions
"""
for impyla_connection in self._connections:
impyla_connection.close()
self._connections.clear()
self.connection_pool.clear()
|
python
|
{
"resource": ""
}
|
q24989
|
ImpalaTable.compute_stats
|
train
|
def compute_stats(self, incremental=False):
"""
Invoke Impala COMPUTE STATS command to compute column, table, and
partition statistics.
See also ImpalaClient.compute_stats
"""
return self._client.compute_stats(
self._qualified_name, incremental=incremental
)
|
python
|
{
"resource": ""
}
|
q24990
|
ImpalaTable.insert
|
train
|
def insert(
self,
obj=None,
overwrite=False,
partition=None,
values=None,
validate=True,
):
"""
Insert into Impala table. Wraps ImpalaClient.insert
Parameters
----------
obj : TableExpr or pandas DataFrame
overwrite : boolean, default False
If True, will replace existing contents of table
partition : list or dict, optional
For partitioned tables, indicate the partition that's being inserted
into, either with an ordered list of partition keys or a dict of
partition field name to value. For example for the partition
(year=2007, month=7), this can be either (2007, 7) or {'year': 2007,
'month': 7}.
values : optional
Currently unsupported; must be left as None
validate : boolean, default True
If True, do more rigorous validation that the schema of the data being
inserted is compatible with the existing table
Examples
--------
>>> t.insert(table_expr) # doctest: +SKIP
# Completely overwrite contents
>>> t.insert(table_expr, overwrite=True) # doctest: +SKIP
"""
if isinstance(obj, pd.DataFrame):
from ibis.impala.pandas_interop import write_temp_dataframe
writer, expr = write_temp_dataframe(self._client, obj)
else:
expr = obj
if values is not None:
raise NotImplementedError
if validate:
existing_schema = self.schema()
insert_schema = expr.schema()
if not insert_schema.equals(existing_schema):
_validate_compatible(insert_schema, existing_schema)
if partition is not None:
partition_schema = self.partition_schema()
partition_schema_names = frozenset(partition_schema.names)
expr = expr.projection(
[
column
for column in expr.columns
if column not in partition_schema_names
]
)
else:
partition_schema = None
ast = build_ast(expr, ImpalaDialect.make_context())
select = ast.queries[0]
statement = ddl.InsertSelect(
self._qualified_name,
select,
partition=partition,
partition_schema=partition_schema,
overwrite=overwrite,
)
return self._execute(statement)
|
python
|
{
"resource": ""
}
|
q24991
|
ImpalaTable.add_partition
|
train
|
def add_partition(self, spec, location=None):
"""
Add a new table partition, creating any new directories in HDFS if
necessary.
Partition parameters can be set in a single DDL statement, or you can
use alter_partition to set them after the fact.
Returns
-------
None (for now)
"""
part_schema = self.partition_schema()
stmt = ddl.AddPartition(
self._qualified_name, spec, part_schema, location=location
)
return self._execute(stmt)
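# Hedged usage sketch; the partition keys and HDFS path are placeholders:
#
#   table.add_partition({'year': 2018, 'month': 4})
#   table.add_partition([2018, 4], location='/data/warehouse/t/2018/04')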
|
python
|
{
"resource": ""
}
|
q24992
|
ImpalaTable.alter_partition
|
train
|
def alter_partition(
self,
spec,
location=None,
format=None,
tbl_properties=None,
serde_properties=None,
):
"""
Change settings and parameters of an existing partition
Parameters
----------
spec : dict or list
The partition keys for the partition being modified
location : string, optional
format : string, optional
tbl_properties : dict, optional
serde_properties : dict, optional
Returns
-------
None (for now)
"""
part_schema = self.partition_schema()
def _run_ddl(**kwds):
stmt = ddl.AlterPartition(
self._qualified_name, spec, part_schema, **kwds
)
return self._execute(stmt)
return self._alter_table_helper(
_run_ddl,
location=location,
format=format,
tbl_properties=tbl_properties,
serde_properties=serde_properties,
)
|
python
|
{
"resource": ""
}
|
q24993
|
ImpalaTable.drop_partition
|
train
|
def drop_partition(self, spec):
"""
Drop an existing table partition
"""
part_schema = self.partition_schema()
stmt = ddl.DropPartition(self._qualified_name, spec, part_schema)
return self._execute(stmt)
|
python
|
{
"resource": ""
}
|
q24994
|
ImpalaClient.close
|
train
|
def close(self):
"""
Close Impala connection and drop any temporary objects
"""
for obj in self._temp_objects:
try:
obj.drop()
except HS2Error:
pass
self.con.close()
|
python
|
{
"resource": ""
}
|
q24995
|
ImpalaClient.create_database
|
train
|
def create_database(self, name, path=None, force=False):
"""
Create a new Impala database
Parameters
----------
name : string
Database name
path : string, default None
HDFS path where to store the database data; otherwise uses the Impala
default
force : boolean, default False
If True, do not raise an error if the database already exists
"""
if path:
# explicit mkdir ensures the user owns the dir rather than impala,
# which is easier for manual cleanup, if necessary
self.hdfs.mkdir(path)
statement = ddl.CreateDatabase(name, path=path, can_exist=force)
return self._execute(statement)
|
python
|
{
"resource": ""
}
|
q24996
|
ImpalaClient.drop_database
|
train
|
def drop_database(self, name, force=False):
"""Drop an Impala database.
Parameters
----------
name : string
Database name
force : bool, default False
If False and there are any tables in this database, raises an
IntegrityError
"""
if not force or self.exists_database(name):
tables = self.list_tables(database=name)
udfs = self.list_udfs(database=name)
udas = self.list_udas(database=name)
else:
tables = []
udfs = []
udas = []
if force:
for table in tables:
self.log('Dropping {0}.{1}'.format(name, table))
self.drop_table_or_view(table, database=name)
for func in udfs:
self.log(
'Dropping function {0}({1})'.format(func.name, func.inputs)
)
self.drop_udf(
func.name,
input_types=func.inputs,
database=name,
force=True,
)
for func in udas:
self.log(
'Dropping aggregate function {0}({1})'.format(
func.name, func.inputs
)
)
self.drop_uda(
func.name,
input_types=func.inputs,
database=name,
force=True,
)
else:
if tables or udfs or udas:
raise com.IntegrityError(
'Database {0} must be empty before '
'being dropped, or set '
'force=True'.format(name)
)
statement = ddl.DropDatabase(name, must_exist=not force)
return self._execute(statement)
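# Hedged usage sketch (the database name is a placeholder; assumes a live
# Impala connection `con`):
#
#   con.drop_database('tmp_db', force=True)   # also drops any tables,
#                                             # UDFs, and UDAs it contains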
|
python
|
{
"resource": ""
}
|
q24997
|
ImpalaClient.list_databases
|
train
|
def list_databases(self, like=None):
"""
List databases in the Impala cluster. Like the SHOW DATABASES command
in the impala-shell.
Parameters
----------
like : string, default None
e.g. 'foo*' to match all databases starting with 'foo'
Returns
-------
databases : list of strings
"""
statement = 'SHOW DATABASES'
if like:
statement += " LIKE '{0}'".format(like)
with self._execute(statement, results=True) as cur:
results = self._get_list(cur)
return results
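# Hedged usage sketch (the pattern is a placeholder; assumes a live Impala
# connection `con`):
#
#   con.list_databases(like='ibis_test*')   # names matching the pattern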
|
python
|
{
"resource": ""
}
|
q24998
|
ImpalaClient.get_options
|
train
|
def get_options(self):
"""
Return current query options for the Impala session
"""
query = 'SET'
return dict(row[:2] for row in self.con.fetchall(query))
|
python
|
{
"resource": ""
}
|
q24999
|
ImpalaClient.parquet_file
|
train
|
def parquet_file(
self,
hdfs_dir,
schema=None,
name=None,
database=None,
external=True,
like_file=None,
like_table=None,
persist=False,
):
"""
Make the Parquet data in the indicated HDFS directory available as an Ibis
table.
The table created can be optionally named and persisted, otherwise a
unique name will be generated. Temporarily, for any non-persistent
external table created by Ibis we will attempt to drop it when the
underlying object is garbage collected (or the Python interpreter shuts
down normally).
Parameters
----------
hdfs_dir : string
Path in HDFS
schema : ibis Schema
If no schema is provided and neither of the like_* arguments is passed,
one will be inferred from one of the Parquet files in the directory.
like_file : string
Absolute path to Parquet file in HDFS to use for schema
definitions. An alternative to having to supply an explicit schema
like_table : string
Fully scoped and escaped string to an Impala table whose schema we
will use for the newly created table.
name : string, optional
random unique name generated otherwise
database : string, optional
Database to create the (possibly temporary) table in
external : boolean, default True
If a table is external, the referenced data will not be deleted when
the table is dropped in Impala. Otherwise (external=False) Impala
takes ownership of the Parquet file.
persist : boolean, default False
Do not drop the table upon Ibis garbage collection / interpreter
shutdown
Returns
-------
parquet_table : ImpalaTable
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
# If no schema provided, need to find some absolute path to a file in
# the HDFS directory
if like_file is None and like_table is None and schema is None:
file_name = self.hdfs._find_any_file(hdfs_dir)
like_file = pjoin(hdfs_dir, file_name)
stmt = ddl.CreateTableParquet(
name,
hdfs_dir,
schema=schema,
database=database,
example_file=like_file,
example_table=like_table,
external=external,
can_exist=False,
)
self._execute(stmt)
return self._wrap_new_table(name, database, persist)
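# Hedged usage sketch; the HDFS path is a placeholder and a live Impala
# connection `con` with HDFS access is assumed:
#
#   t = con.parquet_file('/path/to/parquet_dir')   # schema inferred from a
#   t.limit(10).execute()                          # file in the directory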
|
python
|
{
"resource": ""
}
|