| | """Shared helper functions for connecting BigQuery and pandas.""" |
| |
|
| | import concurrent.futures |
| | from datetime import datetime |
| | import functools |
| | from itertools import islice |
| | import logging |
| | import queue |
| | import warnings |
| | from typing import Any, Union |
| |
|
| |
|
| | from google.cloud.bigquery import _pyarrow_helpers |
| | from google.cloud.bigquery import _versions_helpers |
| | from google.cloud.bigquery import schema |
| |
|
| | try: |
| | import pandas |
| |
|
| | pandas_import_exception = None |
| | except ImportError as exc: |
| | pandas = None |
| | pandas_import_exception = exc |
| | else: |
| | import numpy |
| |
|
| | try: |
| | import db_dtypes |
| |
|
| | date_dtype_name = db_dtypes.DateDtype.name |
| | time_dtype_name = db_dtypes.TimeDtype.name |
| | db_dtypes_import_exception = None |
| | except ImportError as exc: |
| | db_dtypes = None |
| | db_dtypes_import_exception = exc |
| | date_dtype_name = time_dtype_name = "" |
| |
|
| | pyarrow = _versions_helpers.PYARROW_VERSIONS.try_import() |
| |
|

try:
    from shapely.geometry.base import BaseGeometry as _BaseGeometry
except ImportError:
    # No shapely installed. Use NoneType as a placeholder so that
    # isinstance checks against _BaseGeometry are always False.
    _BaseGeometry = type(None)
else:
    if pandas is not None:

        def _to_wkb():
            # Create a closure over the shapely WKB writer so the import only
            # happens once, when this module is loaded.
            from shapely import wkb

            write = wkb.dumps
            notnull = pandas.notnull

            def _to_wkb(v):
                return write(v) if notnull(v) else v

            return _to_wkb

        _to_wkb = _to_wkb()

try:
    from google.cloud.bigquery_storage import ArrowSerializationOptions
except ImportError:
    _ARROW_COMPRESSION_SUPPORT = False
else:
    # Having BQ Storage available implies that it supports Arrow compression.
    _ARROW_COMPRESSION_SUPPORT = True

_LOGGER = logging.getLogger(__name__)

_PROGRESS_INTERVAL = 0.2  # Maximum time between download status checks, in seconds.

_MAX_QUEUE_SIZE_DEFAULT = object()  # Sentinel for the default max queue size.

_NO_PANDAS_ERROR = "Please install the 'pandas' package to use this function."
_NO_DB_TYPES_ERROR = "Please install the 'db-dtypes' package to use this function."

_PANDAS_DTYPE_TO_BQ = {
    "bool": "BOOLEAN",
    "datetime64[ns, UTC]": "TIMESTAMP",
    "datetime64[ns]": "DATETIME",
    "float32": "FLOAT",
    "float64": "FLOAT",
    "int8": "INTEGER",
    "int16": "INTEGER",
    "int32": "INTEGER",
    "int64": "INTEGER",
    "uint8": "INTEGER",
    "uint16": "INTEGER",
    "uint32": "INTEGER",
    "geometry": "GEOGRAPHY",
    date_dtype_name: "DATE",
    time_dtype_name: "TIME",
}


class _DownloadState(object):
    """Flag to indicate that a thread should exit early."""

    def __init__(self):
        # Reading and replacing this attribute is an atomic operation in
        # CPython (enforced by the global interpreter lock), so no lock is
        # needed around the flag.
        self.done = False


BQ_FIELD_TYPE_TO_ARROW_FIELD_METADATA = {
    "GEOGRAPHY": {
        b"ARROW:extension:name": b"google:sqlType:geography",
        b"ARROW:extension:metadata": b'{"encoding": "WKT"}',
    },
    "DATETIME": {b"ARROW:extension:name": b"google:sqlType:datetime"},
}


def bq_to_arrow_struct_data_type(field):
    arrow_fields = []
    for subfield in field.fields:
        arrow_subfield = bq_to_arrow_field(subfield)
        if arrow_subfield:
            arrow_fields.append(arrow_subfield)
        else:
            # Could not determine a subfield type. Fall back to type
            # inference.
            return None
    return pyarrow.struct(arrow_fields)


def bq_to_arrow_range_data_type(field):
    if field is None:
        raise ValueError(
            "Range element type cannot be None, must be one of "
            "DATE, DATETIME, or TIMESTAMP"
        )
    element_type = field.element_type.upper()
    arrow_element_type = _pyarrow_helpers.bq_to_arrow_scalars(element_type)()
    return pyarrow.struct([("start", arrow_element_type), ("end", arrow_element_type)])


def bq_to_arrow_data_type(field):
    """Return the Arrow data type, corresponding to a given BigQuery column.

    Returns:
        None: if default Arrow type inspection should be used.
    """
    if field.mode is not None and field.mode.upper() == "REPEATED":
        inner_type = bq_to_arrow_data_type(
            schema.SchemaField(field.name, field.field_type, fields=field.fields)
        )
        if inner_type:
            return pyarrow.list_(inner_type)
        return None

    field_type_upper = field.field_type.upper() if field.field_type else ""
    if field_type_upper in schema._STRUCT_TYPES:
        return bq_to_arrow_struct_data_type(field)

    if field_type_upper == "RANGE":
        return bq_to_arrow_range_data_type(field.range_element_type)

    data_type_constructor = _pyarrow_helpers.bq_to_arrow_scalars(field_type_upper)
    if data_type_constructor is None:
        return None
    return data_type_constructor()


def bq_to_arrow_field(bq_field, array_type=None):
    """Return the Arrow field, corresponding to a given BigQuery column.

    Returns:
        None: if the Arrow type cannot be determined.
    """
    arrow_type = bq_to_arrow_data_type(bq_field)
    if arrow_type is not None:
        if array_type is not None:
            # Prefer the type of the materialized array, when provided.
            arrow_type = array_type
        metadata = BQ_FIELD_TYPE_TO_ARROW_FIELD_METADATA.get(
            bq_field.field_type.upper() if bq_field.field_type else ""
        )
        return pyarrow.field(
            bq_field.name,
            arrow_type,
            # Even if the remote schema says REQUIRED, the local pandas data
            # may still contain NULL values, so only REPEATED fields are
            # marked non-nullable.
            nullable=False if bq_field.mode.upper() == "REPEATED" else True,
            metadata=metadata,
        )

    warnings.warn(
        "Unable to determine Arrow type for field '{}'.".format(bq_field.name)
    )
    return None


def bq_to_arrow_schema(bq_schema):
    """Return the Arrow schema, corresponding to a given BigQuery schema.

    Returns:
        None: if any Arrow type cannot be determined.
    """
    arrow_fields = []
    for bq_field in bq_schema:
        arrow_field = bq_to_arrow_field(bq_field)
        if arrow_field is None:
            # An Arrow type could not be determined for this field, so an
            # exact schema cannot be constructed.
            return None
        arrow_fields.append(arrow_field)
    return pyarrow.schema(arrow_fields)
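
# A minimal, illustrative sketch (not part of this module's API): building an
# Arrow schema from BigQuery schema fields. The field names below are invented
# for demonstration; ``pyarrow`` must be installed for this to return a schema.
#
#     from google.cloud.bigquery import schema
#
#     bq_fields = [
#         schema.SchemaField("name", "STRING"),
#         schema.SchemaField("scores", "FLOAT", mode="REPEATED"),
#     ]
#     arrow_schema = bq_to_arrow_schema(bq_fields)
#     # arrow_schema is a pyarrow.Schema, or None if any type is unknown.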


def default_types_mapper(
    date_as_object: bool = False,
    bool_dtype: Union[Any, None] = None,
    int_dtype: Union[Any, None] = None,
    float_dtype: Union[Any, None] = None,
    string_dtype: Union[Any, None] = None,
    date_dtype: Union[Any, None] = None,
    datetime_dtype: Union[Any, None] = None,
    time_dtype: Union[Any, None] = None,
    timestamp_dtype: Union[Any, None] = None,
    range_date_dtype: Union[Any, None] = None,
    range_datetime_dtype: Union[Any, None] = None,
    range_timestamp_dtype: Union[Any, None] = None,
):
    """Create a mapping from pyarrow types to pandas types.

    This overrides the pandas defaults to use null-safe extension types where
    available.

    See: https://arrow.apache.org/docs/python/api/datatypes.html for a list of
    data types. See:
    tests/unit/test__pandas_helpers.py::test_bq_to_arrow_data_type for
    BigQuery to Arrow type mapping.

    Note to google-cloud-bigquery developers: If you update the default dtypes,
    also update the docs at docs/usage/pandas.rst.
    """

    def types_mapper(arrow_data_type):
        if bool_dtype is not None and pyarrow.types.is_boolean(arrow_data_type):
            return bool_dtype

        elif int_dtype is not None and pyarrow.types.is_integer(arrow_data_type):
            return int_dtype

        elif float_dtype is not None and pyarrow.types.is_floating(arrow_data_type):
            return float_dtype

        elif string_dtype is not None and pyarrow.types.is_string(arrow_data_type):
            return string_dtype

        elif (
            # When date_as_object is True, DATE columns are returned as Python
            # objects, so do not map them to an extension dtype.
            date_dtype is not None
            and not date_as_object
            and pyarrow.types.is_date(arrow_data_type)
        ):
            return date_dtype

        elif (
            datetime_dtype is not None
            and pyarrow.types.is_timestamp(arrow_data_type)
            and arrow_data_type.tz is None
        ):
            return datetime_dtype

        elif (
            timestamp_dtype is not None
            and pyarrow.types.is_timestamp(arrow_data_type)
            and arrow_data_type.tz is not None
        ):
            return timestamp_dtype

        elif time_dtype is not None and pyarrow.types.is_time(arrow_data_type):
            return time_dtype

        elif pyarrow.types.is_struct(arrow_data_type):
            if range_datetime_dtype is not None and arrow_data_type.equals(
                range_datetime_dtype.pyarrow_dtype
            ):
                return range_datetime_dtype

            elif range_date_dtype is not None and arrow_data_type.equals(
                range_date_dtype.pyarrow_dtype
            ):
                return range_date_dtype

            elif range_timestamp_dtype is not None and arrow_data_type.equals(
                range_timestamp_dtype.pyarrow_dtype
            ):
                return range_timestamp_dtype

    return types_mapper
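
# Illustrative sketch only: using ``default_types_mapper`` together with
# ``pyarrow.Table.to_pandas`` to get nullable pandas extension dtypes. The
# table contents and dtype choices are hypothetical, not taken from this module.
#
#     import pandas
#     import pyarrow
#
#     table = pyarrow.table({"flag": [True, None], "n": [1, None]})
#     mapper = default_types_mapper(
#         bool_dtype=pandas.BooleanDtype(), int_dtype=pandas.Int64Dtype()
#     )
#     df = table.to_pandas(types_mapper=mapper)
#     # df["flag"] has dtype "boolean" and df["n"] has dtype "Int64", so the
#     # missing values stay as <NA> instead of forcing float/object columns.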


def bq_to_arrow_array(series, bq_field):
    if bq_field.field_type.upper() == "GEOGRAPHY":
        arrow_type = None
        first = _first_valid(series)
        if first is not None:
            if series.dtype.name == "geometry" or isinstance(first, _BaseGeometry):
                arrow_type = pyarrow.binary()
                # Convert shapely geometry objects to WKB binary format.
                series = series.apply(_to_wkb)
            elif isinstance(first, bytes):
                arrow_type = pyarrow.binary()
        elif series.dtype.name == "geometry":
            # The GeoSeries contains only nulls; convert it to a plain pandas
            # Series so pyarrow can handle it.
            series = pandas.Series(numpy.array(series))

        if arrow_type is None:
            arrow_type = bq_to_arrow_data_type(bq_field)
    else:
        arrow_type = bq_to_arrow_data_type(bq_field)

    field_type_upper = bq_field.field_type.upper() if bq_field.field_type else ""

    try:
        if bq_field.mode.upper() == "REPEATED":
            return pyarrow.ListArray.from_pandas(series, type=arrow_type)
        if field_type_upper in schema._STRUCT_TYPES:
            return pyarrow.StructArray.from_pandas(series, type=arrow_type)
        return pyarrow.Array.from_pandas(series, type=arrow_type)
    except pyarrow.ArrowTypeError:
        msg = (
            f'Error converting Pandas column with name: "{series.name}" and '
            f'datatype: "{series.dtype}" to an appropriate pyarrow datatype: '
            "Array, ListArray, or StructArray"
        )
        _LOGGER.error(msg)
        raise pyarrow.ArrowTypeError(msg)


def get_column_or_index(dataframe, name):
    """Return a column or index as a pandas series."""
    if name in dataframe.columns:
        return dataframe[name].reset_index(drop=True)

    if isinstance(dataframe.index, pandas.MultiIndex):
        if name in dataframe.index.names:
            return (
                dataframe.index.get_level_values(name)
                .to_series()
                .reset_index(drop=True)
            )
    else:
        if name == dataframe.index.name:
            return dataframe.index.to_series().reset_index(drop=True)

    raise ValueError("column or index '{}' not found.".format(name))


def list_columns_and_indexes(dataframe):
    """Return all index and column names with dtypes.

    Returns:
        Sequence[Tuple[str, dtype]]:
            Returns a sorted list of indexes and column names with
            corresponding dtypes. If an index is missing a name or has the
            same name as a column, the index is omitted.
    """
    column_names = frozenset(dataframe.columns)
    columns_and_indexes = []
    if isinstance(dataframe.index, pandas.MultiIndex):
        for name in dataframe.index.names:
            if name and name not in column_names:
                values = dataframe.index.get_level_values(name)
                columns_and_indexes.append((name, values.dtype))
    else:
        if dataframe.index.name and dataframe.index.name not in column_names:
            columns_and_indexes.append((dataframe.index.name, dataframe.index.dtype))

    columns_and_indexes += zip(dataframe.columns, dataframe.dtypes)
    return columns_and_indexes
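
# Illustrative sketch only (values invented): named indexes come first in the
# result, followed by the DataFrame's columns.
#
#     import pandas
#
#     df = pandas.DataFrame(
#         {"price": [1.5, 2.0]}, index=pandas.Index([1, 2], name="id")
#     )
#     list_columns_and_indexes(df)
#     # -> [("id", dtype("int64")), ("price", dtype("float64"))]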


def _first_valid(series):
    first_valid_index = series.first_valid_index()
    if first_valid_index is not None:
        return series.at[first_valid_index]


def _first_array_valid(series):
    """Return the first "meaningful" element from the array series.

    Here, "meaningful" means the first non-None element in one of the arrays
    that can be used for type detection.
    """
    first_valid_index = series.first_valid_index()
    if first_valid_index is None:
        return None

    valid_array = series.at[first_valid_index]
    valid_item = next((item for item in valid_array if not pandas.isna(item)), None)

    if valid_item is not None:
        return valid_item

    # The "valid" array contained only invalid items. Loop through the rest of
    # the series to see if there is a valid item in one of the remaining
    # arrays.
    for array in islice(series, first_valid_index + 1, None):
        try:
            array_iter = iter(array)
        except TypeError:
            continue
        valid_item = next((item for item in array_iter if not pandas.isna(item)), None)
        if valid_item is not None:
            break

    return valid_item


def dataframe_to_bq_schema(dataframe, bq_schema):
    """Convert a pandas DataFrame schema to a BigQuery schema.

    Args:
        dataframe (pandas.DataFrame):
            DataFrame for which the client determines the BigQuery schema.
        bq_schema (Sequence[Union[ \
            :class:`~google.cloud.bigquery.schema.SchemaField`, \
            Mapping[str, Any] \
        ]]):
            A BigQuery schema. Use this argument to override the autodetected
            type for some or all of the DataFrame columns.

    Returns:
        Optional[Sequence[google.cloud.bigquery.schema.SchemaField]]:
            The automatically determined schema. Returns None if the type of
            any column cannot be determined.
    """
    if bq_schema:
        bq_schema = schema._to_schema_fields(bq_schema)
        bq_schema_index = {field.name: field for field in bq_schema}
        bq_schema_unused = set(bq_schema_index.keys())
    else:
        bq_schema_index = {}
        bq_schema_unused = set()

    bq_schema_out = []
    unknown_type_fields = []

    for column, dtype in list_columns_and_indexes(dataframe):
        # Use the type provided in the schema override, if present.
        bq_field = bq_schema_index.get(column)
        if bq_field:
            bq_schema_out.append(bq_field)
            bq_schema_unused.discard(bq_field.name)
            continue

        # Otherwise, try to automatically determine the type based on the
        # pandas dtype.
        bq_type = _PANDAS_DTYPE_TO_BQ.get(dtype.name)
        if bq_type is None:
            sample_data = _first_valid(dataframe.reset_index()[column])
            if (
                isinstance(sample_data, _BaseGeometry)
                # Needed when shapely is absent and _BaseGeometry is NoneType.
                and sample_data is not None
            ):
                bq_type = "GEOGRAPHY"
        bq_field = schema.SchemaField(column, bq_type)
        bq_schema_out.append(bq_field)

        if bq_field.field_type is None:
            unknown_type_fields.append(bq_field)

    # Catch any schema mismatch. The developer explicitly asked to serialize a
    # column, but it was not found.
    if bq_schema_unused:
        raise ValueError(
            "bq_schema contains fields not present in dataframe: {}".format(
                bq_schema_unused
            )
        )

    # If schema detection was not successful for all columns, also try with
    # pyarrow, if available.
    if unknown_type_fields:
        if not pyarrow:
            msg = "Could not determine the type of columns: {}".format(
                ", ".join(field.name for field in unknown_type_fields)
            )
            warnings.warn(msg)
            return None  # The schema cannot be detected in full.

        bq_schema_out = augment_schema(dataframe, bq_schema_out)

    return tuple(bq_schema_out) if bq_schema_out else None
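
# Illustrative sketch only (column names invented): automatic schema detection
# from pandas dtypes, with one column overridden. Requires pandas; because the
# string column below has dtype "object", pyarrow is also needed for detection.
#
#     import pandas
#
#     df = pandas.DataFrame({"name": ["a", "b"], "age": [10, 20]})
#     detected = dataframe_to_bq_schema(df, bq_schema=[])
#     # e.g. (SchemaField("name", "STRING", ...), SchemaField("age", "INTEGER", ...))
#
#     overridden = dataframe_to_bq_schema(
#         df, bq_schema=[{"name": "age", "type": "NUMERIC"}]
#     )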


def augment_schema(dataframe, current_bq_schema):
    """Try to deduce the unknown field types and return an improved schema.

    This function requires ``pyarrow`` to run. If all the missing types still
    cannot be detected, ``None`` is returned. If all types are already known,
    a shallow copy of the given schema is returned.

    Args:
        dataframe (pandas.DataFrame):
            DataFrame for which some of the field types are still unknown.
        current_bq_schema (Sequence[google.cloud.bigquery.schema.SchemaField]):
            A BigQuery schema for ``dataframe``. The types of some or all of
            the fields may be ``None``.
    Returns:
        Optional[Sequence[google.cloud.bigquery.schema.SchemaField]]
    """
    augmented_schema = []
    unknown_type_fields = []
    for field in current_bq_schema:
        if field.field_type is not None:
            augmented_schema.append(field)
            continue

        arrow_table = pyarrow.array(dataframe.reset_index()[field.name])

        if pyarrow.types.is_list(arrow_table.type):
            # A list type in Arrow corresponds to a REPEATED BigQuery field.
            detected_mode = "REPEATED"
            detected_type = _pyarrow_helpers.arrow_scalar_ids_to_bq(
                arrow_table.values.type.id
            )

            # For timezone-naive datetimes, pyarrow assumes the UTC timezone
            # and adds it to such datetimes, causing them to be recognized as
            # TIMESTAMP type. We thus additionally check the actual data to
            # see if this is the case.
            if detected_type == "TIMESTAMP":
                valid_item = _first_array_valid(dataframe[field.name])
                if isinstance(valid_item, datetime) and valid_item.tzinfo is None:
                    detected_type = "DATETIME"
        else:
            detected_mode = field.mode
            detected_type = _pyarrow_helpers.arrow_scalar_ids_to_bq(arrow_table.type.id)
            if detected_type == "NUMERIC" and arrow_table.type.scale > 9:
                detected_type = "BIGNUMERIC"

        if detected_type is None:
            unknown_type_fields.append(field)
            continue

        new_field = schema.SchemaField(
            name=field.name,
            field_type=detected_type,
            mode=detected_mode,
            description=field.description,
            fields=field.fields,
        )
        augmented_schema.append(new_field)

    if unknown_type_fields:
        warnings.warn(
            "Pyarrow could not determine the type of columns: {}.".format(
                ", ".join(field.name for field in unknown_type_fields)
            )
        )
        return None

    return augmented_schema
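
# Illustrative sketch only: a column of Python lists has dtype "object", so
# its BigQuery type starts out unknown and is deduced via pyarrow as a
# REPEATED integer field. Names and the exact detected type name below are
# assumptions for the example.
#
#     import pandas
#     from google.cloud.bigquery import schema
#
#     df = pandas.DataFrame({"tags": [[1, 2], [3]]})
#     partial = [schema.SchemaField("tags", field_type=None, mode="NULLABLE")]
#     augment_schema(df, partial)
#     # result: a single SchemaField with mode="REPEATED" and an integer type.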


def dataframe_to_arrow(dataframe, bq_schema):
    """Convert pandas dataframe to Arrow table, using BigQuery schema.

    Args:
        dataframe (pandas.DataFrame):
            DataFrame to convert to Arrow table.
        bq_schema (Sequence[Union[ \
            :class:`~google.cloud.bigquery.schema.SchemaField`, \
            Mapping[str, Any] \
        ]]):
            Desired BigQuery schema. The number of columns must match the
            number of columns in the DataFrame.

    Returns:
        pyarrow.Table:
            Table containing dataframe data, with schema derived from
            BigQuery schema.
    """
    column_names = set(dataframe.columns)
    column_and_index_names = set(
        name for name, _ in list_columns_and_indexes(dataframe)
    )

    bq_schema = schema._to_schema_fields(bq_schema)
    bq_field_names = set(field.name for field in bq_schema)

    extra_fields = bq_field_names - column_and_index_names
    if extra_fields:
        raise ValueError(
            "bq_schema contains fields not present in dataframe: {}".format(
                extra_fields
            )
        )

    # It's okay for indexes to be missing from bq_schema, but it's not okay to
    # be missing columns.
    missing_fields = column_names - bq_field_names
    if missing_fields:
        raise ValueError(
            "bq_schema is missing fields from dataframe: {}".format(missing_fields)
        )

    arrow_arrays = []
    arrow_names = []
    arrow_fields = []
    for bq_field in bq_schema:
        arrow_names.append(bq_field.name)
        arrow_arrays.append(
            bq_to_arrow_array(get_column_or_index(dataframe, bq_field.name), bq_field)
        )
        arrow_fields.append(bq_to_arrow_field(bq_field, arrow_arrays[-1].type))

    if all((field is not None for field in arrow_fields)):
        return pyarrow.Table.from_arrays(
            arrow_arrays, schema=pyarrow.schema(arrow_fields)
        )
    return pyarrow.Table.from_arrays(arrow_arrays, names=arrow_names)


def dataframe_to_parquet(
    dataframe,
    bq_schema,
    filepath,
    parquet_compression="SNAPPY",
    parquet_use_compliant_nested_type=True,
):
    """Write dataframe as a Parquet file, according to the desired BQ schema.

    This function requires the :mod:`pyarrow` package. Arrow is used as an
    intermediate format.

    Args:
        dataframe (pandas.DataFrame):
            DataFrame to convert to Parquet file.
        bq_schema (Sequence[Union[ \
            :class:`~google.cloud.bigquery.schema.SchemaField`, \
            Mapping[str, Any] \
        ]]):
            Desired BigQuery schema. Number of columns must match number of
            columns in the DataFrame.
        filepath (str):
            Path to write Parquet file to.
        parquet_compression (Optional[str]):
            The compression codec to use by the ``pyarrow.parquet.write_table``
            serializing method. Defaults to "SNAPPY".
            https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html#pyarrow-parquet-write-table
        parquet_use_compliant_nested_type (bool):
            Whether the ``pyarrow.parquet.write_table`` serializing method should write
            compliant Parquet nested type (lists). Defaults to ``True``.
            https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#nested-types
            https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html#pyarrow-parquet-write-table

            This argument is ignored for ``pyarrow`` versions earlier than ``4.0.0``.
    """
    pyarrow = _versions_helpers.PYARROW_VERSIONS.try_import(raise_if_error=True)

    import pyarrow.parquet

    kwargs = (
        {"use_compliant_nested_type": parquet_use_compliant_nested_type}
        if _versions_helpers.PYARROW_VERSIONS.use_compliant_nested_type
        else {}
    )

    bq_schema = schema._to_schema_fields(bq_schema)
    arrow_table = dataframe_to_arrow(dataframe, bq_schema)
    pyarrow.parquet.write_table(
        arrow_table,
        filepath,
        compression=parquet_compression,
        **kwargs,
    )
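
# Illustrative sketch only: writing a DataFrame to a local Parquet file whose
# schema matches an explicit BigQuery schema. The path and field names are
# placeholders; pandas and pyarrow must be installed.
#
#     import pandas
#     from google.cloud.bigquery import schema
#
#     df = pandas.DataFrame({"name": ["a", "b"], "age": [10, 20]})
#     bq_schema = [
#         schema.SchemaField("name", "STRING"),
#         schema.SchemaField("age", "INTEGER"),
#     ]
#     dataframe_to_parquet(df, bq_schema, "/tmp/example.parquet")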


def _row_iterator_page_to_arrow(page, column_names, arrow_types):
    # Iterate over the page to force the API request that fetches the page
    # data.
    try:
        next(iter(page))
    except StopIteration:
        pass

    arrays = []
    for column_index, arrow_type in enumerate(arrow_types):
        arrays.append(pyarrow.array(page._columns[column_index], type=arrow_type))

    if isinstance(column_names, pyarrow.Schema):
        return pyarrow.RecordBatch.from_arrays(arrays, schema=column_names)
    return pyarrow.RecordBatch.from_arrays(arrays, names=column_names)


def download_arrow_row_iterator(pages, bq_schema):
    """Use HTTP JSON RowIterator to construct an iterable of RecordBatches.

    Args:
        pages (Iterator[:class:`google.api_core.page_iterator.Page`]):
            An iterator over the result pages.
        bq_schema (Sequence[Union[ \
            :class:`~google.cloud.bigquery.schema.SchemaField`, \
            Mapping[str, Any] \
        ]]):
            A description of the fields in result pages.
    Yields:
        :class:`pyarrow.RecordBatch`
        The next page of records as a ``pyarrow`` record batch.
    """
    bq_schema = schema._to_schema_fields(bq_schema)
    column_names = bq_to_arrow_schema(bq_schema) or [field.name for field in bq_schema]
    arrow_types = [bq_to_arrow_data_type(field) for field in bq_schema]

    for page in pages:
        yield _row_iterator_page_to_arrow(page, column_names, arrow_types)


def _row_iterator_page_to_dataframe(page, column_names, dtypes):
    # Iterate over the page to force the API request that fetches the page
    # data.
    try:
        next(iter(page))
    except StopIteration:
        pass

    columns = {}
    for column_index, column_name in enumerate(column_names):
        dtype = dtypes.get(column_name)
        columns[column_name] = pandas.Series(page._columns[column_index], dtype=dtype)

    return pandas.DataFrame(columns, columns=column_names)


def download_dataframe_row_iterator(pages, bq_schema, dtypes):
    """Use HTTP JSON RowIterator to construct a DataFrame.

    Args:
        pages (Iterator[:class:`google.api_core.page_iterator.Page`]):
            An iterator over the result pages.
        bq_schema (Sequence[Union[ \
            :class:`~google.cloud.bigquery.schema.SchemaField`, \
            Mapping[str, Any] \
        ]]):
            A description of the fields in result pages.
        dtypes (Mapping[str, numpy.dtype]):
            The types of columns in result data to hint construction of the
            resulting DataFrame. Not all column types have to be specified.
    Yields:
        :class:`pandas.DataFrame`
        The next page of records as a ``pandas.DataFrame``.
    """
    bq_schema = schema._to_schema_fields(bq_schema)
    column_names = [field.name for field in bq_schema]
    for page in pages:
        yield _row_iterator_page_to_dataframe(page, column_names, dtypes)


def _bqstorage_page_to_arrow(page):
    return page.to_arrow()


def _bqstorage_page_to_dataframe(column_names, dtypes, page):
    # Index by column name to guarantee the requested column order in the
    # resulting DataFrame.
    return page.to_dataframe(dtypes=dtypes)[column_names]


def _download_table_bqstorage_stream(
    download_state, bqstorage_client, session, stream, worker_queue, page_to_item
):
    reader = bqstorage_client.read_rows(stream.name)

    # Newer versions of the BQ Storage client no longer require the read
    # session to be passed to rows(); only pass it when required.
    if _versions_helpers.BQ_STORAGE_VERSIONS.is_read_session_optional:
        rowstream = reader.rows()
    else:
        rowstream = reader.rows(session)

    for page in rowstream.pages:
        if download_state.done:
            return
        item = page_to_item(page)
        worker_queue.put(item)


def _nowait(futures):
    """Separate finished and unfinished threads, much like
    :func:`concurrent.futures.wait`, but don't wait.
    """
    done = []
    not_done = []
    for future in futures:
        if future.done():
            done.append(future)
        else:
            not_done.append(future)
    return done, not_done


def _download_table_bqstorage(
    project_id,
    table,
    bqstorage_client,
    preserve_order=False,
    selected_fields=None,
    page_to_item=None,
    max_queue_size=_MAX_QUEUE_SIZE_DEFAULT,
):
    """Use (faster, but billable) BQ Storage API to construct DataFrame."""

    # Passing a BQ Storage client in implies that the BigQuery Storage library
    # is available and can be imported.
    from google.cloud import bigquery_storage

    if "$" in table.table_id:
        raise ValueError(
            "Reading from a specific partition is not currently supported."
        )
    if "@" in table.table_id:
        raise ValueError("Reading from a specific snapshot is not currently supported.")

    requested_streams = 1 if preserve_order else 0

    requested_session = bigquery_storage.types.ReadSession(
        table=table.to_bqstorage(), data_format=bigquery_storage.types.DataFormat.ARROW
    )
    if selected_fields is not None:
        for field in selected_fields:
            requested_session.read_options.selected_fields.append(field.name)

    if _ARROW_COMPRESSION_SUPPORT:
        requested_session.read_options.arrow_serialization_options.buffer_compression = (
            ArrowSerializationOptions.CompressionCodec.LZ4_FRAME
        )

    session = bqstorage_client.create_read_session(
        parent="projects/{}".format(project_id),
        read_session=requested_session,
        max_stream_count=requested_streams,
    )

    _LOGGER.debug(
        "Started reading table '{}.{}.{}' with BQ Storage API session '{}'.".format(
            table.project, table.dataset_id, table.table_id, session.name
        )
    )

    # Avoid reading rows from an empty table.
    if not session.streams:
        return

    total_streams = len(session.streams)

    # Use _DownloadState to notify worker threads when to quit.
    download_state = _DownloadState()

    # Create a queue to collect frames as they are created in each thread.
    #
    # The queue is bounded by default so that items do not pile up without
    # limit if the caller iterates over the results more slowly than the
    # worker threads produce them.
    if max_queue_size is _MAX_QUEUE_SIZE_DEFAULT:
        max_queue_size = total_streams
    elif max_queue_size is None:
        max_queue_size = 0  # Unbounded queue.

    worker_queue = queue.Queue(maxsize=max_queue_size)

    with concurrent.futures.ThreadPoolExecutor(max_workers=total_streams) as pool:
        try:
            # Manually submit jobs and check for completed futures rather than
            # using pool.map, so that exceptions raised by the workers are
            # surfaced on the main thread as soon as possible.
            not_done = [
                pool.submit(
                    _download_table_bqstorage_stream,
                    download_state,
                    bqstorage_client,
                    session,
                    stream,
                    worker_queue,
                    page_to_item,
                )
                for stream in session.streams
            ]

            while not_done:
                # Don't block on the worker threads; block on the queue's get
                # method instead. This keeps the queue from filling up, since
                # the main thread spends less time between get calls.
                done, not_done = _nowait(not_done)
                for future in done:
                    # Call result() on any finished threads to raise any
                    # exceptions encountered.
                    future.result()

                try:
                    frame = worker_queue.get(timeout=_PROGRESS_INTERVAL)
                    yield frame
                except queue.Empty:
                    continue

            # Return any remaining values after the workers finished.
            while True:
                try:
                    frame = worker_queue.get_nowait()
                    yield frame
                except queue.Empty:
                    break
        finally:
            # No need for a lock because reading/replacing a variable is
            # defined to be an atomic operation in the Python language
            # definition (enforced by the global interpreter lock).
            download_state.done = True

        # Shutdown all background threads, now that they should know to exit
        # early.
        pool.shutdown(wait=True)


def download_arrow_bqstorage(
    project_id,
    table,
    bqstorage_client,
    preserve_order=False,
    selected_fields=None,
    max_queue_size=_MAX_QUEUE_SIZE_DEFAULT,
):
    return _download_table_bqstorage(
        project_id,
        table,
        bqstorage_client,
        preserve_order=preserve_order,
        selected_fields=selected_fields,
        page_to_item=_bqstorage_page_to_arrow,
        max_queue_size=max_queue_size,
    )


def download_dataframe_bqstorage(
    project_id,
    table,
    bqstorage_client,
    column_names,
    dtypes,
    preserve_order=False,
    selected_fields=None,
    max_queue_size=_MAX_QUEUE_SIZE_DEFAULT,
):
    page_to_item = functools.partial(_bqstorage_page_to_dataframe, column_names, dtypes)
    return _download_table_bqstorage(
        project_id,
        table,
        bqstorage_client,
        preserve_order=preserve_order,
        selected_fields=selected_fields,
        page_to_item=page_to_item,
        max_queue_size=max_queue_size,
    )
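
# Illustrative sketch only: streaming a table into DataFrame chunks with the
# BQ Storage API. The project, dataset, and table names are placeholders, and
# running this performs billable reads against a real table.
#
#     from google.cloud import bigquery, bigquery_storage
#
#     bqstorage_client = bigquery_storage.BigQueryReadClient()
#     table_ref = bigquery.TableReference.from_string("my-project.my_dataset.my_table")
#     frames = download_dataframe_bqstorage(
#         "my-project",
#         table_ref,
#         bqstorage_client,
#         column_names=["name", "age"],
#         dtypes={},
#     )
#     for frame in frames:
#         ...  # each frame is a pandas.DataFrame for one storage page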


def dataframe_to_json_generator(dataframe):
    for row in dataframe.itertuples(index=False, name=None):
        output = {}
        for column, value in zip(dataframe.columns, row):
            # Omit NaN values from the JSON row.
            is_nan = pandas.isna(value)

            # isna() can also return an array-like of bools, but the latter's
            # boolean value is ambiguous, hence an extra check. An array-like
            # value is *not* considered a NaN, however.
            if isinstance(is_nan, bool) and is_nan:
                continue

            # Convert numpy scalars to the corresponding Python types.
            if isinstance(value, numpy.bool_):
                value = bool(value)
            elif isinstance(
                value,
                (
                    numpy.int64,
                    numpy.int32,
                    numpy.int16,
                    numpy.int8,
                    numpy.uint64,
                    numpy.uint32,
                    numpy.uint16,
                    numpy.uint8,
                ),
            ):
                value = int(value)
            output[column] = value

        yield output
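
# Illustrative sketch only (values invented): each DataFrame row becomes a
# JSON-serializable dict, with NaN cells dropped and numpy integer/boolean
# scalars converted to plain Python types.
#
#     import pandas
#
#     df = pandas.DataFrame({"name": ["a", "b"], "age": [10, None]})
#     list(dataframe_to_json_generator(df))
#     # -> [{"name": "a", "age": 10.0}, {"name": "b"}]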


def verify_pandas_imports():
    if pandas is None:
        raise ValueError(_NO_PANDAS_ERROR) from pandas_import_exception
    if db_dtypes is None:
        raise ValueError(_NO_DB_TYPES_ERROR) from db_dtypes_import_exception