| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | import uuid |
| | from copy import copy |
| | from typing import Any, Dict |
| |
|
| | import pyarrow as pa |
| | import pytest |
| | from pydantic import ValidationError |
| | from sortedcontainers import SortedList |
| |
|
| | from pyiceberg.catalog.noop import NoopCatalog |
| | from pyiceberg.exceptions import CommitFailedException |
| | from pyiceberg.expressions import ( |
| | AlwaysTrue, |
| | And, |
| | EqualTo, |
| | In, |
| | ) |
| | from pyiceberg.io import PY_IO_IMPL, load_file_io |
| | from pyiceberg.manifest import ( |
| | DataFile, |
| | DataFileContent, |
| | FileFormat, |
| | ManifestEntry, |
| | ManifestEntryStatus, |
| | ) |
| | from pyiceberg.partitioning import PartitionField, PartitionSpec |
| | from pyiceberg.schema import Schema |
| | from pyiceberg.table import ( |
| | AddSnapshotUpdate, |
| | AddSortOrderUpdate, |
| | AssertCreate, |
| | AssertCurrentSchemaId, |
| | AssertDefaultSortOrderId, |
| | AssertDefaultSpecId, |
| | AssertLastAssignedFieldId, |
| | AssertLastAssignedPartitionId, |
| | AssertRefSnapshotId, |
| | AssertTableUUID, |
| | CommitTableRequest, |
| | RemovePropertiesUpdate, |
| | SetDefaultSortOrderUpdate, |
| | SetPropertiesUpdate, |
| | SetSnapshotRefUpdate, |
| | StaticTable, |
| | Table, |
| | TableIdentifier, |
| | UpdateSchema, |
| | _apply_table_update, |
| | _check_schema_compatible, |
| | _determine_partitions, |
| | _match_deletes_to_data_file, |
| | _TableMetadataUpdateContext, |
| | update_table_metadata, |
| | ) |
| | from pyiceberg.table.metadata import INITIAL_SEQUENCE_NUMBER, TableMetadataUtil, TableMetadataV2, _generate_snapshot_id |
| | from pyiceberg.table.refs import SnapshotRef |
| | from pyiceberg.table.snapshots import ( |
| | Operation, |
| | Snapshot, |
| | SnapshotLogEntry, |
| | Summary, |
| | ancestors_of, |
| | ) |
| | from pyiceberg.table.sorting import ( |
| | NullOrder, |
| | SortDirection, |
| | SortField, |
| | SortOrder, |
| | ) |
| | from pyiceberg.transforms import ( |
| | BucketTransform, |
| | IdentityTransform, |
| | ) |
| | from pyiceberg.typedef import Record |
| | from pyiceberg.types import ( |
| | BinaryType, |
| | BooleanType, |
| | DateType, |
| | DoubleType, |
| | FloatType, |
| | IntegerType, |
| | ListType, |
| | LongType, |
| | MapType, |
| | NestedField, |
| | PrimitiveType, |
| | StringType, |
| | StructType, |
| | TimestampType, |
| | TimestamptzType, |
| | TimeType, |
| | UUIDType, |
| | ) |
| |
|
| |
|
def test_schema(table_v2: Table) -> None:
    """The table's current schema is the v2 schema with id 1 and identifier fields x, y."""
    expected = Schema(
        NestedField(field_id=1, name="x", field_type=LongType(), required=True),
        NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
        NestedField(field_id=3, name="z", field_type=LongType(), required=True),
        identifier_field_ids=[1, 2],
    )
    actual = table_v2.schema()
    assert actual == expected
    assert actual.schema_id == 1
| |
|
| |
|
def test_schemas(table_v2: Table) -> None:
    """Both historical schemas are exposed, keyed by their schema id."""
    schema_v0 = Schema(
        NestedField(field_id=1, name="x", field_type=LongType(), required=True),
        identifier_field_ids=[],
    )
    schema_v1 = Schema(
        NestedField(field_id=1, name="x", field_type=LongType(), required=True),
        NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
        NestedField(field_id=3, name="z", field_type=LongType(), required=True),
        identifier_field_ids=[1, 2],
    )
    actual = table_v2.schemas()
    assert actual == {0: schema_v0, 1: schema_v1}
    assert actual[0].schema_id == 0
    assert actual[1].schema_id == 1
| |
|
| |
|
def test_spec(table_v2: Table) -> None:
    """The current partition spec is an identity partition on ``x``."""
    expected = PartitionSpec(
        PartitionField(source_id=1, field_id=1000, transform=IdentityTransform(), name="x"), spec_id=0
    )
    assert table_v2.spec() == expected
| |
|
| |
|
def test_specs(table_v2: Table) -> None:
    """All partition specs are exposed, keyed by spec id."""
    spec_0 = PartitionSpec(
        PartitionField(source_id=1, field_id=1000, transform=IdentityTransform(), name="x"), spec_id=0
    )
    assert table_v2.specs() == {0: spec_0}
| |
|
| |
|
def test_sort_order(table_v2: Table) -> None:
    """The default sort order (id 3) sorts y ASC nulls-first, then bucket[4](z) DESC nulls-last."""
    field_y = SortField(
        source_id=2, transform=IdentityTransform(), direction=SortDirection.ASC, null_order=NullOrder.NULLS_FIRST
    )
    field_z = SortField(
        source_id=3,
        transform=BucketTransform(num_buckets=4),
        direction=SortDirection.DESC,
        null_order=NullOrder.NULLS_LAST,
    )
    assert table_v2.sort_order() == SortOrder(field_y, field_z, order_id=3)
| |
|
| |
|
def test_sort_orders(table_v2: Table) -> None:
    """All sort orders are exposed, keyed by their order id."""
    field_y = SortField(
        source_id=2, transform=IdentityTransform(), direction=SortDirection.ASC, null_order=NullOrder.NULLS_FIRST
    )
    field_z = SortField(
        source_id=3,
        transform=BucketTransform(num_buckets=4),
        direction=SortDirection.DESC,
        null_order=NullOrder.NULLS_LAST,
    )
    assert table_v2.sort_orders() == {3: SortOrder(field_y, field_z, order_id=3)}
| |
|
| |
|
def test_location(table_v2: Table) -> None:
    """The table location comes straight from the metadata."""
    assert "s3://bucket/test/location" == table_v2.location()
| |
|
| |
|
def test_current_snapshot(table_v2: Table) -> None:
    """The current snapshot is the second (latest) snapshot in the metadata."""
    expected = Snapshot(
        snapshot_id=3055729675574597004,
        parent_snapshot_id=3051729675574597004,
        sequence_number=1,
        timestamp_ms=1555100955770,
        manifest_list="s3://a/b/2.avro",
        summary=Summary(operation=Operation.APPEND),
        schema_id=1,
    )
    assert expected == table_v2.current_snapshot()
| |
|
| |
|
def test_snapshot_by_id(table_v2: Table) -> None:
    """Looking a snapshot up by id returns the full snapshot object."""
    expected = Snapshot(
        snapshot_id=3055729675574597004,
        parent_snapshot_id=3051729675574597004,
        sequence_number=1,
        timestamp_ms=1555100955770,
        manifest_list="s3://a/b/2.avro",
        summary=Summary(operation=Operation.APPEND),
        schema_id=1,
    )
    assert expected == table_v2.snapshot_by_id(3055729675574597004)
| |
|
| |
|
def test_snapshot_by_timestamp(table_v2: Table) -> None:
    """Timestamp lookup is inclusive by default and exclusive when requested."""
    expected = Snapshot(
        snapshot_id=3051729675574597004,
        parent_snapshot_id=None,
        sequence_number=0,
        timestamp_ms=1515100955770,
        manifest_list="s3://a/b/1.avro",
        summary=Summary(Operation.APPEND),
        schema_id=None,
    )
    # Exact-timestamp hit with the default (inclusive) bound.
    assert table_v2.snapshot_as_of_timestamp(1515100955770) == expected
    # The same timestamp with an exclusive bound finds nothing older.
    assert table_v2.snapshot_as_of_timestamp(1515100955770, inclusive=False) is None
| |
|
| |
|
def test_ancestors_of(table_v2: Table) -> None:
    """ancestors_of walks from the current snapshot back to the root, newest first."""
    child = Snapshot(
        snapshot_id=3055729675574597004,
        parent_snapshot_id=3051729675574597004,
        sequence_number=1,
        timestamp_ms=1555100955770,
        manifest_list="s3://a/b/2.avro",
        summary=Summary(Operation.APPEND),
        schema_id=1,
    )
    root = Snapshot(
        snapshot_id=3051729675574597004,
        parent_snapshot_id=None,
        sequence_number=0,
        timestamp_ms=1515100955770,
        manifest_list="s3://a/b/1.avro",
        summary=Summary(Operation.APPEND),
        schema_id=None,
    )
    assert [child, root] == list(ancestors_of(table_v2.current_snapshot(), table_v2.metadata))
| |
|
| |
|
def test_snapshot_by_id_does_not_exist(table_v2: Table) -> None:
    """An unknown snapshot id yields None rather than raising."""
    assert table_v2.snapshot_by_id(-1) is None
| |
|
| |
|
def test_snapshot_by_name(table_v2: Table) -> None:
    """The 'test' ref resolves to the first snapshot."""
    expected = Snapshot(
        snapshot_id=3051729675574597004,
        parent_snapshot_id=None,
        sequence_number=0,
        timestamp_ms=1515100955770,
        manifest_list="s3://a/b/1.avro",
        summary=Summary(operation=Operation.APPEND),
        schema_id=None,
    )
    assert expected == table_v2.snapshot_by_name("test")
| |
|
| |
|
def test_snapshot_by_name_does_not_exist(table_v2: Table) -> None:
    """An unknown ref name yields None rather than raising."""
    assert table_v2.snapshot_by_name("doesnotexist") is None
| |
|
| |
|
def test_repr(table_v2: Table) -> None:
    """repr(table) renders the schema, partition spec, sort order and current snapshot."""
    expected_repr = """table(
  1: x: required long,
  2: y: required long (comment),
  3: z: required long
),
partition by: [x],
sort order: [2 ASC NULLS FIRST, bucket[4](3) DESC NULLS LAST],
snapshot: Operation.APPEND: id=3055729675574597004, parent_id=3051729675574597004, schema_id=1"""
    assert expected_repr == repr(table_v2)
| |
|
| |
|
def test_history(table_v2: Table) -> None:
    """The snapshot log lists entries oldest-first."""
    first = SnapshotLogEntry(snapshot_id=3051729675574597004, timestamp_ms=1515100955770)
    second = SnapshotLogEntry(snapshot_id=3055729675574597004, timestamp_ms=1555100955770)
    assert table_v2.history() == [first, second]
| |
|
| |
|
def test_table_scan_select(table_v2: Table) -> None:
    """select() narrows the projection; a later select() replaces the earlier one."""
    scan = table_v2.scan()
    assert ("*",) == scan.selected_fields
    assert ("a", "b") == scan.select("a", "b").selected_fields
    # Chained selects do not accumulate: the last call wins.
    assert ("a",) == scan.select("a", "c").select("a").selected_fields
| |
|
| |
|
def test_table_scan_row_filter(table_v2: Table) -> None:
    """filter() starts from AlwaysTrue and ANDs successive predicates together."""
    scan = table_v2.scan()
    assert scan.row_filter == AlwaysTrue()
    first_filter = EqualTo("x", 10)
    assert scan.filter(first_filter).row_filter == first_filter
    second_filter = In("y", (10, 11))
    combined = scan.filter(first_filter).filter(second_filter)
    assert combined.row_filter == And(first_filter, second_filter)
| |
|
| |
|
def test_table_scan_ref(table_v2: Table) -> None:
    """use_ref() pins the scan to the snapshot behind the named ref."""
    pinned = table_v2.scan().use_ref("test")
    assert 3051729675574597004 == pinned.snapshot_id
| |
|
| |
|
def test_table_scan_ref_does_not_exists(table_v2: Table) -> None:
    """Using an unknown ref raises a ValueError naming the ref."""
    scan = table_v2.scan()

    with pytest.raises(ValueError) as err:
        _ = scan.use_ref("boom")

    assert "Cannot scan unknown ref=boom" in str(err.value)
| |
|
| |
|
def test_table_scan_projection_full_schema(table_v2: Table) -> None:
    """Selecting every column projects the complete current schema."""
    expected = Schema(
        NestedField(field_id=1, name="x", field_type=LongType(), required=True),
        NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
        NestedField(field_id=3, name="z", field_type=LongType(), required=True),
        identifier_field_ids=[1, 2],
    )
    projected = table_v2.scan().select("x", "y", "z").projection()
    assert projected == expected
    assert projected.schema_id == 1
| |
|
| |
|
def test_table_scan_projection_single_column(table_v2: Table) -> None:
    """Selecting one column projects only that field (and the matching identifier ids)."""
    expected = Schema(
        NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
        identifier_field_ids=[2],
    )
    projected = table_v2.scan().select("y").projection()
    assert projected == expected
    assert projected.schema_id == 1
| |
|
| |
|
def test_table_scan_projection_single_column_case_sensitive(table_v2: Table) -> None:
    """With case sensitivity off, 'Y' still resolves to column 'y'."""
    expected = Schema(
        NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
        identifier_field_ids=[2],
    )
    projected = table_v2.scan().with_case_sensitive(False).select("Y").projection()
    assert projected == expected
    assert projected.schema_id == 1
| |
|
| |
|
def test_table_scan_projection_unknown_column(table_v2: Table) -> None:
    """Projecting a column that does not exist raises a ValueError naming it."""
    scan = table_v2.scan()

    with pytest.raises(ValueError) as err:
        _ = scan.select("a").projection()

    assert "Could not find column: 'a'" in str(err.value)
| |
|
| |
|
def test_static_table_same_as_table(table_v2: Table, metadata_location: str) -> None:
    """A StaticTable loaded from a metadata file carries the same metadata as the live table."""
    loaded = StaticTable.from_metadata(metadata_location)
    assert isinstance(loaded, Table)
    assert loaded.metadata == table_v2.metadata
| |
|
| |
|
def test_static_table_gz_same_as_table(table_v2: Table, metadata_location_gz: str) -> None:
    """A StaticTable also loads gzip-compressed metadata files transparently."""
    loaded = StaticTable.from_metadata(metadata_location_gz)
    assert isinstance(loaded, Table)
    assert loaded.metadata == table_v2.metadata
| |
|
| |
|
def test_static_table_io_does_not_exist(metadata_location: str) -> None:
    """Pointing PY_IO_IMPL at a non-existent FileIO class fails with ValueError."""
    bad_io_properties = {PY_IO_IMPL: "pyiceberg.does.not.exist.FileIO"}
    with pytest.raises(ValueError):
        StaticTable.from_metadata(metadata_location, bad_io_properties)
| |
|
| |
|
def test_match_deletes_to_datafile() -> None:
    """Only positional deletes with a sequence number above the data file's match it."""
    # Data file written at sequence number 1.
    data_entry = ManifestEntry(
        status=ManifestEntryStatus.ADDED,
        sequence_number=1,
        data_file=DataFile(
            content=DataFileContent.DATA,
            file_path="s3://bucket/0000.parquet",
            file_format=FileFormat.PARQUET,
            partition={},
            record_count=3,
            file_size_in_bytes=3,
        ),
    )
    # Sequence number 0 predates the data file, so this delete must NOT match.
    delete_entry_1 = ManifestEntry(
        status=ManifestEntryStatus.ADDED,
        sequence_number=0,
        data_file=DataFile(
            content=DataFileContent.POSITION_DELETES,
            file_path="s3://bucket/0001-delete.parquet",
            file_format=FileFormat.PARQUET,
            partition={},
            record_count=3,
            file_size_in_bytes=3,
        ),
    )
    # Sequence number 3 is newer than the data file, so this delete applies.
    delete_entry_2 = ManifestEntry(
        status=ManifestEntryStatus.ADDED,
        sequence_number=3,
        data_file=DataFile(
            content=DataFileContent.POSITION_DELETES,
            file_path="s3://bucket/0002-delete.parquet",
            file_format=FileFormat.PARQUET,
            partition={},
            record_count=3,
            file_size_in_bytes=3,
            # Empty column statistics so the delete file is considered for matching.
            value_counts={},
            null_value_counts={},
            nan_value_counts={},
            lower_bounds={},
            upper_bounds={},
        ),
    )
    # Deletes are supplied sorted by sequence number (missing numbers fall back to
    # INITIAL_SEQUENCE_NUMBER); only the newer delete is expected back.
    assert _match_deletes_to_data_file(
        data_entry,
        SortedList(iterable=[delete_entry_1, delete_entry_2], key=lambda entry: entry.sequence_number or INITIAL_SEQUENCE_NUMBER),
    ) == {
        delete_entry_2.data_file,
    }
| |
|
| |
|
def test_match_deletes_to_datafile_duplicate_number() -> None:
    """Two positional deletes sharing the same (newer) sequence number both match."""
    # Data file written at sequence number 1.
    data_entry = ManifestEntry(
        status=ManifestEntryStatus.ADDED,
        sequence_number=1,
        data_file=DataFile(
            content=DataFileContent.DATA,
            file_path="s3://bucket/0000.parquet",
            file_format=FileFormat.PARQUET,
            partition={},
            record_count=3,
            file_size_in_bytes=3,
        ),
    )
    # Both deletes carry sequence number 3 (> 1), so both are expected to apply.
    delete_entry_1 = ManifestEntry(
        status=ManifestEntryStatus.ADDED,
        sequence_number=3,
        data_file=DataFile(
            content=DataFileContent.POSITION_DELETES,
            file_path="s3://bucket/0001-delete.parquet",
            file_format=FileFormat.PARQUET,
            partition={},
            record_count=3,
            file_size_in_bytes=3,
            # Empty column statistics so the delete file is considered for matching.
            value_counts={},
            null_value_counts={},
            nan_value_counts={},
            lower_bounds={},
            upper_bounds={},
        ),
    )
    delete_entry_2 = ManifestEntry(
        status=ManifestEntryStatus.ADDED,
        sequence_number=3,
        data_file=DataFile(
            content=DataFileContent.POSITION_DELETES,
            file_path="s3://bucket/0002-delete.parquet",
            file_format=FileFormat.PARQUET,
            partition={},
            record_count=3,
            file_size_in_bytes=3,
            # Empty column statistics so the delete file is considered for matching.
            value_counts={},
            null_value_counts={},
            nan_value_counts={},
            lower_bounds={},
            upper_bounds={},
        ),
    )
    # Equal sequence numbers must not shadow each other: both delete files are returned.
    assert _match_deletes_to_data_file(
        data_entry,
        SortedList(iterable=[delete_entry_1, delete_entry_2], key=lambda entry: entry.sequence_number or INITIAL_SEQUENCE_NUMBER),
    ) == {
        delete_entry_1.data_file,
        delete_entry_2.data_file,
    }
| |
|
| |
|
def test_serialize_set_properties_updates() -> None:
    """SetPropertiesUpdate serializes with its action tag and unescaped unicode values."""
    json_repr = SetPropertiesUpdate(updates={"abc": "🤪"}).model_dump_json()
    assert json_repr == """{"action":"set-properties","updates":{"abc":"🤪"}}"""
| |
|
| |
|
def test_add_column(table_v2: Table) -> None:
    """Adding a column yields a new optional field with the next field id and schema id."""
    schema_update = UpdateSchema(transaction=table_v2.transaction())
    schema_update.add_column(path="b", field_type=IntegerType())
    applied: Schema = schema_update._apply()

    expected = Schema(
        NestedField(field_id=1, name="x", field_type=LongType(), required=True),
        NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
        NestedField(field_id=3, name="z", field_type=LongType(), required=True),
        # New columns are always added as optional, with the next available field id.
        NestedField(field_id=4, name="b", field_type=IntegerType(), required=False),
        identifier_field_ids=[1, 2],
    )
    assert len(applied.fields) == 4
    assert applied == expected
    assert applied.schema_id == 2
    assert applied.highest_field_id == 4
| |
|
| |
|
def test_add_primitive_type_column(table_v2: Table) -> None:
    """Every primitive type can be added as a new column and is read back with its doc."""
    primitive_types: Dict[str, PrimitiveType] = {
        "boolean": BooleanType(),
        "int": IntegerType(),
        "long": LongType(),
        "float": FloatType(),
        "double": DoubleType(),
        "date": DateType(),
        "time": TimeType(),
        "timestamp": TimestampType(),
        "timestamptz": TimestamptzType(),
        "string": StringType(),
        "uuid": UUIDType(),
        "binary": BinaryType(),
    }

    for type_name, primitive in primitive_types.items():
        column_name = f"new_column_{type_name}"
        schema_update = UpdateSchema(transaction=table_v2.transaction())
        schema_update.add_column(path=column_name, field_type=primitive, doc=f"new_column_{type_name}")
        applied = schema_update._apply()

        added_field: NestedField = applied.find_field(column_name)
        assert added_field.field_type == primitive
        assert added_field.doc == f"new_column_{type_name}"
| |
|
| |
|
def test_add_nested_type_column(table_v2: Table) -> None:
    """A struct column's nested field ids are reassigned to fresh ids on add."""
    column_name = "new_column_struct"
    schema_update = UpdateSchema(transaction=table_v2.transaction())
    # Deliberately use ids that clash with existing fields; they must be reassigned.
    new_struct = StructType(
        NestedField(1, "lat", DoubleType()),
        NestedField(2, "long", DoubleType()),
    )
    schema_update.add_column(path=column_name, field_type=new_struct)
    applied = schema_update._apply()
    added_field: NestedField = applied.find_field(column_name)
    assert added_field.field_type == StructType(
        NestedField(5, "lat", DoubleType()),
        NestedField(6, "long", DoubleType()),
    )
    assert applied.highest_field_id == 6
| |
|
| |
|
def test_add_nested_map_type_column(table_v2: Table) -> None:
    """A map column's key/value ids are reassigned to fresh ids on add."""
    column_name = "new_column_map"
    schema_update = UpdateSchema(transaction=table_v2.transaction())
    # Deliberately use ids that clash with existing fields; they must be reassigned.
    new_map = MapType(1, StringType(), 2, IntegerType(), False)
    schema_update.add_column(path=column_name, field_type=new_map)
    applied = schema_update._apply()
    added_field: NestedField = applied.find_field(column_name)
    assert added_field.field_type == MapType(5, StringType(), 6, IntegerType(), False)
    assert applied.highest_field_id == 6
| |
|
| |
|
def test_add_nested_list_type_column(table_v2: Table) -> None:
    """A list column's element and nested struct ids are reassigned to fresh ids on add."""
    column_name = "new_column_list"
    schema_update = UpdateSchema(transaction=table_v2.transaction())
    # Deliberately use ids that clash with nothing in the table; they are reassigned anyway.
    new_list = ListType(
        element_id=101,
        element_type=StructType(
            NestedField(102, "lat", DoubleType()),
            NestedField(103, "long", DoubleType()),
        ),
        element_required=False,
    )
    schema_update.add_column(path=column_name, field_type=new_list)
    applied = schema_update._apply()
    added_field: NestedField = applied.find_field(column_name)
    assert added_field.field_type == ListType(
        element_id=5,
        element_type=StructType(
            NestedField(6, "lat", DoubleType()),
            NestedField(7, "long", DoubleType()),
        ),
        element_required=False,
    )
    assert applied.highest_field_id == 7
| |
|
| |
|
def test_apply_set_properties_update(table_v2: Table) -> None:
    """SetPropertiesUpdate merges new properties without mutating the base metadata."""
    base_metadata = table_v2.metadata

    # An empty update is a no-op: the result compares equal to the base.
    new_metadata_no_update = update_table_metadata(base_metadata, (SetPropertiesUpdate(updates={}),))
    assert new_metadata_no_update == base_metadata

    # Overwrite an existing property and add two new ones in a single update.
    new_metadata = update_table_metadata(
        base_metadata, (SetPropertiesUpdate(updates={"read.split.target.size": "123", "test_a": "test_a", "test_b": "test_b"}),)
    )

    # The base metadata keeps its original single property (no in-place mutation).
    assert base_metadata.properties == {"read.split.target.size": "134217728"}
    assert new_metadata.properties == {"read.split.target.size": "123", "test_a": "test_a", "test_b": "test_b"}

    # A follow-up additive update keeps everything set before.
    new_metadata_add_only = update_table_metadata(new_metadata, (SetPropertiesUpdate(updates={"test_c": "test_c"}),))

    assert new_metadata_add_only.properties == {
        "read.split.target.size": "123",
        "test_a": "test_a",
        "test_b": "test_b",
        "test_c": "test_c",
    }
| |
|
| |
|
def test_apply_remove_properties_update(table_v2: Table) -> None:
    """RemovePropertiesUpdate drops only the named keys and leaves the base metadata intact."""
    # Seed the metadata with four extra properties to remove from.
    base_metadata = update_table_metadata(
        table_v2.metadata,
        (SetPropertiesUpdate(updates={"test_a": "test_a", "test_b": "test_b", "test_c": "test_c", "test_d": "test_d"}),),
    )

    # Removing nothing is a no-op.
    new_metadata_no_removal = update_table_metadata(base_metadata, (RemovePropertiesUpdate(removals=[]),))

    assert base_metadata == new_metadata_no_removal

    # Remove two of the four seeded properties.
    new_metadata = update_table_metadata(base_metadata, (RemovePropertiesUpdate(removals=["test_a", "test_c"]),))

    # The base metadata keeps all properties (no in-place mutation) ...
    assert base_metadata.properties == {
        "read.split.target.size": "134217728",
        "test_a": "test_a",
        "test_b": "test_b",
        "test_c": "test_c",
        "test_d": "test_d",
    }
    # ... while the new metadata is missing exactly the removed keys.
    assert new_metadata.properties == {"read.split.target.size": "134217728", "test_b": "test_b", "test_d": "test_d"}
| |
|
| |
|
def test_apply_add_schema_update(table_v2: Table) -> None:
    """Applying the schema-change updates one at a time through _apply_table_update."""
    transaction = table_v2.transaction()
    update = transaction.update_schema()
    update.add_column(path="b", field_type=IntegerType())
    update.commit()

    test_context = _TableMetadataUpdateContext()

    # First update adds the new schema (id 2) but does not make it current yet.
    new_table_metadata = _apply_table_update(transaction._updates[0], base_metadata=table_v2.metadata, context=test_context)
    assert len(new_table_metadata.schemas) == 3
    assert new_table_metadata.current_schema_id == 1
    # The context records each applied update and the newly added schema id.
    assert len(test_context._updates) == 1
    assert test_context._updates[0] == transaction._updates[0]
    assert test_context.is_added_schema(2)

    # Second update switches the current schema to the newly added one.
    new_table_metadata = _apply_table_update(transaction._updates[1], base_metadata=new_table_metadata, context=test_context)
    assert len(new_table_metadata.schemas) == 3
    assert new_table_metadata.current_schema_id == 2
    assert len(test_context._updates) == 2
    assert test_context._updates[1] == transaction._updates[1]
    assert test_context.is_added_schema(2)
| |
|
| |
|
def test_update_metadata_table_schema(table_v2: Table) -> None:
    """update_table_metadata applies a committed schema change end-to-end."""
    transaction = table_v2.transaction()
    schema_update = transaction.update_schema()
    schema_update.add_column(path="b", field_type=IntegerType())
    schema_update.commit()

    new_metadata = update_table_metadata(table_v2.metadata, transaction._updates)
    applied: Schema = next(schema for schema in new_metadata.schemas if schema.schema_id == 2)

    expected = Schema(
        NestedField(field_id=1, name="x", field_type=LongType(), required=True),
        NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"),
        NestedField(field_id=3, name="z", field_type=LongType(), required=True),
        NestedField(field_id=4, name="b", field_type=IntegerType(), required=False),
        identifier_field_ids=[1, 2],
    )
    assert len(applied.fields) == 4
    assert applied == expected
    assert applied.schema_id == 2
    assert applied.highest_field_id == 4

    # The new schema also becomes the current one.
    assert new_metadata.current_schema_id == 2
| |
|
| |
|
def test_update_metadata_add_snapshot(table_v2: Table) -> None:
    """AddSnapshotUpdate appends the snapshot and advances sequence number and timestamp."""
    snapshot = Snapshot(
        snapshot_id=25,
        parent_snapshot_id=19,
        sequence_number=200,
        timestamp_ms=1602638573590,
        manifest_list="s3:/a/b/c.avro",
        summary=Summary(Operation.APPEND),
        schema_id=3,
    )

    new_metadata = update_table_metadata(table_v2.metadata, (AddSnapshotUpdate(snapshot=snapshot),))
    assert len(new_metadata.snapshots) == 3
    assert new_metadata.snapshots[-1] == snapshot
    assert new_metadata.last_sequence_number == snapshot.sequence_number
    assert new_metadata.last_updated_ms == snapshot.timestamp_ms
| |
|
| |
|
def test_update_metadata_set_snapshot_ref(table_v2: Table) -> None:
    """Repointing 'main' updates the ref, the snapshot log, and the current snapshot id."""
    ref_update = SetSnapshotRefUpdate(
        ref_name="main",
        type="branch",
        snapshot_id=3051729675574597004,
        max_ref_age_ms=123123123,
        max_snapshot_age_ms=12312312312,
        min_snapshots_to_keep=1,
    )

    new_metadata = update_table_metadata(table_v2.metadata, (ref_update,))
    assert len(new_metadata.snapshot_log) == 3
    assert new_metadata.snapshot_log[2].snapshot_id == 3051729675574597004
    assert new_metadata.current_snapshot_id == 3051729675574597004
    # The update stamps a fresh last-updated timestamp.
    assert new_metadata.last_updated_ms > table_v2.metadata.last_updated_ms
    expected_ref = SnapshotRef(
        snapshot_id=3051729675574597004,
        snapshot_ref_type="branch",
        min_snapshots_to_keep=1,
        max_snapshot_age_ms=12312312312,
        max_ref_age_ms=123123123,
    )
    assert new_metadata.refs[ref_update.ref_name] == expected_ref
| |
|
| |
|
def test_update_metadata_add_update_sort_order(table_v2: Table) -> None:
    """Adding a sort order and selecting -1 makes the last-added order the default."""
    next_order_id = table_v2.sort_order().order_id + 1
    added_order = SortOrder(order_id=next_order_id)
    new_metadata = update_table_metadata(
        table_v2.metadata,
        # sort_order_id=-1 means "the sort order added in this same batch of updates".
        (AddSortOrderUpdate(sort_order=added_order), SetDefaultSortOrderUpdate(sort_order_id=-1)),
    )
    assert len(new_metadata.sort_orders) == 2
    assert new_metadata.sort_orders[-1] == added_order
    assert new_metadata.default_sort_order_id == added_order.order_id
| |
|
| |
|
def test_update_metadata_update_sort_order_invalid(table_v2: Table) -> None:
    """Selecting a default sort order fails for -1 without a prior add, and for unknown ids."""
    # -1 ("last added") is invalid when no AddSortOrderUpdate preceded it.
    with pytest.raises(ValueError, match="Cannot set current sort order to the last added one when no sort order has been added"):
        update_table_metadata(table_v2.metadata, (SetDefaultSortOrderUpdate(sort_order_id=-1),))

    # An id that no sort order carries is rejected as well.
    invalid_order_id = 10
    with pytest.raises(ValueError, match=f"Sort order with id {invalid_order_id} does not exist"):
        update_table_metadata(table_v2.metadata, (SetDefaultSortOrderUpdate(sort_order_id=invalid_order_id),))
| |
|
| |
|
def test_update_metadata_with_multiple_updates(table_v1: Table) -> None:
    """A mixed batch of updates (upgrade, schema, snapshot, ref, properties) applies in order."""
    base_metadata = table_v1.metadata
    transaction = table_v1.transaction()
    transaction.upgrade_table_version(format_version=2)

    schema_update_1 = transaction.update_schema()
    schema_update_1.add_column(path="b", field_type=IntegerType())
    schema_update_1.commit()

    transaction.set_properties(owner="test", test_a="test_a", test_b="test_b", test_c="test_c")

    test_updates = transaction._updates

    new_snapshot = Snapshot(
        snapshot_id=25,
        parent_snapshot_id=19,
        sequence_number=200,
        timestamp_ms=1602638573590,
        manifest_list="s3:/a/b/c.avro",
        summary=Summary(Operation.APPEND),
        schema_id=3,
    )

    # Extend the transaction's updates with hand-built ones; later updates may
    # override earlier ones (test_a is overwritten, test_b/test_c are removed).
    test_updates += (
        AddSnapshotUpdate(snapshot=new_snapshot),
        SetPropertiesUpdate(updates={"test_a": "test_a1"}),
        SetSnapshotRefUpdate(
            ref_name="main",
            type="branch",
            snapshot_id=25,
            max_ref_age_ms=123123123,
            max_snapshot_age_ms=12312312312,
            min_snapshots_to_keep=1,
        ),
        RemovePropertiesUpdate(removals=["test_c", "test_b"]),
    )

    new_metadata = update_table_metadata(base_metadata, test_updates)
    # Round-trip through serialization to make sure the result still validates.
    new_metadata = TableMetadataUtil.parse_obj(copy(new_metadata.model_dump()))

    # Format version was upgraded from v1 to v2.
    assert new_metadata.format_version == 2
    assert isinstance(new_metadata, TableMetadataV2)

    # The schema change added a schema and made it current.
    assert len(new_metadata.schemas) == 2
    assert new_metadata.current_schema_id == 1
    assert new_metadata.schema_by_id(new_metadata.current_schema_id).highest_field_id == 4

    # The new snapshot was appended and advanced the sequence number.
    assert len(new_metadata.snapshots) == 2
    assert new_metadata.snapshots[-1] == new_snapshot
    assert new_metadata.last_sequence_number == new_snapshot.sequence_number
    assert new_metadata.last_updated_ms == new_snapshot.timestamp_ms

    # The 'main' ref now points at the new snapshot.
    assert len(new_metadata.snapshot_log) == 1
    assert new_metadata.snapshot_log[0].snapshot_id == 25
    assert new_metadata.current_snapshot_id == 25
    assert new_metadata.last_updated_ms == 1602638573590
    assert new_metadata.refs["main"] == SnapshotRef(
        snapshot_id=25,
        snapshot_ref_type="branch",
        min_snapshots_to_keep=1,
        max_snapshot_age_ms=12312312312,
        max_ref_age_ms=123123123,
    )

    # Property updates applied in order: set, overwrite test_a, remove test_b/test_c.
    assert new_metadata.properties == {"owner": "test", "test_a": "test_a1"}
| |
|
| |
|
def test_metadata_isolation_from_illegal_updates(table_v1: Table) -> None:
    """Updating metadata never mutates the base, even if the result is later tampered with."""
    base_metadata = table_v1.metadata
    # Deep copy to detect any in-place mutation of the base afterwards.
    base_metadata_backup = base_metadata.model_copy(deep=True)

    # Apply a schema change plus a hand-built snapshot and ref update.
    transaction = table_v1.transaction()
    schema_update_1 = transaction.update_schema()
    schema_update_1.add_column(path="b", field_type=IntegerType())
    schema_update_1.commit()
    test_updates = transaction._updates
    new_snapshot = Snapshot(
        snapshot_id=25,
        parent_snapshot_id=19,
        sequence_number=200,
        timestamp_ms=1602638573590,
        manifest_list="s3:/a/b/c.avro",
        summary=Summary(Operation.APPEND),
        schema_id=3,
    )
    test_updates += (
        AddSnapshotUpdate(snapshot=new_snapshot),
        SetSnapshotRefUpdate(
            ref_name="main",
            type="branch",
            snapshot_id=25,
            max_ref_age_ms=123123123,
            max_snapshot_age_ms=12312312312,
            min_snapshots_to_keep=1,
        ),
    )
    new_metadata = update_table_metadata(base_metadata, test_updates)

    # The base metadata is untouched by the update.
    assert base_metadata == base_metadata_backup

    # Deliberately mutate the returned metadata in an illegal way (appending a
    # partition spec directly instead of going through an update) ...
    new_metadata.partition_specs.append(PartitionSpec(spec_id=0))
    assert len(new_metadata.partition_specs) == 2

    # ... and verify the tampering does not leak back into the base metadata.
    assert len(base_metadata.partition_specs) == 1
| |
|
| |
|
def test_generate_snapshot_id(table_v2: Table) -> None:
    """Snapshot ids are plain ints, whether from the module helper or the metadata."""
    generated = _generate_snapshot_id()
    from_metadata = table_v2.metadata.new_snapshot_id()
    assert isinstance(generated, int)
    assert isinstance(from_metadata, int)
| |
|
| |
|
def test_assert_create(table_v2: Table) -> None:
    """AssertCreate passes only when no table metadata exists yet."""
    # No metadata means the table does not exist yet: requirement holds.
    AssertCreate().validate(None)

    # Existing metadata means the table was created concurrently: requirement fails.
    with pytest.raises(CommitFailedException, match="Table already exists"):
        AssertCreate().validate(table_v2.metadata)
| |
|
| |
|
def test_assert_table_uuid(table_v2: Table) -> None:
    """AssertTableUUID passes on a matching UUID and fails on missing or mismatched metadata."""
    metadata = table_v2.metadata
    AssertTableUUID(uuid=metadata.table_uuid).validate(metadata)

    with pytest.raises(CommitFailedException, match="Requirement failed: current table metadata is missing"):
        AssertTableUUID(uuid=uuid.UUID("9c12d441-03fe-4693-9a96-a0705ddf69c2")).validate(None)

    with pytest.raises(
        CommitFailedException,
        match="Table UUID does not match: 9c12d441-03fe-4693-9a96-a0705ddf69c2 != 9c12d441-03fe-4693-9a96-a0705ddf69c1",
    ):
        AssertTableUUID(uuid=uuid.UUID("9c12d441-03fe-4693-9a96-a0705ddf69c2")).validate(metadata)
| |
|
| |
|
def test_assert_ref_snapshot_id(table_v2: Table) -> None:
    """AssertRefSnapshotId covers all four outcomes: match, missing metadata, changed, missing ref."""
    base_metadata = table_v2.metadata
    # Happy path: 'main' currently points at the current snapshot id.
    AssertRefSnapshotId(ref="main", snapshot_id=base_metadata.current_snapshot_id).validate(base_metadata)

    # No metadata at all.
    with pytest.raises(CommitFailedException, match="Requirement failed: current table metadata is missing"):
        AssertRefSnapshotId(ref="main", snapshot_id=1).validate(None)

    # Expecting the ref NOT to exist (snapshot_id=None) while it does.
    with pytest.raises(
        CommitFailedException,
        match="Requirement failed: branch main was created concurrently",
    ):
        AssertRefSnapshotId(ref="main", snapshot_id=None).validate(base_metadata)

    # The ref exists but points at a different snapshot than expected.
    with pytest.raises(
        CommitFailedException,
        match="Requirement failed: branch main has changed: expected id 1, found 3055729675574597004",
    ):
        AssertRefSnapshotId(ref="main", snapshot_id=1).validate(base_metadata)

    # Expecting a snapshot on a ref that does not exist at all.
    with pytest.raises(
        CommitFailedException,
        match="Requirement failed: branch or tag not_exist is missing, expected 1",
    ):
        AssertRefSnapshotId(ref="not_exist", snapshot_id=1).validate(base_metadata)
| |
|
| |
|
def test_assert_last_assigned_field_id(table_v2: Table) -> None:
    """AssertLastAssignedFieldId passes on a match and fails on missing or stale metadata."""
    metadata = table_v2.metadata
    AssertLastAssignedFieldId(last_assigned_field_id=metadata.last_column_id).validate(metadata)

    with pytest.raises(CommitFailedException, match="Requirement failed: current table metadata is missing"):
        AssertLastAssignedFieldId(last_assigned_field_id=1).validate(None)

    with pytest.raises(
        CommitFailedException,
        match="Requirement failed: last assigned field id has changed: expected 1, found 3",
    ):
        AssertLastAssignedFieldId(last_assigned_field_id=1).validate(metadata)
| |
|
| |
|
def test_assert_current_schema_id(table_v2: Table) -> None:
    """AssertCurrentSchemaId passes for the live schema id and fails otherwise."""
    metadata = table_v2.metadata

    # The metadata's own current schema id always validates.
    AssertCurrentSchemaId(current_schema_id=metadata.current_schema_id).validate(metadata)

    # Absent metadata fails the requirement.
    with pytest.raises(CommitFailedException, match="Requirement failed: current table metadata is missing"):
        AssertCurrentSchemaId(current_schema_id=1).validate(None)

    # A stale expected schema id is rejected.
    with pytest.raises(
        CommitFailedException, match="Requirement failed: current schema id has changed: expected 2, found 1"
    ):
        AssertCurrentSchemaId(current_schema_id=2).validate(metadata)
| |
|
| |
|
def test_last_assigned_partition_id(table_v2: Table) -> None:
    """AssertLastAssignedPartitionId matches the metadata value and rejects stale ones."""
    metadata = table_v2.metadata

    # The metadata's own last partition id always validates.
    AssertLastAssignedPartitionId(last_assigned_partition_id=metadata.last_partition_id).validate(metadata)

    # Absent metadata fails the requirement.
    with pytest.raises(CommitFailedException, match="Requirement failed: current table metadata is missing"):
        AssertLastAssignedPartitionId(last_assigned_partition_id=1).validate(None)

    # A stale expected id fails against the fixture's actual value (1000).
    with pytest.raises(
        CommitFailedException, match="Requirement failed: last assigned partition id has changed: expected 1, found 1000"
    ):
        AssertLastAssignedPartitionId(last_assigned_partition_id=1).validate(metadata)
| |
|
| |
|
def test_assert_default_spec_id(table_v2: Table) -> None:
    """AssertDefaultSpecId validates the current default spec id and rejects others."""
    metadata = table_v2.metadata

    # The metadata's own default spec id always validates.
    AssertDefaultSpecId(default_spec_id=metadata.default_spec_id).validate(metadata)

    # Absent metadata fails the requirement.
    with pytest.raises(CommitFailedException, match="Requirement failed: current table metadata is missing"):
        AssertDefaultSpecId(default_spec_id=1).validate(None)

    # A stale expected id fails against the fixture's actual default spec id (0).
    with pytest.raises(
        CommitFailedException, match="Requirement failed: default spec id has changed: expected 1, found 0"
    ):
        AssertDefaultSpecId(default_spec_id=1).validate(metadata)
| |
|
| |
|
def test_assert_default_sort_order_id(table_v2: Table) -> None:
    """AssertDefaultSortOrderId validates the current value and rejects stale ones."""
    metadata = table_v2.metadata

    # The metadata's own default sort order id always validates.
    AssertDefaultSortOrderId(default_sort_order_id=metadata.default_sort_order_id).validate(metadata)

    # Absent metadata fails the requirement.
    with pytest.raises(CommitFailedException, match="Requirement failed: current table metadata is missing"):
        AssertDefaultSortOrderId(default_sort_order_id=1).validate(None)

    # A stale expected id fails against the fixture's actual value (3).
    with pytest.raises(
        CommitFailedException, match="Requirement failed: default sort order id has changed: expected 1, found 3"
    ):
        AssertDefaultSortOrderId(default_sort_order_id=1).validate(metadata)
| |
|
| |
|
def test_correct_schema() -> None:
    """Verify that a scan projects the schema belonging to the selected snapshot.

    The fixture metadata carries two schemas (ids 0 and 1) and two snapshots:
    snapshot 123 references schema 0, snapshot 234 references a schema id (10)
    that is not present in the metadata, and the table's current schema id is 1.
    """
    table_metadata = TableMetadataV2(**{
        "format-version": 2,
        "table-uuid": "9c12d441-03fe-4693-9a96-a0705ddf69c1",
        "location": "s3://bucket/test/location",
        "last-sequence-number": 34,
        "last-updated-ms": 1602638573590,
        "last-column-id": 3,
        "current-schema-id": 1,
        "schemas": [
            # Schema 0: single column; schema 1: three columns with identifier fields.
            {"type": "struct", "schema-id": 0, "fields": [{"id": 1, "name": "x", "required": True, "type": "long"}]},
            {
                "type": "struct",
                "schema-id": 1,
                "identifier-field-ids": [1, 2],
                "fields": [
                    {"id": 1, "name": "x", "required": True, "type": "long"},
                    {"id": 2, "name": "y", "required": True, "type": "long"},
                    {"id": 3, "name": "z", "required": True, "type": "long"},
                ],
            },
        ],
        "default-spec-id": 0,
        "partition-specs": [{"spec-id": 0, "fields": [{"name": "x", "transform": "identity", "source-id": 1, "field-id": 1000}]}],
        "last-partition-id": 1000,
        "default-sort-order-id": 0,
        "sort-orders": [],
        "current-snapshot-id": 123,
        "snapshots": [
            {
                # Snapshot 234 deliberately points at a schema id that does not exist.
                "snapshot-id": 234,
                "timestamp-ms": 1515100955770,
                "sequence-number": 0,
                "summary": {"operation": "append"},
                "manifest-list": "s3://a/b/1.avro",
                "schema-id": 10,
            },
            {
                "snapshot-id": 123,
                "timestamp-ms": 1515100955770,
                "sequence-number": 0,
                "summary": {"operation": "append"},
                "manifest-list": "s3://a/b/1.avro",
                "schema-id": 0,
            },
        ],
    })

    t = Table(
        identifier=("default", "t1"),
        metadata=table_metadata,
        metadata_location="s3://../..",
        io=load_file_io(),
        catalog=NoopCatalog("NoopCatalog"),
    )

    # A scan without a snapshot id projects the table's current schema (id 1).
    projection_schema = t.scan().projection()
    assert projection_schema == Schema(
        NestedField(field_id=1, name="x", field_type=LongType(), required=True),
        NestedField(field_id=2, name="y", field_type=LongType(), required=True),
        NestedField(field_id=3, name="z", field_type=LongType(), required=True),
        identifier_field_ids=[1, 2],
    )
    assert projection_schema.schema_id == 1

    # Scanning snapshot 123 projects the schema that snapshot was written with (id 0).
    projection_schema = t.scan(snapshot_id=123).projection()
    assert projection_schema == Schema(
        NestedField(field_id=1, name="x", field_type=LongType(), required=True),
        identifier_field_ids=[],
    )
    assert projection_schema.schema_id == 0

    # Snapshot 234 references the missing schema id 10: a UserWarning is emitted.
    with pytest.warns(UserWarning, match="Metadata does not contain schema with id: 10"):
        t.scan(snapshot_id=234).projection()

    # An unknown snapshot id raises a ValueError.
    with pytest.raises(ValueError) as exc_info:
        _ = t.scan(snapshot_id=-1).projection()

    assert "Snapshot not found: -1" in str(exc_info.value)
| |
|
| |
|
def test_schema_mismatch_type(table_schema_simple: Schema) -> None:
    """A field type mismatch (int table field vs decimal arrow field) must raise.

    The error message is a rendered diff table; ``match`` treats it as a regex,
    hence the raw string and the escaped parentheses around the decimal type.
    """
    other_schema = pa.schema((
        pa.field("foo", pa.string(), nullable=True),
        pa.field("bar", pa.decimal128(18, 6), nullable=False),  # table expects int here
        pa.field("baz", pa.bool_(), nullable=True),
    ))

    # NOTE(review): the column padding inside this literal may have been collapsed
    # by reformatting — verify the exact spacing against the rendered error output.
    expected = r"""Mismatch in fields:
┏━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ ┃ Table field ┃ Dataframe field ┃
┡━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ ✅ │ 1: foo: optional string │ 1: foo: optional string │
│ ❌ │ 2: bar: required int │ 2: bar: required decimal\(18, 6\) │
│ ✅ │ 3: baz: optional boolean │ 3: baz: optional boolean │
└────┴──────────────────────────┴─────────────────────────────────┘
"""

    with pytest.raises(ValueError, match=expected):
        _check_schema_compatible(table_schema_simple, other_schema)
| |
|
| |
|
def test_schema_mismatch_nullability(table_schema_simple: Schema) -> None:
    """An optional (nullable) arrow field cannot satisfy a required table field."""
    other_schema = pa.schema((
        pa.field("foo", pa.string(), nullable=True),
        pa.field("bar", pa.int32(), nullable=True),  # table requires bar; nullable here is the mismatch
        pa.field("baz", pa.bool_(), nullable=True),
    ))

    # NOTE(review): the column padding inside this literal may have been collapsed
    # by reformatting — verify the exact spacing against the rendered error output.
    expected = """Mismatch in fields:
┏━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ ┃ Table field ┃ Dataframe field ┃
┡━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ ✅ │ 1: foo: optional string │ 1: foo: optional string │
│ ❌ │ 2: bar: required int │ 2: bar: optional int │
│ ✅ │ 3: baz: optional boolean │ 3: baz: optional boolean │
└────┴──────────────────────────┴──────────────────────────┘
"""

    with pytest.raises(ValueError, match=expected):
        _check_schema_compatible(table_schema_simple, other_schema)
| |
|
| |
|
def test_schema_mismatch_missing_field(table_schema_simple: Schema) -> None:
    """An arrow schema missing a required table column is reported as 'Missing'."""
    other_schema = pa.schema((
        pa.field("foo", pa.string(), nullable=True),
        # the required "bar" column is intentionally absent
        pa.field("baz", pa.bool_(), nullable=True),
    ))

    # NOTE(review): the column padding inside this literal may have been collapsed
    # by reformatting — verify the exact spacing against the rendered error output.
    expected = """Mismatch in fields:
┏━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ ┃ Table field ┃ Dataframe field ┃
┡━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ ✅ │ 1: foo: optional string │ 1: foo: optional string │
│ ❌ │ 2: bar: required int │ Missing │
│ ✅ │ 3: baz: optional boolean │ 3: baz: optional boolean │
└────┴──────────────────────────┴──────────────────────────┘
"""

    with pytest.raises(ValueError, match=expected):
        _check_schema_compatible(table_schema_simple, other_schema)
| |
|
| |
|
def test_schema_mismatch_additional_field(table_schema_simple: Schema) -> None:
    """An arrow schema carrying a column unknown to the table is rejected outright."""
    arrow_schema = pa.schema([
        pa.field("foo", pa.string(), nullable=True),
        pa.field("bar", pa.int32(), nullable=True),
        pa.field("baz", pa.bool_(), nullable=True),
        pa.field("new_field", pa.date32(), nullable=True),  # not present in the table schema
    ])

    # `match` is a regex, so the parentheses in the message are escaped.
    expected = r"PyArrow table contains more columns: new_field. Update the schema first \(hint, use union_by_name\)."

    with pytest.raises(ValueError, match=expected):
        _check_schema_compatible(table_schema_simple, arrow_schema)
| |
|
| |
|
def test_schema_downcast(table_schema_simple: Schema) -> None:
    """A large_string arrow field is compatible with the table's string type.

    Fixed: the original wrapped the call in ``try/except Exception`` and
    re-raised via ``pytest.fail`` with a message naming the wrong function
    (``_check_schema``). In pytest any unexpected exception already fails the
    test with a full traceback, so the compatibility check is called directly.
    """
    other_schema = pa.schema((
        pa.field("foo", pa.large_string(), nullable=True),  # large_string downcasts to string
        pa.field("bar", pa.int32(), nullable=False),
        pa.field("baz", pa.bool_(), nullable=True),
    ))

    # Must not raise: the schemas are considered compatible.
    _check_schema_compatible(table_schema_simple, other_schema)
| |
|
| |
|
def test_table_properties(example_table_metadata_v2: Dict[str, Any]) -> None:
    """Property keys and values are strings, and non-string values are coerced to str."""
    # The raw fixture carries only string keys and values...
    for key, value in example_table_metadata_v2["properties"].items():
        assert isinstance(key, str)
        assert isinstance(value, str)

    # ...and parsing the metadata keeps them as strings.
    metadata = TableMetadataV2(**example_table_metadata_v2)
    for key, value in metadata.properties.items():
        assert isinstance(key, str)
        assert isinstance(value, str)

    # An int property value in the input is coerced to str during validation.
    new_example_table_metadata_v2 = {**example_table_metadata_v2, "properties": {"property_name": 42}}
    assert isinstance(new_example_table_metadata_v2["properties"]["property_name"], int)
    new_metadata = TableMetadataV2(**new_example_table_metadata_v2)
    assert isinstance(new_metadata.properties["property_name"], str)
| |
|
| |
|
def test_table_properties_raise_for_none_value(example_table_metadata_v2: Dict[str, Any]) -> None:
    """A ``None`` property value is rejected during metadata validation."""
    metadata_with_none_property = {**example_table_metadata_v2, "properties": {"property_name": None}}

    with pytest.raises(ValidationError) as exc_info:
        TableMetadataV2(**metadata_with_none_property)

    assert "None type is not a supported value in properties: property_name" in str(exc_info.value)
| |
|
| |
|
def test_serialize_commit_table_request() -> None:
    """A CommitTableRequest survives a JSON serialization round-trip unchanged."""
    original = CommitTableRequest(
        requirements=(AssertTableUUID(uuid="4bfd18a3-74c6-478e-98b1-71c4c32f4163"),),
        identifier=TableIdentifier(namespace=["a"], name="b"),
    )

    round_tripped = CommitTableRequest.model_validate_json(original.model_dump_json())

    assert original == round_tripped
| |
|
| |
|
def test_partition_for_demo() -> None:
    """_determine_partitions slices an arrow table into one partition per identity key pair.

    Fixed: removed the redundant function-local ``import pyarrow as pa`` —
    pyarrow is already imported as ``pa`` at module level.
    """
    test_pa_schema = pa.schema([("year", pa.int64()), ("n_legs", pa.int64()), ("animal", pa.string())])
    # NOTE(review): the iceberg types (year=string, n_legs=int) do not match the
    # arrow types above (both int64); presumably only field ids matter to
    # _determine_partitions — confirm.
    test_schema = Schema(
        NestedField(field_id=1, name="year", field_type=StringType(), required=False),
        NestedField(field_id=2, name="n_legs", field_type=IntegerType(), required=True),
        NestedField(field_id=3, name="animal", field_type=StringType(), required=False),
        schema_id=1,
    )
    test_data = {
        "year": [2020, 2022, 2022, 2022, 2021, 2022, 2022, 2019, 2021],
        "n_legs": [2, 2, 2, 4, 4, 4, 4, 5, 100],
        "animal": ["Flamingo", "Parrot", "Parrot", "Horse", "Dog", "Horse", "Horse", "Brittle stars", "Centipede"],
    }
    arrow_table = pa.Table.from_pydict(test_data, schema=test_pa_schema)
    partition_spec = PartitionSpec(
        PartitionField(source_id=2, field_id=1002, transform=IdentityTransform(), name="n_legs_identity"),
        PartitionField(source_id=1, field_id=1001, transform=IdentityTransform(), name="year_identity"),
    )

    result = _determine_partitions(partition_spec, test_schema, arrow_table)

    # Every distinct (n_legs, year) pair becomes exactly one partition key.
    assert {table_partition.partition_key.partition for table_partition in result} == {
        Record(n_legs_identity=2, year_identity=2020),
        Record(n_legs_identity=100, year_identity=2021),
        Record(n_legs_identity=4, year_identity=2021),
        Record(n_legs_identity=4, year_identity=2022),
        Record(n_legs_identity=2, year_identity=2022),
        Record(n_legs_identity=5, year_identity=2019),
    }
    # No rows are lost or duplicated across the partition slices.
    assert (
        pa.concat_tables([table_partition.arrow_table_partition for table_partition in result]).num_rows == arrow_table.num_rows
    )
| |
|
| |
|
def test_identity_partition_on_multi_columns() -> None:
    """Fuzz two-column identity partitioning (including None keys) across many row orders.

    Fixed: removed the redundant function-local ``import pyarrow as pa``
    (pyarrow is already imported as ``pa`` at module level), hoisted
    ``import random`` to the top of the function, and built ``expected`` by
    direct tuple unpacking instead of ``range(len(...))`` indexing.
    """
    import random  # local: used only for shuffling inside this test

    test_pa_schema = pa.schema([("born_year", pa.int64()), ("n_legs", pa.int64()), ("animal", pa.string())])
    # NOTE(review): as in test_partition_for_demo, the iceberg types do not match
    # the arrow types; presumably only field ids drive partitioning — confirm.
    test_schema = Schema(
        NestedField(field_id=1, name="born_year", field_type=StringType(), required=False),
        NestedField(field_id=2, name="n_legs", field_type=IntegerType(), required=True),
        NestedField(field_id=3, name="animal", field_type=StringType(), required=False),
        schema_id=1,
    )
    # Duplicate the rows so every partition key appears more than once.
    test_rows = [
        (2021, 4, "Dog"),
        (2022, 4, "Horse"),
        (2022, 4, "Another Horse"),
        (2021, 100, "Centipede"),
        (None, 4, "Kirin"),
        (2021, None, "Fish"),
    ] * 2
    # One expected partition key per distinct (n_legs, year) pair, None included.
    expected = {Record(n_legs_identity=n_legs, year_identity=year) for year, n_legs, _ in test_rows}
    partition_spec = PartitionSpec(
        PartitionField(source_id=2, field_id=1002, transform=IdentityTransform(), name="n_legs_identity"),
        PartitionField(source_id=1, field_id=1001, transform=IdentityTransform(), name="year_identity"),
    )

    # Shuffle repeatedly so the result cannot depend on input row order.
    for _ in range(1000):
        random.shuffle(test_rows)
        test_data = {
            "born_year": [row[0] for row in test_rows],
            "n_legs": [row[1] for row in test_rows],
            "animal": [row[2] for row in test_rows],
        }
        arrow_table = pa.Table.from_pydict(test_data, schema=test_pa_schema)

        result = _determine_partitions(partition_spec, test_schema, arrow_table)

        assert {table_partition.partition_key.partition for table_partition in result} == expected
        concatenated_arrow_table = pa.concat_tables([table_partition.arrow_table_partition for table_partition in result])
        assert concatenated_arrow_table.num_rows == arrow_table.num_rows
        # Sorting both sides proves the partition slices are exactly the input rows.
        assert concatenated_arrow_table.sort_by([
            ("born_year", "ascending"),
            ("n_legs", "ascending"),
            ("animal", "ascending"),
        ]) == arrow_table.sort_by([("born_year", "ascending"), ("n_legs", "ascending"), ("animal", "ascending")])
| |
|