repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
rt-phb/Spooq | src/spooq2/__init__.py | <gh_stars>0
# from __future__ import absolute_import
from . import spooq2_logger
from spooq2.pipeline import PipelineFactory

# Initialize the package-wide "spooq2" logger exactly once; spooq2_logger
# records whether it has already been set up via its `initialized` flag,
# so repeated imports of this package do not add duplicate handlers.
if not spooq2_logger.initialized:
    spooq2_logger.initialize()

# Public API of the spooq2 package: the component sub-modules plus the
# PipelineFactory convenience class re-exported at package level.
__all__ = ["extractor", "transformer", "loader", "pipeline", "PipelineFactory"]
|
rt-phb/Spooq | tests/helpers/skip_conditions.py | import pytest
from pyspark.sql import SparkSession
import pytest_spark

# Warm up the Spark session at import time so that Spark functions can be
# imported and the running Spark version can be inspected while tests are
# being collected (before any fixture runs).
spark_conf = pytest_spark.config.SparkConfigBuilder.initialize()
spark_session = SparkSession.builder.config(conf=spark_conf).getOrCreate()

# pytest markers that skip a test unless it runs on the matching major
# Spark version (the first component of e.g. "2.4.5" or "3.0.1").
only_spark2 = pytest.mark.skipif(
    spark_session.version.split(".")[0] != "2", reason="This test supports only Spark 2"
)
only_spark3 = pytest.mark.skipif(
    spark_session.version.split(".")[0] != "3", reason="This test supports only Spark 3"
)
|
rt-phb/Spooq | src/spooq2/transformer/mapper.py | from __future__ import absolute_import
from builtins import str
from pyspark.sql.utils import AnalysisException
from pyspark.sql import functions as F
from pyspark.sql import types as T
from pyspark.sql.column import Column
from .transformer import Transformer
from .mapper_custom_data_types import _get_select_expression_for_custom_type
class Mapper(Transformer):
    """
    Constructs and applies a PySpark SQL expression, based on the provided mapping.

    Examples
    --------
    >>> from pyspark.sql import functions as F
    >>> from spooq2.transformer import Mapper
    >>>
    >>> mapping = [
    >>>     ("id",           "data.relationships.food.data.id",  "StringType"),
    >>>     ("version",      "data.version",                     "extended_string_to_int"),
    >>>     ("type",         "elem.attributes.type",             "StringType"),
    >>>     ("created_at",   "elem.attributes.created_at",       "extended_string_to_timestamp"),
    >>>     ("created_on",   "elem.attributes.created_at",       "extended_string_to_date"),
    >>>     ("process_date", F.current_timestamp(),              "DateType"),
    >>> ]
    >>> mapper = Mapper(mapping=mapping)
    >>> mapper.transform(input_df).printSchema()
    root
     |-- id: string (nullable = true)
     |-- version: integer (nullable = true)
     |-- type: string (nullable = true)
     |-- created_at: timestamp (nullable = true)
     |-- created_on: date (nullable = true)
     |-- process_date: date (nullable = false)

    Parameters
    ----------
    mapping : :class:`list` of :any:`tuple` containing three :any:`str` or :class:`~pyspark.sql.Column` or :mod:`~pyspark.sql.functions`
        This is the main parameter for this transformation. It gives information
        about the column names for the output DataFrame, the column names (paths)
        from the input DataFrame, and their data types. Custom data types are also supported, which can
        clean, pivot, anonymize, ... the data itself. Please have a look at the
        :py:mod:`spooq2.transformer.mapper_custom_data_types` module for more information.
    ignore_missing_columns : :any:`bool`, Defaults to False
        Specifies if the mapping transformation should use NULL if a referenced input
        column is missing in the provided DataFrame. If set to False, it will raise an exception.
    ignore_ambiguous_columns : :any:`bool`, Defaults to False
        It can happen that the input DataFrame has ambiguous column names (like "Key" vs "key") which will
        raise an exception with Spark when reading. This flag suppresses this exception and skips those
        affected columns.
    mode : :any:`str`, Defaults to "replace"
        Defines whether the mapping should fully replace the schema of the input DataFrame or just add to it.
        Following modes are supported:

        * replace
            The output schema is the same as the provided mapping.
            => output schema: new columns
        * append
            The columns provided in the mapping are added at the end of the input schema. If a column already
            exists in the input DataFrame, its position is kept.
            => output schema: input columns + new columns
        * prepend
            The columns provided in the mapping are added at the beginning of the input schema. If a column
            already exists in the input DataFrame, its position is kept.
            => output schema: new columns + input columns

    Note
    ----
    Let's talk about Mappings:

    The mapping should be a list of tuples that contain all necessary information per column.

    * Column Name: :any:`str`
        Sets the name of the column in the resulting output DataFrame.
    * Source Path / Name / Column / Function: :any:`str` or :class:`~pyspark.sql.Column` or :mod:`~pyspark.sql.functions`
        Points to the name of the column in the input DataFrame. If the input
        is a flat DataFrame, it will essentially be the column name. If it is of complex
        type, it will point to the path of the actual value. For example: ``data.relationships.sample.data.id``,
        where id is the value we want. It is also possible to directly pass
        a PySpark Column which will get evaluated. This can contain arbitrary logic supported by Spark. For example:
        ``F.current_date()`` or ``F.when(F.col("size") == 180, F.lit("tall")).otherwise(F.lit("tiny"))``.
    * DataType: :any:`str` or :class:`~pyspark.sql.types.DataType`
        DataTypes can be types from :any:`pyspark.sql.types`, selected custom datatypes or
        injected, ad-hoc custom datatypes.
        The datatype will be interpreted as a PySpark built-in if it is a member of :any:`pyspark.sql.types` module.
        If it is not an importable PySpark data type, a method to construct the statement will be
        called by the data type's name.

    Note
    ----
    The available input columns can vary from batch to batch if you use schema inference
    (f.e. on json data) for the extraction. Ignoring missing columns on the input DataFrame is
    highly encouraged in this case. Although, if you have tight control over the structure
    of the extracted DataFrame, setting `ignore_missing_columns` to True is advised
    as it can uncover typos and bugs.

    Note
    ----
    Please see :py:mod:`spooq2.transformer.mapper_custom_data_types` for all available custom
    data types and how to inject your own.

    Note
    ----
    Attention: Decimal is NOT SUPPORTED by Hive! Please use Double instead!
    """

    def __init__(self, mapping, ignore_missing_columns=False, ignore_ambiguous_columns=False, mode="replace"):
        super(Mapper, self).__init__()
        self.mapping = mapping
        self.ignore_missing_columns = ignore_missing_columns
        self.ignore_ambiguous_columns = ignore_ambiguous_columns
        self.mode = mode

    def transform(self, input_df):
        """
        Applies the configured mapping to the input DataFrame and returns the
        transformed DataFrame according to the configured ``mode``.
        """
        self.logger.info("Generating SQL Select-Expression for Mapping...")
        self.logger.debug("Input Schema/Mapping:")
        self.logger.debug("\n" + "\n".join(["\t".join(map(str, mapping_line)) for mapping_line in self.mapping]))
        input_columns = input_df.columns
        select_expressions = []
        with_column_expressions = []
        for (name, source_column, data_type) in self.mapping:
            self.logger.debug(
                "generating Select statement for attribute: {nm}".format(nm=name)
            )
            source_column = self._get_spark_column(source_column, name, input_df)
            if source_column is None:
                # Ambiguous column skipped (ignore_ambiguous_columns=True)
                continue
            data_type, data_type_is_spark_builtin = self._get_spark_data_type(data_type)
            select_expression = self._get_select_expression(name, source_column, data_type, data_type_is_spark_builtin)
            self.logger.debug("Select-Expression for Attribute {nm}: {sql_expr}"
                              .format(nm=name, sql_expr=str(select_expression)))
            if self.mode != "replace" and name in input_columns:
                # In append/prepend mode, columns that already exist keep their
                # position and are replaced in-place via withColumn at the end.
                with_column_expressions.append((name, select_expression))
            else:
                select_expressions.append(select_expression)
        self.logger.info("SQL Select-Expression for new mapping generated!")
        self.logger.debug("SQL Select-Expressions for new mapping:\n" + "\n".join(str(select_expressions).split(",")))
        self.logger.debug("SQL WithColumn-Expressions for new mapping: " + str(with_column_expressions))
        if self.mode == "prepend":
            df_to_return = input_df.select(select_expressions + ["*"])
        elif self.mode == "append":
            df_to_return = input_df.select(["*"] + select_expressions)
        elif self.mode == "replace":
            df_to_return = input_df.select(select_expressions)
        else:
            # BUGFIX: the message is now actually formatted with the offending
            # mode value (the "{val}" placeholder was previously raised verbatim)
            # and a separating space was added between the two message parts.
            exception_message = (
                "Only 'prepend', 'append' and 'replace' are allowed for Mapper mode! "
                "Value: '{val}' was used as mode for the Mapper transformer."
            ).format(val=self.mode)
            self.logger.exception(exception_message)
            raise ValueError(exception_message)
        if with_column_expressions:
            for name, expression in with_column_expressions:
                df_to_return = df_to_return.withColumn(name, expression)
        return df_to_return

    def _get_spark_column(self, source_column, name, input_df):
        """
        Returns the provided source column as a Pyspark.sql.Column and marks if it is missing or not.
        Supports source column definition as a string or a Pyspark.sql.Column (including functions).
        """
        try:
            input_df.select(source_column)
            if isinstance(source_column, str):
                source_column = F.col(source_column)
        except AnalysisException as e:
            if isinstance(source_column, str) and self.ignore_missing_columns:
                # logger.warning instead of the deprecated logger.warn
                self.logger.warning(f"Missing column ({str(source_column)}) replaced with NULL (via ignore_missing_columns=True): {e.desc}")
                source_column = F.lit(None)
            elif e.desc.startswith("Ambiguous reference to fields") and self.ignore_ambiguous_columns:
                self.logger.warning(f"Exception ignored (via ignore_ambiguous_columns=True) for column \"{str(source_column)}\": {e.desc}")
                return None
            else:
                self.logger.exception(
                    "Column: \"{}\" cannot be resolved ".format(str(source_column)) +
                    "but is referenced in the mapping by column: \"{}\".\n".format(name))
                raise e
        return source_column

    @staticmethod
    def _get_spark_data_type(data_type):
        """
        Returns the provided data_type as a Pyspark.sql.type.DataType (for spark-built-ins)
        or as a string (for custom spooq transformations) and marks if it is built-in or not.
        Supports source column definition as a string or a Pyspark.sql.Column (including functions).
        """
        if isinstance(data_type, T.DataType):
            data_type_is_spark_builtin = True
        elif isinstance(data_type, str):
            # Allow both "StringType" and "StringType()" spellings
            data_type = data_type.replace("()", "")
            if hasattr(T, data_type):
                data_type_is_spark_builtin = True
                data_type = getattr(T, data_type)()
            else:
                # Not a PySpark type -> treated as the name of a custom spooq type
                data_type_is_spark_builtin = False
        else:
            raise ValueError(
                "data_type not supported! class: \"{}\", name: \"{}\"".format(
                    type(data_type).__name__, str(data_type)))
        return data_type, data_type_is_spark_builtin

    @staticmethod
    def _get_select_expression(name, source_column, data_type,
                               data_type_is_spark_builtin):
        """
        Returns a valid pyspark sql select-expression with cast and alias, depending on the input parameters.
        """
        if data_type_is_spark_builtin:
            return source_column.cast(data_type).alias(name)
        else:  # Custom Data Type
            return _get_select_expression_for_custom_type(source_column, name, data_type)
|
rt-phb/Spooq | src/spooq2/transformer/threshold_cleaner.py | from __future__ import absolute_import
import sys
if sys.version_info.major > 2:
# This is needed for python 2 as otherwise pyspark raises an exception for following command:
# data_type = input_df.schema[str(column_name)].dataType
# Pyspark checks if the input is a string, which does not work
# with the new strings from builtins
from builtins import str
import pyspark.sql.functions as F
import pyspark.sql.types as sql_types
from pyspark.sql.column import Column
from .transformer import Transformer
class ThresholdCleaner(Transformer):
    """
    Sets outliers within a DataFrame to a default value.
    Takes a dictionary with valid value ranges for each column to be cleaned.

    Example
    -------
    >>> transformer = ThresholdCleaner(
    >>>     thresholds={
    >>>         "created_at": {
    >>>             "min": 0,
    >>>             "max": 1580737513,
    >>>             "default": pyspark.sql.functions.current_date()
    >>>         },
    >>>         "size_cm": {
    >>>             "min": 70,
    >>>             "max": 250,
    >>>             "default": None
    >>>         },
    >>>     }
    >>> )

    Parameters
    ----------
    thresholds : :py:class:`dict`
        Dictionary containing column names and respective valid ranges

    Returns
    -------
    :any:`pyspark.sql.DataFrame`
        The transformed DataFrame

    Raises
    ------
    :any:`exceptions.ValueError`
        Threshold-based cleaning only supports Numeric, Date and Timestamp Types!
        Column with name: {col_name} and type of: {col_type} was provided

    Warning
    -------
    Only Numeric, TimestampType, and DateType data types are supported!
    """

    def __init__(self, thresholds=None):
        super(ThresholdCleaner, self).__init__()
        # BUGFIX: avoid a shared mutable default argument ({}); each instance
        # now gets its own dictionary when no thresholds are provided.
        self.thresholds = thresholds if thresholds is not None else {}
        self.logger.debug("Range Definitions: " + str(self.thresholds))

    def transform(self, input_df):
        """
        Replaces values outside [min, max] per configured column with the
        column's configured default (NULL if none given), preserving the
        original column order and data types.
        """
        self.logger.debug("input_df Schema: " + input_df._jdf.schema().treeString())
        ordered_column_names = input_df.columns
        for column_name, value_range in list(self.thresholds.items()):
            data_type = input_df.schema[str(column_name)].dataType
            substitute = value_range.get("default", None)
            if not isinstance(substitute, Column):
                substitute = F.lit(substitute)
            if not isinstance(data_type, (sql_types.NumericType,
                                          sql_types.DateType,
                                          sql_types.TimestampType)):
                # BUGFIX: raise with a single formatted message instead of a
                # tuple of strings (which rendered as a Python tuple).
                raise ValueError(
                    "Threshold-based cleaning only supports Numeric, Date and Timestamp Types!\n"
                    "Column with name: {col_name} and type of: {col_type} was provided".format(
                        col_name=column_name, col_type=data_type
                    )
                )
            self.logger.debug(
                "Ranges for column " + column_name + ": " + str(value_range)
            )
            input_df = input_df.withColumn(
                column_name,
                F.when(
                    input_df[column_name].between(
                        value_range["min"], value_range["max"]
                    ),
                    input_df[column_name],
                )
                .otherwise(substitute)
                # Cast back to the original type: otherwise() may widen the type
                .cast(data_type),
            )
        return input_df.select(ordered_column_names)
|
rt-phb/Spooq | tests/data/test_fixtures/__init__.py | from pyspark.sql import SparkSession
import pytest_spark

# Warm up the Spark session at import time so that Spark functions can be
# imported by the fixture modules in this package. The returned session is
# intentionally not bound to a name; getOrCreate() caches it globally.
spark_conf = pytest_spark.config.SparkConfigBuilder.initialize()
SparkSession.builder.config(conf=spark_conf).getOrCreate()
|
rt-phb/Spooq | src/spooq2/extractor/__init__.py | from .jdbc import JDBCExtractorIncremental, JDBCExtractorFullLoad
from .json_files import JSONExtractor

# Public extractor classes exposed by the spooq2.extractor package.
__all__ = [
    "JDBCExtractorIncremental",
    "JDBCExtractorFullLoad",
    "JSONExtractor",
]
|
rt-phb/Spooq | src/spooq2/loader/hive_loader.py | from __future__ import absolute_import
from past.builtins import basestring
from pyspark.sql import SparkSession
from pyspark.sql import types as sql_types
from pyspark.sql.functions import lit
from .loader import Loader
class HiveLoader(Loader):
    """
    Persists a PySpark DataFrame into a Hive Table.

    Examples
    --------
    >>> HiveLoader(
    >>>     db_name="users_and_friends",
    >>>     table_name="friends_partitioned",
    >>>     partition_definitions=[{
    >>>         "column_name": "dt",
    >>>         "column_type": "IntegerType",
    >>>         "default_value": 20200201}],
    >>>     clear_partition=True,
    >>>     repartition_size=10,
    >>>     overwrite_partition_value=False,
    >>>     auto_create_table=False,
    >>> ).load(input_df)

    >>> HiveLoader(
    >>>     db_name="users_and_friends",
    >>>     table_name="all_friends",
    >>>     partition_definitions=[],
    >>>     repartition_size=200,
    >>>     auto_create_table=True,
    >>> ).load(input_df)

    Parameters
    ----------
    db_name : :any:`str`
        The database name to load the data into.
    table_name : :any:`str`
        The table name to load the data into. The database name must not be included in this
        parameter as it is already defined in the `db_name` parameter.
    partition_definitions : :any:`list` of :py:class:`dict`
        (Defaults to `[{"column_name": "dt", "column_type": "IntegerType", "default_value": None}]`).

        * **column_name** (:any:`str`) - The Column's Name to partition by.
        * **column_type** (:any:`str`) - The PySpark SQL DataType for the Partition Value as
          a String. This should normally either be 'IntegerType()' or 'StringType()'
        * **default_value** (:any:`str` or :any:`int`) - If `column_name` does not contain
          a value or `overwrite_partition_value` is set, this value will be used for the
          partitioning
    clear_partition : :any:`bool`, (Defaults to True)
        This flag tells the Loader to delete the defined partitions before
        inserting the input DataFrame into the target table. Has no effect if no partitions are
        defined.
    repartition_size : :any:`int`, (Defaults to 40)
        The DataFrame will be repartitioned on Spark level before inserting into the table.
        This effects the number of output files on which the Hive table is based.
    auto_create_table : :any:`bool`, (Defaults to True)
        Whether the target table will be created if it does not yet exist.
    overwrite_partition_value : :any:`bool`, (Defaults to True)
        Defines whether the values of columns defined in `partition_definitions` should
        explicitly set by default_values.

    Raises
    ------
    :any:`exceptions.AssertionError`:
        partition_definitions has to be a list containing dicts. Expected dict content:
        'column_name', 'column_type', 'default_value' per partition_definitions item.
    :any:`exceptions.AssertionError`:
        Items of partition_definitions have to be dictionaries.
    :any:`exceptions.AssertionError`:
        No column name set!
    :any:`exceptions.AssertionError`:
        Not a valid (PySpark) datatype for the partition column {name} | {type}.
    :any:`exceptions.AssertionError`:
        `clear_partition` is only supported if `overwrite_partition_value` is also enabled.
        This would otherwise result in clearing partitions on basis of dynamically values
        (from DataFrame) instead of explicitly defining the partition(s) to clear.
    """

    def __init__(
        self,
        db_name,
        table_name,
        partition_definitions=None,
        clear_partition=True,
        repartition_size=40,
        auto_create_table=True,
        overwrite_partition_value=True,
    ):
        super(HiveLoader, self).__init__()
        # BUGFIX: the previous mutable default argument (a list of dicts) was
        # shared between all instances AND mutated by
        # _assert_partition_definitions_is_valid (it replaces the "column_type"
        # string with the resolved class), which broke every subsequent
        # instantiation that relied on the default. A fresh list is built here
        # per call instead.
        if partition_definitions is None:
            partition_definitions = [{"column_name": "dt", "column_type": "IntegerType", "default_value": None}]
        self._assert_partition_definitions_is_valid(partition_definitions)
        self.partition_definitions = partition_definitions
        self.db_name = db_name
        self.table_name = table_name
        self.full_table_name = db_name + "." + table_name
        self.repartition_size = repartition_size
        if clear_partition and not overwrite_partition_value:
            # BUGFIX: raise a single formatted message instead of a tuple of
            # strings (ValueError(a, b, c) rendered as a Python tuple).
            raise ValueError(
                "clear_partition is only supported if overwrite_partition_value is also enabled. "
                "This would otherwise result in clearing partitions on basis of dynamically values "
                "(from dataframe) instead of explicitly defining the partition(s) to clear"
            )
        self.clear_partition = clear_partition
        self.overwrite_partition_value = overwrite_partition_value
        self.auto_create_table = auto_create_table
        self.spark = (
            SparkSession.Builder()
            .enableHiveSupport()
            # BUGFIX: the app name previously claimed "spooq2.extractor"
            # although this class is a loader.
            .appName("spooq2.loader: {nm}".format(nm=self.name))
            .getOrCreate()
        )

    def _assert_partition_definitions_is_valid(self, definitions):
        """Validates the partition definitions and resolves the PySpark type.

        NOTE: mutates each definition dict in place, replacing the
        "column_type" string with the corresponding pyspark.sql.types class.
        """
        assert isinstance(definitions, list), (
            "partition_definitions has to be a list containing dicts.\n"
            "Expected dict content: 'column_name', 'column_type', 'default_value' per partition_definitions item."
        )
        for dct in definitions:
            assert isinstance(dct, dict), "Items of partition_definitions have to be dictionaries"
            assert dct["column_name"], "No column name set!"
            assert isinstance(dct["column_type"], basestring) and hasattr(
                sql_types, dct["column_type"]
            ), "Not a valid (PySpark) datatype for the partition column {name} | {type}".format(
                name=dct["column_name"], type=dct["column_type"]
            )
            dct["column_type"] = getattr(sql_types, dct["column_type"])

    def load(self, input_df):
        """Writes the DataFrame into the Hive table, creating/clearing
        partitions as configured."""
        # Required for partitioned inserts without a static partition spec
        self.spark.conf.set("hive.exec.dynamic.partition", "true")
        self.spark.conf.set("hive.exec.dynamic.partition.mode", "nonstrict")
        input_df = input_df.repartition(self.repartition_size)
        input_df = self._add_partition_definition_to_dataframe(input_df)
        if self._table_exists():
            output_df = self.spark.table(self.full_table_name)
            # insertInto matches columns by position, so order must align
            assert input_df.columns == output_df.columns, "Input columns don't match the columns of the Hive table"
            if self.clear_partition:
                self._clear_hive_partition()
            input_df.write.insertInto(self.full_table_name)
        elif self.auto_create_table:
            input_df.write.partitionBy(*[dct["column_name"] for dct in self.partition_definitions]).saveAsTable(
                self.full_table_name
            )
        else:
            raise Exception(
                "Table: {tbl} does not exist and `auto_create_table` is set to False".format(tbl=self.full_table_name)
            )

    def _add_partition_definition_to_dataframe(self, input_df):
        """Adds/overwrites the partition columns with their default values
        where required."""
        for partition_definition in self.partition_definitions:
            if partition_definition["column_name"] not in input_df.columns or self.overwrite_partition_value:
                # `or ... == 0` keeps the numeric partition value 0 valid
                assert ("default_value" in list(partition_definition.keys()) and
                        (partition_definition["default_value"] or partition_definition["default_value"] == 0)
                        ), "No default partition value set for partition column: {name}!\n".format(
                    name=partition_definition["column_name"]
                )
                input_df = input_df.withColumn(
                    partition_definition["column_name"],
                    lit(partition_definition["default_value"]).cast(partition_definition["column_type"]()),
                )
        return input_df

    def _table_exists(self):
        """Returns True if `table_name` exists within `db_name`."""
        table_exists = False
        self.spark.catalog.setCurrentDatabase(self.db_name)
        for tbl in self.spark.catalog.listTables():
            if self.table_name == tbl.name:
                table_exists = True
        return table_exists

    def _clear_hive_partition(self):
        """Drops the partition(s) defined by the default values, if present."""
        def _construct_partition_query_string(partition_definitions):
            partition_queries = []
            for dct in partition_definitions:
                assert "default_value" in list(dct.keys()), "clear_partitions needs a default_value per partition definition!"
                if issubclass(dct["column_type"], sql_types.NumericType):
                    partition_queries.append("{part} = {dt}".format(part=dct["column_name"], dt=dct["default_value"]))
                else:
                    # Non-numeric partition values need to be quoted
                    partition_queries.append("{part} = '{dt}'".format(part=dct["column_name"], dt=dct["default_value"]))
            return ", ".join(partition_queries)

        partition_query = _construct_partition_query_string(self.partition_definitions)
        command = """ALTER TABLE {tbl} DROP IF EXISTS PARTITION ({part_def})""".format(
            tbl=self.full_table_name, part_def=partition_query
        )
        self.logger.debug("Command used to clear Partition: {cmd}".format(cmd=command))
        self.spark.sql(command)
|
rt-phb/Spooq | src/spooq2/pipeline/factory.py | <filename>src/spooq2/pipeline/factory.py
"""
To decrease the complexity of building data pipelines for data engineers, an expert system or
business rules engine can be used to automatically build and configure a data pipeline based on
context variables, groomed metadata, and relevant rules.
"""
from __future__ import print_function
from builtins import object
import requests
import json
from spooq2.pipeline import Pipeline
import spooq2.extractor as E
import spooq2.transformer as T
import spooq2.loader as L
class PipelineFactory(object):
    """
    Provides an interface to automatically construct pipelines for Spooq.

    Example
    -------
    >>> pipeline_factory = PipelineFactory()
    >>>
    >>> # Fetch user data set with applied mapping, filtering,
    >>> # and cleaning transformers
    >>> df = pipeline_factory.execute({
    >>>     "entity_type": "user",
    >>>     "date": "2018-10-20",
    >>>     "time_range": "last_day"})
    >>>
    >>> # Load user data partition with applied mapping, filtering,
    >>> # and cleaning transformers to a hive database
    >>> pipeline_factory.execute({
    >>>     "entity_type": "user",
    >>>     "date": "2018-10-20",
    >>>     "batch_size": "daily"})

    Attributes
    ----------
    url : :any:`str`, (Defaults to "http://localhost:5000/pipeline/get")
        The end point of an expert system which will be called to infer names and parameters.

    Note
    ----
    PipelineFactory is only responsible for querying an expert system with provided parameters
    and constructing a Spooq pipeline out of the response. It does not have any reasoning capabilities
    itself! It requires therefore a HTTP service responding with a JSON object containing following structure:

    ::

        {
            "extractor": {"name": "Type1Extractor", "params": {"key 1": "val 1", "key N": "val N"}},
            "transformers": [
                {"name": "Type1Transformer", "params": {"key 1": "val 1", "key N": "val N"}},
                {"name": "Type2Transformer", "params": {"key 1": "val 1", "key N": "val N"}},
            ],
            "loader": {"name": "Type1Loader", "params": {"key 1": "val 1", "key N": "val N"}}
        }

    Hint
    ----
    There is an experimental implementation of an expert system which complies with the requirements
    of PipelineFactory called `spooq_rules`. If you are interested, please ask the author of Spooq about it.
    """

    def __init__(self, url="http://localhost:5000/pipeline/get"):
        self.url = url

    def execute(self, context_variables):
        """
        Fetches a ready-to-go pipeline instance via :py:meth:`get_pipeline`
        and executes it.

        Parameters
        ----------
        context_variables : :py:class:`dict`
            These collection of parameters should describe the current context about the use case
            of the pipeline. Please see the examples of the PipelineFactory class'
            documentation.

        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            If the loader component is by-passed (in the case of ad_hoc use cases).
        :any:`None`
            If the loader component does not return a value (in the case of persisting data).
        """
        pipeline = self.get_pipeline(context_variables)
        return pipeline.execute()

    def get_metadata(self, context_variables):
        """
        Sends a POST request to the defined endpoint (`url`) containing the
        supplied context variables.

        Parameters
        ----------
        context_variables : :py:class:`dict`
            These collection of parameters should describe the current context about the use case
            of the pipeline. Please see the examples of the PipelineFactory class'
            documentation.

        Returns
        -------
        :py:class:`dict`
            Names and parameters of each ETL component to construct a Spooq pipeline
        """
        return requests.post(self.url, json=context_variables).json()

    def get_pipeline(self, context_variables):
        """
        Fetches the necessary metadata via :py:meth:`get_metadata` and
        returns a ready-to-go pipeline instance.

        Parameters
        ----------
        context_variables : :py:class:`dict`
            These collection of parameters should describe the current context about the use case
            of the pipeline. Please see the examples of the PipelineFactory class'
            documentation.

        Returns
        -------
        :py:class:`~spooq2.Pipeline`
            A Spooq pipeline instance which is fully configured and can still be
            adapted and consequently executed.
        """
        metadata = self.get_metadata(context_variables)
        pipeline = Pipeline()
        extractor = self._get_extractor(metadata)
        pipeline.set_extractor(extractor)
        transformers = self._get_transformers(metadata)
        pipeline.add_transformers(transformers)
        loader = self._get_loader(metadata)
        if not loader:
            # "ByPass" loader requested -> pipeline returns the DataFrame instead
            pipeline.bypass_loader = True
        else:
            pipeline.set_loader(loader)
        return pipeline

    def _get_extractor(self, magic_data):
        """Instantiates the extractor class named in the metadata."""
        extractor_class = getattr(E, magic_data.get("extractor", {}).get("name", ""))
        # BUGFIX: default to an empty dict (not "") so ** unpacking
        # does not crash when "params" is missing.
        extractor_params = magic_data.get("extractor", {}).get("params", {})
        return extractor_class(**extractor_params)

    def _get_transformers(self, magic_data):
        """Instantiates all transformer classes named in the metadata, in order."""
        transformers = []
        for transformer in magic_data["transformers"]:
            transformer_class = getattr(T, transformer["name"])
            # BUGFIX: removed a leftover debug `print(transformer_class)`
            transformers.append(transformer_class(**transformer["params"]))
        return transformers

    def _get_loader(self, magic_data):
        """Instantiates the loader class named in the metadata; returns False
        for the special "ByPass" loader."""
        loader_name = magic_data.get("loader", {}).get("name", "")
        if loader_name == "ByPass":
            return False
        loader_class = getattr(L, loader_name)
        # BUGFIX: default to an empty dict (not "") for safe ** unpacking.
        loader_params = magic_data.get("loader", {}).get("params", {})
        return loader_class(**loader_params)
|
rt-phb/Spooq | src/spooq2/loader/loader.py | """
Loaders take a :py:class:`pyspark.sql.DataFrame` as an input and save it to a sink.
Each Loader class has to have a `load` method which takes a DataFrame as single parameter.
Possible Loader sinks can be **Hive Tables**, **Kudu Tables**, **HBase Tables**, **JDBC
Sinks** or **ParquetFiles**.
"""
from builtins import object
import logging
class Loader(object):
    """
    Base Class of Loader Objects.

    Attributes
    ----------
    name : :any:`str`
        The concrete class' name, taken from the instance's type — i.e. a
        subclass instance reports the subclass name.
    logger : :any:`logging.Logger`
        Shared, package-level logger ("spooq2") used by all loader instances.
    """

    def __init__(self):
        self.name = self.__class__.__name__
        self.logger = logging.getLogger("spooq2")

    def load(self, input_df):
        """
        Persists data from a PySpark DataFrame to a target table.

        Parameters
        ----------
        input_df : :any:`pyspark.sql.DataFrame`
            Input DataFrame which has to be loaded to a target destination.

        Note
        ----
        This method takes only a single DataFrame as an input parameter. All other needed
        parameters are defined in the initialization of the Loader object.
        """
        raise NotImplementedError("This method has to be implemented in the subclasses")

    def __str__(self):
        return f"Loader Object of Class {self.name}"
|
rt-phb/Spooq | docs/source/base_classes/create_loader/parquet.py | <filename>docs/source/base_classes/create_loader/parquet.py
from pyspark.sql import functions as F
from loader import Loader
class ParquetLoader(Loader):
    """
    This is a simplified example on how to implement a new loader class.
    Please take your time to write proper docstrings as they are automatically
    parsed via Sphinx to build the HTML and PDF documentation.
    Docstrings use the style of Numpy (via the napoleon plug-in).

    This class uses the :meth:`pyspark.sql.DataFrameWriter.parquet` method internally.

    Examples
    --------
    input_df = some_extractor_instance.extract()
    output_df = some_transformer_instance.transform(input_df)
    ParquetLoader(
        path="data/parquet_files",
        partition_by="dt",
        explicit_partition_values=20200201,
        compression_codec="gzip"
    ).load(output_df)

    Parameters
    ----------
    path: :any:`str`
        The path to where the loader persists the output parquet files.
        If partitioning is set, this will be the base path where the partitions
        are stored.
    partition_by: :any:`str` or :any:`list` of (:any:`str`)
        The column name or names by which the output should be partitioned.
        If the partition_by parameter is set to None, no partitioning will be
        performed.
        Defaults to "dt"
    explicit_partition_values: :any:`str` or :any:`int`
        or :any:`list` of (:any:`str` and :any:`int`)
        Only allowed if partition_by is not None.
        If explicit_partition_values is not None, the dataframe will
        * overwrite the partition_by columns values if it already exists or
        * create and fill the partition_by columns if they do not yet exist
        Defaults to None
    compression_codec: :any:`str`
        The compression codec used for the parquet output files.
        Defaults to "snappy"

    Raises
    ------
    :any:`exceptions.AssertionError`:
        explicit_partition_values can only be used when partition_by is not None
    :any:`exceptions.AssertionError`:
        explicit_partition_values and partition_by must have the same length
    """

    def __init__(self, path, partition_by="dt", explicit_partition_values=None, compression_codec="snappy"):
        super(ParquetLoader, self).__init__()
        self.path = path
        self.partition_by = partition_by
        self.explicit_partition_values = explicit_partition_values
        self.compression_codec = compression_codec
        if explicit_partition_values is not None:
            # BUGFIX: these were previously written as `assert (cond, msg)` —
            # a non-empty tuple which is always truthy, so they never fired.
            assert partition_by is not None, \
                "explicit_partition_values can only be used when partition_by is not None"
            # Length comparison only makes sense when both sides are lists;
            # scalar values are paired with a single partition_by column.
            if isinstance(explicit_partition_values, list):
                assert len(partition_by) == len(explicit_partition_values), \
                    "explicit_partition_values and partition_by must have the same length"

    def load(self, input_df):
        """Writes the DataFrame as (optionally partitioned) parquet files."""
        self.logger.info("Persisting DataFrame as Parquet Files to " + self.path)
        if isinstance(self.explicit_partition_values, list):
            for (k, v) in zip(self.partition_by, self.explicit_partition_values):
                input_df = input_df.withColumn(k, F.lit(v))
        # BUGFIX: `basestring` does not exist on Python 3 -> use `str`
        elif isinstance(self.explicit_partition_values, str):
            input_df = input_df.withColumn(self.partition_by, F.lit(self.explicit_partition_values))
        input_df.write.parquet(
            path=self.path,
            partitionBy=self.partition_by,
            compression=self.compression_codec
        )
|
rt-phb/Spooq | src/spooq2/extractor/json_files.py | from __future__ import absolute_import
from pyspark.sql import SparkSession
from py4j.protocol import Py4JJavaError
from .extractor import Extractor
from .tools import remove_hdfs_prefix, fix_suffix, infer_input_path_from_partition
class JSONExtractor(Extractor):
    """
    The JSONExtractor class provides an API to extract data stored as JSON format,
    deserializes it into a PySpark dataframe and returns it. Currently only
    single-line JSON files are supported, stored either as textFile or sequenceFile.

    Examples
    --------
    >>> from spooq2 import extractor as E

    >>> extractor = E.JSONExtractor(input_path="tests/data/schema_v1/sequenceFiles")
    >>> extractor.input_path == "tests/data/schema_v1/sequenceFiles" + "/*"
    True

    >>> extractor = E.JSONExtractor(
    >>>     base_path="tests/data/schema_v1/sequenceFiles",
    >>>     partition="20200201"
    >>> )
    >>> extractor.input_path == "tests/data/schema_v1/sequenceFiles" + "/20/02/01" + "/*"
    True

    Parameters
    ----------
    input_path : :any:`str`
        The path from which the JSON files should be loaded ("/\\*" will be added if omitted)
    base_path : :any:`str`
        Spooq tries to infer the ``input_path`` from the ``base_path`` and the ``partition`` if the
        ``input_path`` is missing.
    partition : :any:`str` or :any:`int`
        Spooq tries to infer the ``input_path`` from the ``base_path`` and the ``partition`` if the
        ``input_path`` is missing.
        Only daily partitions in the form of "YYYYMMDD" are supported. e.g., "20200201" => <base_path> + "/20/02/01/\\*"

    Returns
    -------
    :any:`pyspark.sql.DataFrame`
        The extracted data set as a PySpark DataFrame

    Raises
    ------
    :any:`AttributeError`
        Please define either ``input_path`` or ``base_path`` and ``partition``

    Warning
    ---------
    Currently only single-line JSON files stored as SequenceFiles or TextFiles are supported!

    Note
    ----
    The init method checks which input parameters are provided and derives the final input_path
    from them accordingly.

    If ``input_path`` is not :any:`None`:
        Cleans ``input_path`` and returns it as the final ``input_path``

    Elif ``base_path`` and ``partition`` are not :any:`None`:
        Cleans ``base_path``, infers the sub path from the ``partition``
        and returns the combined string as the final ``input_path``

    Else:
        Raises an :any:`AttributeError`
    """

    def __init__(self, input_path=None, base_path=None, partition=None):
        super(JSONExtractor, self).__init__()
        # Resolve the definitive input path eagerly so that a mis-configured
        # instance fails at construction time rather than at extract() time.
        self.input_path = self._get_path(input_path=input_path,
                                         base_path=base_path,
                                         partition=partition)
        self.base_path = base_path
        self.partition = partition
        # Reuses an existing SparkSession if one is already running (getOrCreate).
        self.spark = SparkSession.Builder()\
            .enableHiveSupport()\
            .appName('spooq2.extractor: {nm}'.format(nm=self.name))\
            .getOrCreate()

    def extract(self):
        """
        This is the Public API Method to be called for all classes of Extractors

        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            Complex PySpark DataFrame deserialized from the input JSON Files
        """
        self.logger.info('Loading Raw RDD from: ' + self.input_path)
        rdd_raw = self._get_raw_rdd(self.input_path)
        rdd_strings = self._get_values_as_string_rdd(rdd_raw)
        return self._convert_rdd_to_df(rdd_strings)

    def _get_path(self, input_path=None, base_path=None, partition=None):
        """
        Checks which input parameters are provided and derives the final input_path from them.

        If :py:data:`input_path` is not :any:`None`:
            Cleans :py:data:`input_path` and returns it as the final input_path
        If :py:data:`base_path` and :py:data:`partition` are not :any:`None`:
            Cleans :py:data:`base_path`, infers the sub path from the :py:data:`partition` and returns
            the combined String as the final input_path
        If none of the above holds true, an Exception is raised

        Parameters
        ----------
        input_path : :any:`str`
        base_path : :any:`str`
        partition : :any:`str` or :any:`int`

        Returns
        -------
        :any:`str`
            The final input_path to be used for Extraction.

        Raises
        ------
        :py:class:`AttributeError`
            Please define either (input_path) or (base_path and partition)

        Examples
        --------
        >>> _get_path(input_path=u'/user/furia_salamandra_faerfax/data')
        u'/user/furia_salamandra_faerfax/data/*'

        >>> _get_path(base_path=u'/user/furia_salamandra_faerfax/data', partition=20180101)
        u'/user/furia_salamandra_faerfax/data/18/01/01/*'

        See Also
        --------
        :py:meth:`fix_suffix`
        :py:meth:`remove_hdfs_prefix`
        """
        if input_path:
            # An explicit path wins; only normalize it (strip hdfs:// prefix, add "/*").
            return fix_suffix(remove_hdfs_prefix(input_path))
        elif base_path and partition:
            # Derive "<base_path>/YY/MM/DD/*" from a daily YYYYMMDD partition.
            return infer_input_path_from_partition(
                base_path=base_path,
                partition=partition)
        else:
            error_msg = 'Please define either (input_path) or (base_path and partition)'
            self.logger.error(error_msg)
            raise AttributeError(error_msg)

    def _get_raw_rdd(self, input_path):
        """
        Loads files containing JSON Strings from :py:data:`input_path` and parallelizes them
        into an :any:`pyspark.RDD`.

        Tries the SequenceFile reader first and falls back to a plain text read
        when the JVM raises (e.g. because the input is not a SequenceFile).

        Parameters
        ----------
        input_path : :any:`str`

        Returns
        -------
        :any:`pyspark.RDD`
            Output RDD with one JSON String per Record

        See Also
        --------
        pyspark.SparkContext.textFile
        """
        try:
            return self._get_raw_sequence_rdd(input_path)
        except Py4JJavaError:
            # Not a SequenceFile -> treat the input as plain text files.
            return self._get_raw_text_rdd(input_path)

    def _get_raw_text_rdd(self, input_path):
        # One record per input line; assumes single-line JSON documents.
        self.logger.debug('Fetching TextFile containing JSON')
        return self.spark.sparkContext.textFile(input_path)

    def _get_raw_sequence_rdd(self, input_path):
        # Keep only the value of each (key, value) pair and decode the raw bytes as UTF-8.
        self.logger.debug('Fetching SequenceFile containing JSON')
        return self.spark.sparkContext.sequenceFile(input_path).map(lambda k_v: k_v[1].decode("utf-8"))

    def _convert_rdd_to_df(self, rdd_strings):
        """
        Converts the input RDD :py:data:`rdd_strings` to a DataFrame with inferred Structure
        and DataTypes and returns it.

        Parameters
        ----------
        rdd_strings : :any:`pyspark.RDD`
            Input RDD containing only unicode JSON Strings per Record

        Returns
        -------
        :any:`pyspark.sql.DataFrame`
            Complex DataFrame with set of all found Attributes from input JSON Files

        See Also
        --------
        pyspark.sql.DataFrameReader.json
        """
        self.logger.debug('Deserializing JSON from String RDD to DataFrame')
        return self.spark.read.json(rdd_strings)

    def _get_values_as_string_rdd(self, rdd_raw):
        """
        Removes embedded, escaped newline (``\\n``) and carriage return (``\\r``)
        sequences from the JSON strings.

        Parameters
        ----------
        rdd_raw : :any:`pyspark.RDD`

        Returns
        -------
        :any:`pyspark.RDD`
            Output RDD with one JSON String per Record
        """
        self.logger.debug('Cleaning JSON String RDD (selecting values, ' +
                          'removing newline and carriage return)')
        # NOTE(review): this replaces the two-character escape sequences "\n" / "\r"
        # inside the JSON text, not literal control characters (textFile already
        # splits on real newlines) -- presumably to flatten escaped line breaks
        # within string values; confirm intent before changing.
        return rdd_raw.map(lambda v: v.replace('\\n', ' ').replace('\\r', ''))
|
rt-phb/Spooq | tests/unit/transformer/test_exploder.py | from builtins import str
from builtins import object
import pytest
import json
from pyspark.sql import functions as sql_funcs
from pyspark.sql import Row
from spooq2.transformer import Exploder
class TestBasicAttributes(object):
    """Basic attributes of the Exploder transformer."""

    def test_logger_should_be_accessible(self):
        transformer = Exploder()
        assert hasattr(transformer, "logger")

    def test_name_is_set(self):
        transformer = Exploder()
        assert transformer.name == "Exploder"

    def test_str_representation_is_correct(self):
        transformer = Exploder()
        assert str(transformer) == "Transformer Object of Class Exploder"
class TestExploding(object):
    """Exploding an array column into one row per array element."""

    @pytest.fixture(scope="module")
    def input_df(self, spark_session):
        # Shared fixture data set containing a nested `attributes.friends` array.
        return spark_session.read.parquet("data/schema_v1/parquetFiles")

    @pytest.fixture()
    def default_params(self):
        # Default constructor arguments used by most tests in this class.
        return {"path_to_array": "attributes.friends", "exploded_elem_name": "friend"}

    @pytest.mark.slow
    def test_count(self, input_df, default_params):
        # The transformer must produce exactly as many rows as a plain F.explode().
        expected_count = input_df.select(sql_funcs.explode(input_df[default_params["path_to_array"]])).count()
        actual_count = Exploder(**default_params).transform(input_df).count()
        assert expected_count == actual_count

    @pytest.mark.slow
    def test_exploded_array_is_added(self, input_df, default_params):
        # The exploded element is appended as a new column; all original columns stay.
        transformer = Exploder(**default_params)
        expected_columns = set(input_df.columns + [default_params["exploded_elem_name"]])
        actual_columns = set(transformer.transform(input_df).columns)
        assert expected_columns == actual_columns

    @pytest.mark.slow
    def test_array_is_converted_to_struct(self, input_df, default_params):
        # Each exploded element should be a struct (dict) instead of a list entry.
        def get_data_type_of_column(df, path=["attributes"]):
            # Walks the nested dict representation of the first row down `path`.
            record = df.first().asDict(recursive=True)
            for p in path:
                record = record[p]
            return type(record)

        current_data_type_friend = get_data_type_of_column(input_df, path=["attributes", "friends"])
        assert issubclass(current_data_type_friend, list)

        transformed_df = Exploder(**default_params).transform(input_df)
        transformed_data_type = get_data_type_of_column(transformed_df, path=["friend"])
        assert issubclass(transformed_data_type, dict)

    def test_records_with_empty_arrays_are_dropped_by_default(self, spark_session):
        # ids 1 and 3 have empty arrays and are dropped; id 2 yields three rows.
        input_df = spark_session.createDataFrame([
            Row(id=1, array_to_explode=[]),
            Row(id=2, array_to_explode=[Row(elem_id="a"), Row(elem_id="b"), Row(elem_id="c")]),
            Row(id=3, array_to_explode=[]),
        ])
        transformed_df = Exploder(path_to_array="array_to_explode", exploded_elem_name="elem").transform(input_df)
        assert transformed_df.count() == 3

    def test_records_with_empty_arrays_are_kept_via_setting(self, spark_session):
        # With drop_rows_with_empty_array=False the empty-array rows survive
        # (outer explode): 3 rows from id 2 + 1 row each for ids 1 and 3 = 5.
        input_df = spark_session.createDataFrame([
            Row(id=1, array_to_explode=[]),
            Row(id=2, array_to_explode=[Row(elem_id="a"), Row(elem_id="b"), Row(elem_id="c")]),
            Row(id=3, array_to_explode=[]),
        ])
        transformed_df = Exploder(path_to_array="array_to_explode",
                                  exploded_elem_name="elem",
                                  drop_rows_with_empty_array=False).transform(input_df)
        assert transformed_df.count() == 5
|
rt-phb/Spooq | tests/unit/extractor/test_jdbc.py | <reponame>rt-phb/Spooq<filename>tests/unit/extractor/test_jdbc.py
from __future__ import division
from builtins import str
from builtins import object
from past.utils import old_div
import pytest
import sqlite3
import pandas as pd
from doubles import allow
from spooq2.extractor import JDBCExtractorIncremental
@pytest.fixture()
def default_params():
    """Baseline keyword arguments for constructing a JDBCExtractorIncremental."""
    connection_options = {name: name for name in ("url", "driver", "user", "password")}
    return {
        "jdbc_options": connection_options,
        "partition": 20180518,
        "source_table": "MOCK_DATA",
        "spooq2_values_table": "test_spooq2_values_tbl",
        "spooq2_values_db": "test_spooq2_values_db",
        "spooq2_values_partition_column": "updated_at",
        "cache": True,
    }
@pytest.fixture()
def extractor(default_params):
    # Fresh extractor instance per test, built from the default parameters.
    return JDBCExtractorIncremental(**default_params)
@pytest.fixture()
def spooq2_values_pd_df(spark_session, default_params):
    """Creates a Hive table of previously loaded partition boundaries and yields it as a Pandas DataFrame."""
    # fmt: off
    input_data = {
        'partition_column': ['updated_at', 'updated_at', 'updated_at'],
        'dt': [20180515, 20180516, 20180517],
        'first_value': ['2018-01-01 03:30:00', '2018-05-16 03:30:00', '2018-05-17 03:30:00'],
        'last_value': ['2018-05-16 03:29:59', '2018-05-17 03:29:59', '2018-05-18 03:29:59']
    }
    # fmt: on
    pd_df = pd.DataFrame(input_data)
    # Allow writing into dynamic partitions without naming each one explicitly.
    spark_session.conf.set("hive.exec.dynamic.partition", "true")
    spark_session.conf.set("hive.exec.dynamic.partition.mode", "nonstrict")
    # Recreate the logging database from scratch for test isolation.
    spark_session.sql("DROP DATABASE IF EXISTS {db} CASCADE".format(db=default_params["spooq2_values_db"]))
    spark_session.sql("CREATE DATABASE {db}".format(db=default_params["spooq2_values_db"]))
    spark_session.createDataFrame(pd_df).write.partitionBy("dt").saveAsTable(
        "{db}.{tbl}".format(db=default_params["spooq2_values_db"], tbl=default_params["spooq2_values_table"])
    )
    yield pd_df
    # Teardown: drop the database again after the test ran.
    spark_session.sql("DROP DATABASE IF EXISTS {db} CASCADE".format(db=default_params["spooq2_values_db"]))
@pytest.fixture()
def sqlite_url(spark_session, tmp_path):
    """Creates a temporary SQLite database from a DDL script and yields its JDBC URL."""
    with open("data/schema_v2/create_MOCK_DATA.sql", "r") as sql_script:
        ddl = sql_script.read()
    # Bug fix: ``tmp_path`` is a pathlib.Path; ``Path + str`` raises TypeError.
    # Paths are joined with the ``/`` operator instead.
    db_file = str(tmp_path / "db_file")
    conn = sqlite3.connect(db_file)
    conn.executescript(ddl)
    conn.commit()
    yield "jdbc:sqlite:" + db_file
    # Teardown: close the connection once the test is done with the database.
    conn.close()
class TestJDBCExtractorIncremental(object):
    """Testing of spooq2.extractor.JDBCExtractorIncremental

    spooq2_values_pd_df:
      partition_column        dt          first_value           last_value
    0       updated_at  20180515  2018-01-01 03:30:00  2018-05-16 03:29:59
    1       updated_at  20180516  2018-05-16 03:30:00  2018-05-17 03:29:59
    2       updated_at  20180517  2018-05-17 03:30:00  2018-05-18 03:29:59
    """

    class TestBasicAttributes(object):
        # Sanity checks on attributes inherited from the Extractor base class.

        def test_logger_should_be_accessible(self, extractor):
            assert hasattr(extractor, "logger")

        def test_name_is_set(self, extractor):
            assert extractor.name == "JDBCExtractorIncremental"

        def test_str_representation_is_correct(self, extractor):
            assert str(extractor) == "Extractor Object of Class JDBCExtractorIncremental"

    class TestBoundaries(object):
        """Deriving boundaries from previous loads logs (spooq2_values_pd_df)"""

        @pytest.mark.parametrize(
            ("partition", "value"),
            [(20180510, "2018-01-01 03:30:00"), (20180515, "2018-05-16 03:30:00"), (20180516, "2018-05-17 03:30:00")],
        )
        def test__get_lower_bound_from_succeeding_partition(self, partition, value, spooq2_values_pd_df, extractor):
            """Getting the upper boundary partition to load"""
            assert extractor._get_lower_bound_from_succeeding_partition(spooq2_values_pd_df, partition) == value

        @pytest.mark.parametrize(
            ("partition", "value"),
            [
                (20180516, "2018-05-16 03:29:59"),
                (20180517, "2018-05-17 03:29:59"),
                (20180518, "2018-05-18 03:29:59"),
                (20180520, "2018-05-18 03:29:59"),
            ],
        )
        def test__get_upper_bound_from_preceding_partition(self, partition, value, spooq2_values_pd_df, extractor):
            """Getting the lower boundary partition to load"""
            assert extractor._get_upper_bound_from_preceding_partition(spooq2_values_pd_df, partition) == value

        @pytest.mark.parametrize(
            ("partition", "boundaries"),
            [
                (20180515, ("2018-01-01 03:30:00", "2018-05-16 03:29:59")),
                (20180516, ("2018-05-16 03:30:00", "2018-05-17 03:29:59")),
                (20180517, ("2018-05-17 03:30:00", "2018-05-18 03:29:59")),
            ],
        )
        def test__get_lower_and_upper_bounds_from_current_partition(
            self, partition, boundaries, spooq2_values_pd_df, extractor
        ):
            # A re-load of an already logged partition uses exactly its stored bounds.
            assert extractor._get_lower_and_upper_bounds_from_current_partition(
                spooq2_values_pd_df, partition
            ) == tuple(boundaries)

        @pytest.mark.slow
        def test__get_previous_boundaries_table(self, extractor, spooq2_values_pd_df):
            """Getting boundaries from previously loaded partitions"""
            # Remove any cached partition column so the method has to re-derive it.
            try:
                del extractor.spooq2_values_partition_column
            except AttributeError:
                pass
            assert not hasattr(extractor, "spooq2_values_partition_column")
            df = extractor._get_previous_boundaries_table(extractor.spooq2_values_db, extractor.spooq2_values_table)
            assert 3 == df.count()
            # Reading the table must also (re)set the partition column attribute.
            assert extractor.spooq2_values_partition_column == "updated_at"

        # fmt: off
        @pytest.mark.parametrize(
            ("partition", "boundaries"),
            [(20180510, (False, '2018-01-01 03:30:00')),
             (20180515, ('2018-01-01 03:30:00', '2018-05-16 03:29:59')),
             (20180516, ('2018-05-16 03:30:00', '2018-05-17 03:29:59')),
             (20180517, ('2018-05-17 03:30:00', '2018-05-18 03:29:59')),
             (20180518, ('2018-05-18 03:29:59', False)),
             (20180520, ('2018-05-18 03:29:59', False))]
        )
        # fmt: on
        def test__get_boundaries_for_import(self, extractor, spooq2_values_pd_df, partition, boundaries):
            # False marks an open-ended boundary (before first / after last logged load).
            assert extractor._get_boundaries_for_import(partition) == boundaries

    class TestQueryConstruction(object):
        """Constructing Query for Source Extraction with Boundaries in Where Clause"""

        @pytest.mark.parametrize(
            ("boundaries", "expected_query"),
            [
                ((False, False), "select * from MOCK_DATA"),
                ((False, 1024), "select * from MOCK_DATA where updated_at <= 1024"),
                ((False, "1024"), "select * from MOCK_DATA where updated_at <= 1024"),
                ((False, "g1024"), 'select * from MOCK_DATA where updated_at <= "g1024"'),
                (
                    (False, "2018-05-16 03:29:59"),
                    'select * from MOCK_DATA where updated_at <= "2018-05-16 03:29:59"',
                ),
                ((1024, False), "select * from MOCK_DATA where updated_at > 1024"),
                (("1024", False), "select * from MOCK_DATA where updated_at > 1024"),
                (("g1024", False), 'select * from MOCK_DATA where updated_at > "g1024"'),
                (
                    ("2018-05-16 03:29:59", False),
                    'select * from MOCK_DATA where updated_at > "2018-05-16 03:29:59"',
                ),
                (
                    ("2018-01-01 03:30:00", "2018-05-16 03:29:59"),
                    'select * from MOCK_DATA where updated_at > "2018-01-01 03:30:00"'
                    + ' and updated_at <= "2018-05-16 03:29:59"',
                ),
            ],
        )
        def test__construct_query_for_partition(self, boundaries, expected_query, extractor):
            # Stub the boundary lookup so only query construction is under test.
            allow(extractor)._get_boundaries_for_import.and_return(boundaries)
            actual_query = extractor._construct_query_for_partition(extractor.partition)
            # Collapse all whitespace to make the comparison formatting-insensitive.
            expected_query = " ".join(expected_query.split())
            assert actual_query == expected_query

    @pytest.mark.parametrize("key", ["url", "driver", "user", "password"])
    class TestJDBCOptions(object):
        # Each required jdbc_options key is validated in the constructor.

        def test_missing_jdbc_option_raises_error(self, key, default_params):
            del default_params["jdbc_options"][key]
            with pytest.raises(AssertionError) as excinfo:
                JDBCExtractorIncremental(**default_params)
            assert key + " is missing from the jdbc_options." in str(excinfo.value)

        def test_wrong_jdbc_option_raises_error(self, key, default_params):
            default_params["jdbc_options"][key] = 123
            with pytest.raises(AssertionError) as excinfo:
                JDBCExtractorIncremental(**default_params)
            assert key + " has to be provided as a string object." in str(excinfo.value)
|
rt-phb/Spooq | src/spooq2/extractor/extractor.py | """
Extractors are used to fetch, extract and convert a source data set into a PySpark DataFrame.
Exemplary extraction sources are **JSON Files** on file systems like HDFS, DBFS or EXT4
and relational database systems via **JDBC**.
"""
from builtins import object
import logging
class Extractor(object):
    """
    Abstract base class for all Extractor implementations.

    Attributes
    ----------
    name : :any:`str`
        The concrete subclass' name, taken from its type.
    logger : :any:`logging.Logger`
        Shared, class level logger for all instances.
    """

    def __init__(self):
        self.name = self.__class__.__name__
        self.logger = logging.getLogger("spooq2")

    def extract(self):
        """
        Extracts Data from a Source and converts it into a PySpark DataFrame.

        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`

        Note
        ----
        This method does not take ANY input parameters. All needed parameters are defined
        in the initialization of the Extractor Object.
        """
        raise NotImplementedError("This method has to be implemented in the subclasses")

    def __str__(self):
        return "Extractor Object of Class " + self.name
|
rt-phb/Spooq | docs/source/base_classes/create_transformer/no_id_dropper.py | from transformer import Transformer
class NoIdDropper(Transformer):
    """
    Example transformer that removes records lacking an Id value.

    This is a simplified example on how to implement a new transformer class.
    Docstrings follow the Numpy style (parsed via the napoleon Sphinx plug-in)
    so they end up in the generated HTML and PDF documentation.

    Internally this delegates to :meth:`pyspark.sql.DataFrame.dropna`.

    Examples
    --------
    input_df = some_extractor_instance.extract()
    transformed_df = NoIdDropper(
        id_columns='user_id'
    ).transform(input_df)

    Parameters
    ----------
    id_columns: :any:`str` or :any:`list`
        The name of the column containing the identifying Id values.
        Defaults to "id"

    Raises
    ------
    :any:`exceptions.ValueError`:
        "how ('" + how + "') should be 'any' or 'all'"
    :any:`exceptions.ValueError`:
        "subset should be a list or tuple of column names"
    """

    def __init__(self, id_columns="id"):
        super(NoIdDropper, self).__init__()
        self.id_columns = id_columns

    def transform(self, input_df):
        # Keep only rows where at least one of the Id columns is non-null.
        self.logger.info(
            "Dropping records without an Id (columns to consider: {col})".format(col=self.id_columns)
        )
        return input_df.dropna(how="all", thresh=None, subset=self.id_columns)
|
rt-phb/Spooq | src/spooq2/loader/__init__.py | <gh_stars>1-10
from .loader import Loader
from .hive_loader import HiveLoader

# Explicitly declare the public API of the loader sub-package.
__all__ = [
    "Loader",
    "HiveLoader",
]
|
rt-phb/Spooq | tests/unit/extractor/test_json_files.py | from builtins import str
from builtins import object
import pytest
from pyspark.sql.dataframe import DataFrame
from spooq2.extractor import JSONExtractor
from spooq2.extractor.tools import infer_input_path_from_partition
@pytest.fixture()
def default_extractor():
    # Minimal extractor instance; only `input_path` is provided.
    return JSONExtractor(input_path="some/path")
class TestBasicAttributes(object):
    # Sanity checks on attributes inherited from the Extractor base class.

    def test_logger_should_be_accessible(self, default_extractor):
        assert hasattr(default_extractor, "logger")

    def test_name_is_set(self, default_extractor):
        assert default_extractor.name == "JSONExtractor"

    def test_str_representation_is_correct(self, default_extractor):
        assert str(default_extractor) == "Extractor Object of Class JSONExtractor"
class TestPathManipulation(object):
    """Path manipulating Methods"""

    # fmt: off
    @pytest.mark.parametrize(("input_params", "expected_path"), [
        (('base', 20170601), 'base/17/06/01/*'),
        (('/base', '20170601'), '/base/17/06/01/*'),
        (('hdfs://nameservice-ha/base/path', '20170601'), '/base/path/17/06/01/*'),
        (('hdfs://nameservice-ha:8020/base/path', '20170601'), '/base/path/17/06/01/*')])
    # fmt: on
    def test_infer_input_path_from_partition(self, input_params, expected_path):
        # Daily YYYYMMDD partitions map to a YY/MM/DD directory layout;
        # hdfs:// prefixes (with or without port) are stripped.
        assert expected_path == infer_input_path_from_partition(*input_params)

    # fmt: off
    @pytest.mark.parametrize(("input_params", "expected_path"), [
        (('hdfs://nameservice-ha:8020/full/input/path/provided', None, None),
         '/full/input/path/provided/*'),
        ((None, '/base/path/to/file', 20180723),
         '/base/path/to/file/18/07/23/*'),
        ((None, 'hdfs://nameservice-ha/base/path/to/file', 20180723),
         '/base/path/to/file/18/07/23/*')])
    # fmt: on
    def test__get_path(self, input_params, expected_path, default_extractor):
        """Chooses whether to use Full Input Path or derive it from Base Path and Partition"""
        assert expected_path == default_extractor._get_path(*input_params)
@pytest.mark.parametrize(
    "input_path",
    ["data/schema_v1/sequenceFiles", "data/schema_v1/textFiles"]
)
class TestExtraction(object):
    """Extraction of JSON Files"""

    @pytest.fixture(scope="class")
    def expected_df(self, spark_session):
        # Reference data set with the expected schema and row count.
        df = spark_session.read.parquet("data/schema_v1/parquetFiles/*")
        df = df.drop("birthday")  # duplicated column due to manual date conversions in parquet
        return df

    def test_conversion(self, input_path, expected_df):
        """JSON File is converted to a DataFrame"""
        extractor = JSONExtractor(input_path=input_path)
        assert isinstance(extractor.extract(), DataFrame)

    def test_schema(self, input_path, expected_df):
        """JSON File is converted to the correct schema"""
        extractor = JSONExtractor(input_path=input_path)
        result_df = extractor.extract()
        assert expected_df.schema == result_df.schema

    def test_count(self, input_path, expected_df):
        """Converted DataFrame contains the same Number of Rows as in the Source Data"""
        extractor = JSONExtractor(input_path=input_path)
        assert expected_df.count() == extractor.extract().count()
|
rt-phb/Spooq | src/spooq2/transformer/flattener.py | from pyspark.sql import functions as F, types as T
from pyspark.sql.utils import AnalysisException
import sys
import json
from .transformer import Transformer
from spooq2.transformer import Exploder, Mapper
class Flattener(Transformer):
"""
Flattens and explodes an input DataFrame.
Example
-------
>>> import datetime
>>> from pyspark.sql import Row
>>> from spooq2.transformer import Flattener
>>>
>>> input_df = spark.createDataFrame([Row(
>>> struct_val_1=Row(
>>> struct_val_2=Row(
>>> struct_val_3=Row(
>>> struct_val_4=Row(int_val=4789),
>>> long_val=478934243342334),
>>> string_val="Hello"),
>>> double_val=43.12),
>>> timestamp_val=datetime.datetime(2021, 1, 1, 12, 30, 15)
>>> )])
>>> input_df.printSchema()
root
|-- struct_val_1: struct (nullable = true)
| |-- struct_val_2: struct (nullable = true)
| | |-- struct_val_3: struct (nullable = true)
| | | |-- struct_val_4: struct (nullable = true)
| | | | |-- int_val: long (nullable = true)
| | | |-- long_val: long (nullable = true)
| | |-- string_val: string (nullable = true)
| |-- double_val: double (nullable = true)
|-- timestamp_val: timestamp (nullable = true)
>>>
>>> flat_df = Flattener().transform(input_df)
[spooq2] 2021-02-19 15:47:59,921 INFO flattener::_explode_and_get_mapping::90: Exploding Input DataFrame and Generating Mapping (This can take some time depending on the complexity of the input DataFrame)
[spooq2] 2021-02-19 15:48:01,870 INFO mapper::transform::117: Generating SQL Select-Expression for Mapping...
[spooq2] 2021-02-19 15:48:01,942 INFO mapper::transform::143: SQL Select-Expression for new mapping generated!
>>> flat_df.printSchema()
root
|-- int_val: long (nullable = true)
|-- long_val: long (nullable = true)
|-- string_val: string (nullable = true)
|-- double_val: double (nullable = true)
|-- timestamp_val: timestamp (nullable = true)
>>>
Parameters
----------
pretty_names : :any:`bool`, Defaults to True
Defines if Spooq should try to use the shortest name possible
(starting from the deepest key / rightmost in the path)
keep_original_columns : :any:`bool`, Defaults to False
Whether the original columns should be kept under the ``original_columns`` struct next to the
flattenend columns.
convert_timestamps : :any:`bool`, Defaults to True
Defines if Spooq should use special timestamp and datetime transformation on column names
with specific suffixes (_at, _time, _date)
ignore_ambiguous_columns : :any:`bool`, Defaults to True
This flag is forwarded to the Mapper Transformer.
"""
def __init__(self, pretty_names=True, keep_original_columns=False, convert_timestamps=True, ignore_ambiguous_columns=True):
super().__init__()
self.pretty_names = pretty_names
self.keep_original_columns = keep_original_columns
self.convert_timestamps = convert_timestamps
self.ignore_ambiguous_columns = ignore_ambiguous_columns
self.python_script = []
def transform(self, input_df):
exploded_df, mapping = self._explode_and_get_mapping(input_df)
mapped_df = Mapper(mapping=mapping, ignore_ambiguous_columns=self.ignore_ambiguous_columns).transform(exploded_df)
return mapped_df
def get_script(self, input_df):
"""
Flattens and explodes an input DataFrame but instead of directly applying it to the input DataFrame,
it returns a script that contains all imports, explosion transformations, and the resulting mapping.
Parameters
----------
input_df : :py:class:`pyspark.sql.DataFrame`
Input DataFrame
Returns
-------
:any:`list` of :any:`str`
List of code lines to execute the flattening steps explicitly.
Hint
----
See :py:mod:`spooq2.transformer.flattener.Flattener.export_script` for exporting the script to a file.
"""
self._explode_and_get_mapping(input_df)
return self.python_script
def export_script(self, input_df, file_name):
"""
Flattens and explodes an input DataFrame but instead of directly applying it to the input DataFrame,
it exports a script that contains all imports, explosion transformations, and the resulting mapping
to an external file.
Parameters
----------
input_df : :py:class:`pyspark.sql.DataFrame`
Input DataFrame
file_name : :any:`str`
Name of file to which the script should be exported to.
Attention
---------
Currently, only local paths are possible as it uses Python's internal ``open()`` function.
Hint
----
See :py:mod:`spooq2.transformer.flattener.Flattener.get_script` for getting the script as a python object.
"""
with open(file_name, "w") as file_handle:
for line in self.get_script(input_df):
file_handle.write(f"{line}\n")
self.logger.info(f"Flattening Tranformation Script was exported to {file_name}")
def _explode_and_get_mapping(self, input_df):
self.logger.info("Exploding Input DataFrame and Generating Mapping (This can take some time depending on the complexity of the input DataFrame)")
initial_mapping = []
if self.keep_original_columns:
input_df = input_df.withColumn("original_columns", F.struct(*input_df.columns))
self._script_set_imports()
self._script_add_input_statement(input_df)
exploded_df, preliminary_mapping = self._get_preliminary_mapping(input_df, input_df.schema.jsonValue(), initial_mapping, [], [])
fixed_mapping = self._convert_python_to_spark_data_types(preliminary_mapping)
if self.keep_original_columns:
fixed_mapping.insert(0, ("original_columns", "original_columns", "as_is"))
self._script_apply_mapping(mapping=fixed_mapping)
return exploded_df, fixed_mapping
def _get_preliminary_mapping(self, input_df, json_schema, mapping, current_path, exploded_arrays):
for field in json_schema["fields"]:
self.logger.debug(json.dumps(field, indent=2))
if self._field_is_original_columns_struct(field) and self.keep_original_columns:
continue
elif self._field_is_atomic(field):
self.logger.debug(f"Atomic Field found: {field['name']}")
mapping = self._add_field_to_mapping(mapping, current_path, field)
elif self._field_is_struct(field):
self.logger.debug(f"Struct Field found: {field['name']}")
struct_name = field["name"]
new_path = current_path + [struct_name]
input_df, mapping = self._get_preliminary_mapping(input_df=input_df, json_schema=field["type"], mapping=mapping, current_path=new_path, exploded_arrays=exploded_arrays)
elif self._field_is_array(field):
self.logger.debug(f"Array Field found: {field['name']}")
pretty_field_name = field["name"]
field_name = "_".join(current_path + [pretty_field_name])
array_path = ".".join(current_path + [pretty_field_name])
if array_path in exploded_arrays:
self.logger.debug(f"Skipping explosion of {field_name}, as it was already exploded")
continue
else:
if self.pretty_names:
try:
input_df[f"{pretty_field_name}_exploded"]
# If no exception is thrown, then the name already taken and the full path will be used
exploded_elem_name = f"{field_name}_exploded"
except AnalysisException:
exploded_elem_name = f"{pretty_field_name}_exploded"
else:
exploded_elem_name = f"{field_name}_exploded"
self.logger.debug(f"Exploding {array_path} into {exploded_elem_name}")
exploded_df = Exploder(path_to_array=array_path, exploded_elem_name=exploded_elem_name).transform(input_df)
self._script_add_explode_transformation(path_to_array=array_path, exploded_elem_name=exploded_elem_name)
exploded_df = Exploder(path_to_array=array_path, exploded_elem_name=exploded_elem_name, drop_rows_with_empty_array=False).transform(input_df)
exploded_arrays.append(array_path)
return self._get_preliminary_mapping(input_df=exploded_df, json_schema=exploded_df.schema.jsonValue(), mapping=[], current_path=[], exploded_arrays=exploded_arrays)
return (input_df, mapping)
def _field_is_original_columns_struct(self, field):
return self._field_is_struct(field) and field["name"] == "original_columns"
def _field_is_atomic(self, field):
return isinstance(field["type"], str)
def _field_is_struct(self, field):
field_type = field["type"]
return (isinstance(field_type, dict) and
len(field_type.get("fields", [])) > 0 and
field_type.get("type", "") == "struct")
def _field_is_array(self, field):
field_type = field["type"]
return (isinstance(field_type, dict) and
"fields" not in field_type.keys() and
field_type.get("type", "") == "array")
def _add_field_to_mapping(self, mapping, current_path, field):
short_field_name = field["name"]
source_path_array = current_path + [short_field_name]
source_path = ".".join(source_path_array)
included_source_paths = [source_path for (_, source_path, _) in mapping]
included_field_names = [field_name for (field_name, _, _) in mapping]
self.logger.debug("mapping: " + str(mapping))
self.logger.debug("short_field_name: " + str(short_field_name))
self.logger.debug("source_path_array: " + str(source_path_array))
self.logger.debug("source_path: " + str(source_path))
self.logger.debug("included_source_paths: " + str(included_source_paths))
self.logger.debug("included_field_names: " + str(included_field_names))
if source_path in included_source_paths:
return mapping
if self.pretty_names:
self.logger.debug("Prettifying Names...")
field_name = short_field_name
self.logger.debug(f"Check if Field Name is unused: {field_name}")
if field_name in included_field_names:
self.logger.debug("Pretty Field Name already taken")
for source_path_element in reversed(source_path_array[:-1]):
field_name = "_".join([source_path_element, field_name])
self.logger.debug(f"Check if Field Name is unused: {field_name}")
if field_name not in included_field_names:
self.logger.debug(f"Found unused Pretty Field Name: {field_name}")
break
else:
field_name = "_".join(source_path_array)
field_name = field_name.replace("_exploded", "")
data_type = field["type"]
column_mapping = mapping[:]
additional_column_mapping = (field_name, source_path, data_type)
self.logger.debug(f"Adding mapping: {str(additional_column_mapping)}")
column_mapping.append(additional_column_mapping)
return column_mapping
def _convert_python_to_spark_data_types(self, mapping):
data_type_matrix = {
"long": "LongType",
"int": "IntegerType",
"string": "StringType",
"double": "DoubleType",
"float": "FloatType",
"boolean": "BooleanType",
"date": "DateType",
"timestamp": "TimestampType"
}
fixed_mapping = [
(name, source, data_type_matrix.get(data_type, data_type))
for (name, source, data_type)
in mapping
]
if self.convert_timestamps:
fixed_mapping_with_timestamps = []
for (column_name, source_path, data_type) in fixed_mapping:
if column_name.endswith(("_at", "_time")):
data_type = "extended_string_to_timestamp"
elif column_name.endswith("_date"):
data_type = "extended_string_to_date"
fixed_mapping_with_timestamps.append((column_name, source_path, data_type))
fixed_mapping = fixed_mapping_with_timestamps
return fixed_mapping
def _script_set_imports(self):
self.python_script = [
"from pyspark.sql import SparkSession",
"from pyspark.sql import functions as F, types as T",
"",
"from spooq2.transformer import Mapper, Exploder",
"",
"spark = SparkSession.builder.getOrCreate()",
]
self.logger.debug("\n".join(self.python_script))
def _script_add_input_statement(self, input_df):
    """Append the statement that re-loads the input data to the script.

    The distinct source file names of ``input_df`` are collected so the
    generated script can read exactly the same files again.
    """
    distinct_files = input_df.select(F.input_file_name().alias("filename")).distinct().collect()
    file_list = ",".join(row.filename for row in distinct_files)
    self.python_script.append(f"input_df = spark.read.load('{file_list}')")
    if self.keep_original_columns:
        # Preserve the raw input as a single struct column for later reference.
        self.python_script.append("input_df = input_df.withColumn('original_columns', F.struct(*input_df.columns))")
    self.python_script.append("")
    self.logger.debug("\n".join(self.python_script))
def _script_add_explode_transformation(self, path_to_array, exploded_elem_name):
self.python_script.append(f"input_df = Exploder('{path_to_array}', '{exploded_elem_name}', drop_rows_with_empty_array=False).transform(input_df)")
self.logger.debug("\n".join(self.python_script))
def _script_apply_mapping(self, mapping):
string_lenghts = [[len(value) for value in mapping_line] for mapping_line in mapping]
max_col_len, max_source_len, max_type_len = [int(max(column)) for column in [*zip(*string_lenghts)]]
self.python_script.extend([
"",
"# fmt:off",
"mapping_to_apply = [",
])
for (column_name, source_path, data_type) in mapping:
mapping_string = " "
mapping_string += f"('{column_name}',"
whitespace_to_fill = max_col_len - len(column_name) + 1
mapping_string += " " * whitespace_to_fill
mapping_string += f"'{source_path}',"
whitespace_to_fill = max_source_len - len(source_path) + 1
mapping_string += " " * whitespace_to_fill
mapping_string += f"'{data_type}'),"
self.python_script.append(mapping_string)
self.python_script.extend([
"]",
"# fmt:on",
"",
"output_df = Mapper(mapping_to_apply).transform(input_df)"
])
self.logger.debug("\n".join(self.python_script))
|
rt-phb/Spooq | tests/unit/transformer/test_mapper.py | from builtins import str
from builtins import object
import pytest
from pyspark.sql import functions as F
from pyspark.sql import types as T
from pyspark.sql.utils import AnalysisException
from spooq2.transformer import Mapper
@pytest.fixture(scope="module")
def transformer(mapping):
    """Mapper under test, configured to tolerate missing input columns."""
    mapper_under_test = Mapper(mapping=mapping, ignore_missing_columns=True)
    return mapper_under_test
@pytest.fixture(scope="module")
def input_df(spark_session):
    """Source DataFrame read from the schema_v1 parquet test data."""
    source_df = spark_session.read.parquet("data/schema_v1/parquetFiles")
    return source_df
@pytest.fixture(scope="module")
def mapped_df(input_df, transformer):
    """Result of applying the Mapper under test to the input DataFrame."""
    result_df = transformer.transform(input_df)
    return result_df
@pytest.fixture(scope="module")
def mapping():
    """Column mapping applied by the Mapper under test.

    Each entry is (target column name, source path, data type); the data
    type is either a Spark SQL type name (with or without parentheses) or a
    spooq custom data type such as ``timestamp_s_to_s`` or ``json_string``.

    Input schema of the source parquet files:

    root
    |-- _corrupt_record: string (nullable = true)
    |-- attributes: struct (nullable = true)
    | |-- birthday: string (nullable = true)
    | |-- email: string (nullable = true)
    | |-- first_name: string (nullable = true)
    | |-- friends: array (nullable = true)
    | | |-- element: struct (containsNull = true)
    | | | |-- first_name: string (nullable = true)
    | | | |-- id: long (nullable = true)
    | | | |-- last_name: string (nullable = true)
    | |-- gender: string (nullable = true)
    | |-- ip_address: string (nullable = true)
    | |-- last_name: string (nullable = true)
    | |-- university: string (nullable = true)
    |-- guid: string (nullable = true)
    |-- id: long (nullable = true)
    |-- location: struct (nullable = true)
    | |-- latitude: string (nullable = true)
    | |-- longitude: string (nullable = true)
    |-- meta: struct (nullable = true)
    | |-- created_at_ms: long (nullable = true)
    | |-- created_at_sec: long (nullable = true)
    | |-- version: long (nullable = true)
    |-- birthday: timestamp (nullable = true)
    """
    return [
        ("id", "id", "IntegerType"),
        ("guid", "guid", "StringType()"),
        ("created_at", "meta.created_at_sec", "timestamp_s_to_s"),
        ("created_at_ms", "meta.created_at_ms", "timestamp_ms_to_ms"),
        ("version", "meta.version", "IntegerType()"),
        ("birthday", "birthday", "TimestampType"),
        ("location_struct", "location", "as_is"),
        ("latitude", "location.latitude", "DoubleType"),
        ("longitude", "location.longitude", "DoubleType"),
        ("birthday_str", "attributes.birthday", "StringType"),
        ("email", "attributes.email", "StringType"),
        # "myspace" is not in the documented input schema above — exercises
        # the ignore_missing_columns behavior of the Mapper.
        ("myspace", "attributes.myspace", "StringType"),
        ("first_name", "attributes.first_name", "StringBoolean"),
        ("last_name", "attributes.last_name", "StringBoolean"),
        ("gender", "attributes.gender", "StringType"),
        ("ip_address", "attributes.ip_address", "StringType"),
        ("university", "attributes.university", "StringType"),
        ("friends", "attributes.friends", "no_change"),
        ("friends_json", "attributes.friends", "json_string"),
    ]
class TestBasicAttributes(object):
    """Basic attributes and parameters of the Mapper instance."""

    def test_logger(self, transformer):
        """A logger attribute is available on the transformer."""
        assert hasattr(transformer, "logger")

    def test_name(self, transformer):
        """The transformer reports its class name."""
        assert transformer.name == "Mapper"

    def test_str_representation(self, transformer):
        """str() yields the canonical transformer description."""
        assert str(transformer) == "Transformer Object of Class Mapper"
class TestShapeOfMappedDataFrame(object):
    """Row and column shape of the mapped DataFrame."""

    @staticmethod
    def _column_names(mapping):
        """Target column names defined by the mapping, in order."""
        return [name for name, _source, _data_type in mapping]

    def test_same_amount_of_records(self, input_df, mapped_df):
        """Amount of Rows is the same after the transformation"""
        assert mapped_df.count() == input_df.count()

    def test_same_amount_of_columns(self, mapping, mapped_df):
        """Amount of Columns of the mapped DF is according to the Mapping"""
        assert len(mapped_df.columns) == len(mapping)

    def test_columns_are_renamed(self, mapped_df, mapping):
        """Mapped DF has renamed the Columns according to the Mapping"""
        assert mapped_df.columns == self._column_names(mapping)

    def test_base_column_is_missing_in_input(self, input_df, transformer, mapping):
        """All mapped columns are produced even if a source struct is dropped."""
        df_without_attributes = input_df.drop("attributes")
        result_df = transformer.transform(df_without_attributes)
        assert result_df.columns == self._column_names(mapping)

    def test_struct_column_is_empty_in_input(self, input_df, transformer, mapping):
        """All mapped columns are produced even if a source struct is null."""
        df_with_null_attributes = input_df.withColumn("attributes", F.lit(None))
        result_df = transformer.transform(df_with_null_attributes)
        assert result_df.columns == self._column_names(mapping)

    def test_input_dataframe_is_empty(self, spark_session, transformer, mapping):
        """All mapped columns are produced for a completely empty input."""
        empty_df = spark_session.createDataFrame([], schema=T.StructType())
        result_df = transformer.transform(empty_df)
        assert result_df.columns == self._column_names(mapping)
class TestMultipleMappings(object):
    """Chaining a second Mapper (append / prepend mode) onto an already mapped DataFrame."""

    @pytest.fixture(scope="module")
    def input_columns(self, mapped_df):
        """Column names of the already mapped DataFrame."""
        return mapped_df.columns

    @pytest.fixture(scope="module")
    def new_mapping(self):
        """Single-column mapping used for appending / prepending."""
        return [("created_date", "meta.created_at_sec", "DateType")]

    @pytest.fixture(scope="module")
    def new_columns(self, new_mapping):
        """Target column names of the additional mapping."""
        return [name for name, _source, _data_type in new_mapping]

    def test_appending_a_mapping(self, mapped_df, new_mapping, input_columns, new_columns):
        """Output schema is correct for added mapping at the end of the input schema"""
        result_df = Mapper(mapping=new_mapping, mode="append", ignore_missing_columns=True).transform(mapped_df)
        assert result_df.columns == input_columns + new_columns

    def test_prepending_a_mapping(self, mapped_df, new_mapping, input_columns, new_columns):
        """Output schema is correct for added mapping at the beginning of the input schema"""
        result_df = Mapper(mapping=new_mapping, mode="prepend", ignore_missing_columns=True).transform(mapped_df)
        assert result_df.columns == new_columns + input_columns

    def test_appending_a_mapping_with_duplicated_columns(self, input_columns, mapped_df):
        """Output schema is correct for newly appended mapping with columns
        that are also included in the input schema"""
        duplicated_mapping = [
            ("created_date", "meta.created_at_sec", "DateType"),
            ("birthday", "birthday", "DateType"),
        ]
        requested_columns = [name for name, _source, _data_type in duplicated_mapping]
        unseen_columns = [column for column in requested_columns if column not in input_columns]
        result_df = Mapper(mapping=duplicated_mapping, mode="append", ignore_missing_columns=True).transform(mapped_df)
        assert result_df.columns == input_columns + unseen_columns
        # The duplicated column keeps its position but takes the new data type.
        assert mapped_df.schema["birthday"].dataType == T.TimestampType()
        assert result_df.schema["birthday"].dataType == T.DateType()

    def test_prepending_a_mapping_with_duplicated_columns(self, input_columns, mapped_df):
        """Output schema is correct for newly prepended mapping with columns
        that are also included in the input schema"""
        duplicated_mapping = [
            ("created_date", "meta.created_at_sec", "DateType"),
            ("birthday", "birthday", "DateType"),
        ]
        requested_columns = [name for name, _source, _data_type in duplicated_mapping]
        unseen_columns = [column for column in requested_columns if column not in input_columns]
        result_df = Mapper(mapping=duplicated_mapping, mode="prepend", ignore_missing_columns=True).transform(mapped_df)
        assert result_df.columns == unseen_columns + input_columns
        # The duplicated column keeps its position but takes the new data type.
        assert mapped_df.schema["birthday"].dataType == T.TimestampType()
        assert result_df.schema["birthday"].dataType == T.DateType()
class TestExceptionForMissingInputColumns(object):
    """Strict mode (ignore_missing_columns=False): a missing referenced
    input column raises an AnalysisException."""

    @pytest.fixture(scope="class")
    def transformer(self, mapping):
        """Mapper that does NOT tolerate missing input columns."""
        return Mapper(mapping=mapping, ignore_missing_columns=False)

    def test_missing_column_raises_exception(self, input_df, transformer):
        """Dropping a referenced source struct makes transform() fail."""
        df_without_attributes = input_df.drop("attributes")
        with pytest.raises(AnalysisException):
            transformer.transform(df_without_attributes)

    def test_empty_input_dataframe_raises_exception(self, spark_session, transformer):
        """An empty schema is missing every referenced column and fails."""
        empty_df = spark_session.createDataFrame([], schema=T.StructType())
        with pytest.raises(AnalysisException):
            transformer.transform(empty_df)
class TestDataTypesOfMappedDataFrame(object):
    """Spark data types of the mapped columns match the mapping definitions."""

    @pytest.mark.parametrize(
        ("column", "expected_data_type"),
        [
            ("id", "integer"),
            ("guid", "string"),
            ("created_at", "long"),
            ("created_at_ms", "long"),
            ("birthday", "timestamp"),
            ("location_struct", "struct"),
            ("latitude", "double"),
            ("longitude", "double"),
            ("birthday_str", "string"),
            ("email", "string"),
            ("myspace", "string"),
            ("first_name", "string"),
            ("last_name", "string"),
            ("gender", "string"),
            ("ip_address", "string"),
            ("university", "string"),
            ("friends", "array"),
            ("friends_json", "string"),
        ],
    )
    def test_data_type_of_mapped_column(self, column, expected_data_type, mapped_df):
        """Each mapped column carries the Spark type implied by its mapping."""
        actual_data_type = mapped_df.schema[column].dataType.typeName()
        assert actual_data_type == expected_data_type
|
rt-phb/Spooq | tests/unit/loader/test_hive_loader.py | <filename>tests/unit/loader/test_hive_loader.py
from builtins import chr
from builtins import str
from builtins import object
import pytest
from copy import deepcopy
from doubles import expect
from pyspark.sql.functions import udf as spark_udf
from pyspark.sql.functions import lit
from pyspark.sql import types as sql_types
from py4j.protocol import Py4JJavaError
from spooq2.loader import HiveLoader
@pytest.fixture(scope="function")
def default_params():
    """Baseline HiveLoader keyword arguments with one integer partition column."""
    partition_definitions = [
        {"column_name": "partition_key_int", "column_type": "IntegerType", "default_value": 7}
    ]
    return {
        "db_name": "test_hive_loader",
        "table_name": "test_partitioned",
        "repartition_size": 2,
        "clear_partition": True,
        "auto_create_table": True,
        "overwrite_partition_value": True,
        "partition_definitions": partition_definitions,
    }
@pytest.fixture()
def full_table_name(default_params):
    """Fully qualified ``<db>.<table>`` name built from the default parameters."""
    database = default_params["db_name"]
    table = default_params["table_name"]
    return f"{database}.{table}"
@pytest.fixture()
def default_loader(default_params):
    """HiveLoader built from the unmodified default parameters."""
    loader = HiveLoader(**default_params)
    return loader
@spark_udf
def convert_int_to_ascii_char(code_point):
    """Spark UDF: map an integer code point to its single-character string.

    Fix: the parameter was named ``input``, shadowing the builtin; renamed
    to ``code_point``. The UDF is only ever applied positionally to a
    column expression, so the rename is backward-compatible.
    """
    return chr(code_point)
def construct_partition_query(partition_definitions):
    """Render partition definitions as a comma-separated ``col = value`` list.

    Numeric partition values are emitted unquoted; all other values are
    single-quoted (Hive literal syntax).
    """
    rendered_parts = []
    for definition in partition_definitions:
        if issubclass(definition["column_type"], sql_types.NumericType):
            rendered_parts.append("{part} = {dt}".format(part=definition["column_name"], dt=definition["default_value"]))
        else:
            rendered_parts.append("{part} = '{dt}'".format(part=definition["column_name"], dt=definition["default_value"]))
    return ", ".join(rendered_parts)
class TestBasicAttributes(object):
    """Name, logger and string representation of the loader."""

    def test_logger_should_be_accessible(self, default_loader):
        """A logger attribute is available on the loader."""
        assert hasattr(default_loader, "logger")

    def test_name_is_set(self, default_loader):
        """The loader reports its class name."""
        assert default_loader.name == "HiveLoader"

    def test_str_representation_is_correct(self, default_loader):
        """str() yields the canonical loader description."""
        assert str(default_loader) == "Loader Object of Class HiveLoader"
class TestSinglePartitionColumn(object):
    # End-to-end tests for HiveLoader against a Hive table partitioned by a
    # single integer column. The nested classes group the scenarios; they all
    # share the input_df fixture below, which (re)creates the test database.

    @pytest.fixture()
    def input_df(self, spark_session, default_params, full_table_name):
        """Create a fresh database with a partitioned table and yield the source DataFrame.

        The partition key is derived from ``meta.version % 10``, so partition
        values span 0-9. The whole database is dropped again on teardown.
        """
        df = spark_session.read.parquet("data/schema_v1/parquetFiles")
        df = df.withColumn("partition_key_int", df.meta.version % 10)  # 0-9
        # Allow dynamic partition inserts for the initial table population.
        spark_session.conf.set("hive.exec.dynamic.partition", "true")
        spark_session.conf.set("hive.exec.dynamic.partition.mode", "nonstrict")
        spark_session.sql("DROP DATABASE IF EXISTS {db} CASCADE".format(db=default_params["db_name"]))
        spark_session.sql("CREATE DATABASE {db}".format(db=default_params["db_name"]))
        df.write.partitionBy("partition_key_int").saveAsTable(full_table_name)
        yield df
        # Teardown: remove the test database including the table.
        spark_session.sql("DROP DATABASE IF EXISTS {db} CASCADE".format(db=default_params["db_name"]))

    class TestWarnings(object):
        # Input DataFrames whose columns do not match the target table are rejected.

        def test_more_columns_than_expected(self, input_df, default_loader):
            # One extra column on top of the table schema.
            df_to_load = input_df.withColumn("5th_wheel", lit(12345))
            with pytest.raises(AssertionError) as excinfo:
                default_loader.load(df_to_load)
            assert "Input columns don't match the columns of the Hive table" in str(excinfo.value)

        def test_less_columns_than_expected(self, input_df, default_loader):
            # One column of the table schema missing.
            df_to_load = input_df.drop("birthday")
            with pytest.raises(AssertionError) as excinfo:
                default_loader.load(df_to_load)
            assert "Input columns don't match the columns of the Hive table" in str(excinfo.value)

        def test_different_columns_order_than_expected(self, input_df, default_loader):
            # Same columns, reversed order — column order matters for the insert.
            df_to_load = input_df.select(list(reversed(input_df.columns)))
            with pytest.raises(AssertionError) as excinfo:
                default_loader.load(df_to_load)
            assert "Input columns don't match the columns of the Hive table" in str(excinfo.value)

    class TestClearPartition(object):
        """Clearing the Hive Table Partition before inserting"""

        @pytest.mark.parametrize("partition", [0, 2, 3, 6, 9])
        def test_clear_partition(self, spark_session, input_df, partition, default_params, full_table_name):
            """Partition is dropped"""
            default_params["partition_definitions"][0]["default_value"] = partition
            loader = HiveLoader(**default_params)
            partition_query = construct_partition_query(loader.partition_definitions)
            # NOTE(review): inverted_partition_query is unused here — the
            # where() below builds its own predicate string instead.
            inverted_partition_query = partition_query.replace("=", "!=").replace(", ", " and ")
            expected_count = input_df.where("partition_key_int != " + str(partition)).count()
            loader._clear_hive_partition()
            actual_count = spark_session.table(full_table_name).count()
            assert actual_count == expected_count

        def test_clear_partition_is_called_exactly_once(self, default_loader, input_df):
            """Clear Partition is called exactly once (Default)"""
            expect(default_loader)._clear_hive_partition.exactly(1).time
            default_loader.load(input_df)

        def test_clear_partition_is_not_called(self, default_loader, input_df):
            """Clear Partition is not called (Default Values was Overridden)"""
            default_loader.clear_partition = False
            expect(default_loader)._clear_hive_partition.exactly(0).time
            default_loader.load(input_df)

    class TestPartitionDefinitions(object):
        # Validation of the partition_definitions constructor argument.

        @pytest.mark.parametrize(
            "partition_definitions", ["Some string", 123, 75.0, b"abcd", ("Hello", "World"), {"Nice_to": "meet_you"}]
        )
        def test_input_is_not_a_list(self, partition_definitions, default_params):
            default_params["partition_definitions"] = partition_definitions
            with pytest.raises(AssertionError) as excinfo:
                HiveLoader(**default_params)
            assert "partition_definitions has to be a list containing dicts" in str(excinfo.value)

        @pytest.mark.parametrize("partition_definitions", ["Some string", 123, 75.0, b"abcd", ("Hello", "World")])
        def test_list_input_contains_non_dict_items(self, partition_definitions, default_params):
            default_params["partition_definitions"] = [partition_definitions]
            with pytest.raises(AssertionError) as excinfo:
                HiveLoader(**default_params)
            assert "Items of partition_definitions have to be dictionaries" in str(excinfo.value)

        def test_column_name_is_missing(self, default_params):
            default_params["partition_definitions"][0]["column_name"] = None
            with pytest.raises(AssertionError) as excinfo:
                HiveLoader(**default_params)
            assert "No column name set!" in str(excinfo.value)

        @pytest.mark.parametrize("data_type", [13, "no_spark_type", "arrray", "INT", ["IntegerType", "StringType"]])
        def test_column_type_not_a_valid_spark_sql_type(self, data_type, default_params):
            default_params["partition_definitions"][0]["column_type"] = data_type
            with pytest.raises(AssertionError) as excinfo:
                HiveLoader(**default_params)
            assert "Not a valid (PySpark) datatype for the partition column" in str(excinfo.value)

        @pytest.mark.parametrize("default_value", [None, "", [], {}])
        def test_default_value_is_empty(self, default_value, default_params, input_df):
            # The default value is only validated on load(), not at construction.
            default_params["partition_definitions"][0]["default_value"] = default_value
            with pytest.raises(AssertionError) as excinfo:
                loader = HiveLoader(**default_params)
                loader.load(input_df)
            assert "No default partition value set for partition column" in str(excinfo.value)

        def test_default_value_is_missing(self, default_params, input_df):
            default_params["partition_definitions"][0].pop("default_value")
            with pytest.raises(AssertionError) as excinfo:
                loader = HiveLoader(**default_params)
                loader.load(input_df)
            assert "No default partition value set for partition column" in str(excinfo.value)

    @pytest.mark.parametrize("partition", [0, 2, 3, 6, 9])
    class TestLoadPartition(object):
        # Loading behavior into a static partition, parametrized over values.

        def test_add_new_static_partition(self, input_df, default_params, partition, full_table_name, spark_session):
            default_params["partition_definitions"][0]["default_value"] = partition
            loader = HiveLoader(**default_params)
            partition_query = construct_partition_query(default_params["partition_definitions"])
            inverted_partition_query = partition_query.replace("=", "!=").replace(", ", " and ")
            df_to_load = input_df.where(partition_query)
            count_pre_total = input_df.where(inverted_partition_query).count()
            count_to_load = df_to_load.count()
            count_post_total = input_df.count()
            # Sanity check: partition + rest must add up to the full input.
            assert (
                count_post_total == count_pre_total + count_to_load
            ), "Something went wrong in the test setup of the input dataframe (input_df)"
            # Drop the target partition so the load has to (re)create it.
            spark_session.sql(
                "alter table {tbl} drop partition ({part_def})".format(tbl=full_table_name, part_def=partition_query)
            )
            assert (
                spark_session.table(full_table_name).count() == count_pre_total
            ), "test partition was not successfully dropped from output hive table"
            assert df_to_load.count() > 0, "Dataframe to load is empty!"
            loader.load(df_to_load)
            spark_session.catalog.refreshTable(full_table_name)
            assert (
                spark_session.table(full_table_name).count() == count_post_total
            ), "test partition was not successfully loaded to output hive table"

        def test_overwrite_static_partition(self, input_df, default_params, partition, full_table_name, spark_session):
            # clear_partition=True (default): the partition content is replaced,
            # so the total row count stays unchanged after the load.
            default_params["partition_definitions"][0]["default_value"] = partition
            loader = HiveLoader(**default_params)
            partition_query = construct_partition_query(default_params["partition_definitions"])
            df_to_load = input_df.where(partition_query)
            count_pre_total = spark_session.table(full_table_name).count()
            # NOTE(review): count_to_load is unused in this test.
            count_to_load = input_df.where(partition_query).count()
            count_post_total = input_df.count()
            assert (
                count_post_total == count_pre_total
            ), "Something went wrong in the test setup of the input DataFrame (input_df)"
            assert df_to_load.count() > 0, "DataFrame to load is empty!"
            loader.load(df_to_load)
            spark_session.catalog.refreshTable(full_table_name)
            assert (
                spark_session.table(full_table_name).count() == count_post_total
            ), "test partition was not successfully loaded to output hive table"

        def test_append_to_static_partition(self, input_df, default_params, partition, full_table_name, spark_session):
            # clear_partition=False: rows are added on top of the existing partition.
            default_params["partition_definitions"][0]["default_value"] = partition
            default_params["clear_partition"] = False
            loader = HiveLoader(**default_params)
            partition_query = construct_partition_query(default_params["partition_definitions"])
            df_to_load = input_df.where(partition_query)
            count_pre_total = spark_session.table(full_table_name).count()
            count_to_load = df_to_load.count()
            count_post_total = count_pre_total + count_to_load
            assert df_to_load.count() > 0, "DataFrame to load is empty!"
            loader.load(df_to_load)
            spark_session.catalog.refreshTable(full_table_name)
            assert (
                spark_session.table(full_table_name).count() == count_post_total
            ), "test partition was not successfully loaded to output hive table"

        def test_create_partitioned_table(self, input_df, default_params, partition, full_table_name, spark_session):
            # auto_create_table=True: loading into a dropped table re-creates it,
            # and the created table must actually be partitioned.
            default_params["partition_definitions"][0]["default_value"] = partition
            default_params["auto_create_table"] = True
            loader = HiveLoader(**default_params)
            spark_session.sql("drop table if exists " + full_table_name)
            spark_session.catalog.setCurrentDatabase(default_params["db_name"])
            assert default_params["table_name"] not in [
                tbl.name for tbl in spark_session.catalog.listTables()
            ], "Test setup of database is not clean. Table already exists!"
            partition_query = construct_partition_query(default_params["partition_definitions"])
            df_to_load = input_df.where(partition_query)
            count_to_load = df_to_load.count()
            assert df_to_load.count() > 0, "DataFrame to load is empty!"
            loader.load(df_to_load)
            spark_session.catalog.refreshTable(full_table_name)
            assert default_params["table_name"] in [
                tbl.name for tbl in spark_session.catalog.listTables()
            ], "Table was not created!"
            assert (
                spark_session.table(full_table_name).count() == count_to_load
            ), "test partition was not successfully loaded to automatically created output hive table"
            try:
                # "show partitions" raises for unpartitioned tables.
                assert spark_session.sql("show partitions " + full_table_name).count() > 0
            except Py4JJavaError as e:
                raise AssertionError("Created table is not partitioned. " + str(e))

        def test_add_new_static_partition_with_overwritten_partition_value(
            self, input_df, default_params, partition, full_table_name, spark_session
        ):
            # overwrite_partition_value=True + clear_partition=False: the whole
            # input lands in the configured partition on top of the existing data.
            default_params["partition_definitions"][0]["default_value"] = partition
            default_params["clear_partition"] = False
            loader = HiveLoader(**default_params)
            partition_query = construct_partition_query(default_params["partition_definitions"])
            # NOTE(review): inverted_partition_query and count_post_partition
            # are unused in this test.
            inverted_partition_query = partition_query.replace("=", "!=").replace(", ", " and ")
            output_table = spark_session.table(full_table_name)
            count_pre_partition = output_table.where(partition_query).count()
            count_post_partition = input_df.count()
            count_post_total = input_df.count() * 2
            assert input_df.count() > 0, "Dataframe to load is empty!"
            loader.load(input_df)
            assert (
                output_table.count() == count_post_total
            ), "test partition was not successfully loaded to output hive table"
            assert (
                output_table.where(partition_query).count() == input_df.count() + count_pre_partition
            ), "test partition was not successfully loaded to output hive table"
class TestMultiplePartitionColumn(object):
@pytest.fixture()
def input_df(self, spark_session, default_params, full_table_name):
df = spark_session.read.parquet("data/schema_v1/parquetFiles")
df = df.withColumn("partition_key_int", df.meta.version % 10) # 0-9
df = df.withColumn("partition_key_str", convert_int_to_ascii_char(df.partition_key_int + 100)) # d-m
spark_session.conf.set("hive.exec.dynamic.partition", "true")
spark_session.conf.set("hive.exec.dynamic.partition.mode", "nonstrict")
spark_session.sql("DROP DATABASE IF EXISTS {db} CASCADE".format(db=default_params["db_name"]))
spark_session.sql("CREATE DATABASE {db}".format(db=default_params["db_name"]))
df.write.partitionBy("partition_key_int", "partition_key_str").saveAsTable(full_table_name)
yield df
spark_session.sql("DROP DATABASE IF EXISTS {db} CASCADE".format(db=default_params["db_name"]))
@pytest.fixture(scope="function")
def default_params(self):
return {
"db_name": "test_hive_loader",
"table_name": "test_partitioned",
"repartition_size": 2,
"clear_partition": True,
"auto_create_table": True,
"overwrite_partition_value": True,
"partition_definitions": [
{"column_name": "partition_key_int", "column_type": "IntegerType", "default_value": 7},
{"column_name": "partition_key_str", "column_type": "StringType", "default_value": "k"},
],
}
class TestClearPartition(object):
"""Clearing the Hive Table Partition before inserting"""
# input_df.groupBy("partition_key_int", "partition_key_str").count().orderBy("partition_key_int", "partition_key_str").show(200)
@pytest.mark.parametrize("partition", [[0, "d"], [2, "f"], [3, "g"], [6, "j"], [9, "m"]])
def test_clear_partition(self, spark_session, input_df, partition, default_params, full_table_name):
"""Partition is dropped"""
(
default_params["partition_definitions"][0]["default_value"],
default_params["partition_definitions"][1]["default_value"],
) = partition
loader = HiveLoader(**default_params)
partition_query = construct_partition_query(loader.partition_definitions).replace(", ", " and ")
inverted_partition_query = partition_query.replace("=", "!=")
expected_count = input_df.where(inverted_partition_query).count()
loader._clear_hive_partition()
actual_count = spark_session.table(full_table_name).count()
assert actual_count == expected_count
def test_clear_partition_is_called_exactly_once(self, default_loader, input_df):
"""Clear Partition is called exactly once (Default)"""
expect(default_loader)._clear_hive_partition.exactly(1).time
default_loader.load(input_df)
def test_clear_partition_is_not_called(self, default_loader, input_df):
"""Clear Partition is not called (Default Values was Overridden)"""
default_loader.clear_partition = False
expect(default_loader)._clear_hive_partition.exactly(0).time
default_loader.load(input_df)
class TestPartitionDefinitions(object):
@pytest.mark.parametrize(
"partition_definitions", ["Some string", 123, 75.0, b"abcd", ("Hello", "World"), {"Nice_to": "meet_you"}]
)
def test_input_is_not_a_list(self, partition_definitions, default_params):
default_params["partition_definitions"] = partition_definitions
with pytest.raises(AssertionError) as excinfo:
HiveLoader(**default_params)
assert "partition_definitions has to be a list containing dicts" in str(excinfo.value)
@pytest.mark.parametrize("partition_definitions", ["Some string", 123, 75.0, b"abcd", ("Hello", "World")])
def test_list_input_contains_non_dict_items(self, partition_definitions, default_params):
default_params["partition_definitions"] = [partition_definitions]
with pytest.raises(AssertionError) as excinfo:
HiveLoader(**default_params)
assert "Items of partition_definitions have to be dictionaries" in str(excinfo.value)
def test_column_name_is_missing(self, default_params):
default_params["partition_definitions"][0]["column_name"], default_params["partition_definitions"][1]["column_name"] = None, "f"
with pytest.raises(AssertionError) as excinfo:
HiveLoader(**default_params)
assert "No column name set!" in str(excinfo.value)
@pytest.mark.parametrize("data_type", [13, "no_spark_type", "arrray", "INT", ["IntegerType", "StringType"]])
def test_column_type_not_a_valid_spark_sql_type(self, data_type, default_params):
default_params["partition_definitions"][0]["column_type"], default_params["partition_definitions"][0]["column_type"] = "IntegerType", data_type
with pytest.raises(AssertionError) as excinfo:
HiveLoader(**default_params)
assert "Not a valid (PySpark) datatype for the partition column" in str(excinfo.value)
@pytest.mark.parametrize("default_value", [None, "", [], {}])
def test_default_value_is_empty(self, default_value, default_params, input_df):
default_params["partition_definitions"][0]["default_value"], default_params["partition_definitions"][0]["default_value"] = 3, default_value
with pytest.raises(AssertionError) as excinfo:
loader = HiveLoader(**default_params)
loader.load(input_df)
assert "No default partition value set for partition column" in str(excinfo.value)
def test_default_value_is_missing(self, default_params, input_df):
default_params["partition_definitions"][1].pop("default_value")
with pytest.raises(AssertionError) as excinfo:
loader = HiveLoader(**default_params)
loader.load(input_df)
assert "No default partition value set for partition column" in str(excinfo.value)
@pytest.mark.parametrize("partition", [[0, "d"], [2, "f"], [3, "g"], [6, "j"], [9, "m"]])
class TestLoadPartition(object):
def test_add_new_static_partition(self, input_df, default_params, partition, full_table_name, spark_session):
(
default_params["partition_definitions"][0]["default_value"],
default_params["partition_definitions"][1]["default_value"],
) = partition
loader = HiveLoader(**default_params)
partition_clause = construct_partition_query(loader.partition_definitions)
where_clause = partition_clause.replace(", ", " and ")
where_clause_inverted = where_clause.replace("=", "!=")
df_to_load = input_df.where(where_clause)
count_pre_total = input_df.where(where_clause_inverted).count()
count_to_load = df_to_load.count()
count_post_total = input_df.count()
assert (
count_post_total == count_pre_total + count_to_load
), "Something went wrong in the test setup of the input dataframe (input_df)"
spark_session.sql(
"alter table {tbl} drop partition ({part_def})".format(tbl=full_table_name, part_def=partition_clause)
)
assert (
spark_session.table(full_table_name).count() == count_pre_total
), "test partition was not successfully dropped from output hive table"
assert df_to_load.count() > 0, "Dataframe to load is empty!"
loader.load(df_to_load)
spark_session.catalog.refreshTable(full_table_name)
assert (
spark_session.table(full_table_name).count() == count_post_total
), "test partition was not successfully loaded to output hive table"
def test_overwrite_static_partition(self, input_df, default_params, partition, full_table_name, spark_session):
(
default_params["partition_definitions"][0]["default_value"],
default_params["partition_definitions"][1]["default_value"],
) = partition
loader = HiveLoader(**default_params)
where_clause = construct_partition_query(loader.partition_definitions).replace(", ", " and ")
df_to_load = input_df.where(where_clause)
count_pre_total = spark_session.table(full_table_name).count()
count_to_load = df_to_load.count()
count_post_total = input_df.count()
assert (
count_post_total == count_pre_total
), "Something went wrong in the test setup of the input DataFrame (input_df)"
assert df_to_load.count() > 0, "DataFrame to load is empty!"
loader.load(df_to_load)
spark_session.catalog.refreshTable(full_table_name)
assert (
spark_session.table(full_table_name).count() == count_post_total
), "test partition was not successfully loaded to output hive table"
def test_append_to_static_partition(self, input_df, default_params, partition, full_table_name, spark_session):
(
default_params["partition_definitions"][0]["default_value"],
default_params["partition_definitions"][1]["default_value"],
) = partition
default_params["clear_partition"] = False
loader = HiveLoader(**default_params)
where_clause = construct_partition_query(loader.partition_definitions).replace(", ", " and ")
#
df_to_load = input_df.where(where_clause)
count_pre_total = spark_session.table(full_table_name).count()
count_to_load = df_to_load.count()
count_post_total = count_pre_total + count_to_load
assert df_to_load.count() > 0, "DataFrame to load is empty!"
loader.load(df_to_load)
spark_session.catalog.refreshTable(full_table_name)
assert (
spark_session.table(full_table_name).count() == count_post_total
), "test partition was not successfully loaded to output hive table"
def test_create_partitioned_table(self, input_df, default_params, partition, full_table_name, spark_session):
    """auto_create_table creates a *partitioned* output table when it does not exist yet."""
    (
        default_params["partition_definitions"][0]["default_value"],
        default_params["partition_definitions"][1]["default_value"],
    ) = partition
    default_params["auto_create_table"] = True
    loader = HiveLoader(**default_params)
    # start from a clean slate: the target table must not exist before loading
    spark_session.sql("drop table if exists " + full_table_name)
    spark_session.catalog.setCurrentDatabase(default_params["db_name"])
    assert default_params["table_name"] not in [
        tbl.name for tbl in spark_session.catalog.listTables()
    ], "Test setup of database is not clean. Table already exists!"
    where_clause = construct_partition_query(loader.partition_definitions).replace(", ", " and ")
    df_to_load = input_df.where(where_clause)
    count_to_load = df_to_load.count()
    assert df_to_load.count() > 0, "DataFrame to load is empty!"
    loader.load(df_to_load)
    spark_session.catalog.refreshTable(full_table_name)
    assert default_params["table_name"] in [
        tbl.name for tbl in spark_session.catalog.listTables()
    ], "Table was not created!"
    assert (
        spark_session.table(full_table_name).count() == count_to_load
    ), "test partition was not successfully loaded to automatically created output hive table"
    # "show partitions" raises a Py4JJavaError for non-partitioned tables,
    # which is how we detect that the created table is actually partitioned
    try:
        assert spark_session.sql("show partitions " + full_table_name).count() > 0
    except Py4JJavaError as e:
        raise AssertionError("Created table is not partitioned. " + str(e))
def test_add_new_static_partition_with_overwritten_partition_value(
    self, input_df, default_params, partition, full_table_name, spark_session
):
    """Loading the full input DataFrame into one static partition doubles the table size."""
    (
        default_params["partition_definitions"][0]["default_value"],
        default_params["partition_definitions"][1]["default_value"],
    ) = partition
    default_params["clear_partition"] = False
    loader = HiveLoader(**default_params)
    where_clause = construct_partition_query(loader.partition_definitions).replace(", ", " and ")
    # NOTE(review): output_table is defined *before* load(); the count() calls
    # below rely on Spark's lazy evaluation to observe the freshly loaded rows.
    # Sibling tests call refreshTable() first — confirm none is needed here.
    output_table = spark_session.table(full_table_name)
    count_pre_partition = output_table.where(where_clause).count()
    # NOTE(review): count_post_partition is computed but never used below
    count_post_partition = input_df.count()
    count_post_total = input_df.count() * 2
    assert input_df.count() > 0, "Dataframe to load is empty!"
    loader.load(input_df)
    assert (
        output_table.count() == count_post_total
    ), "test partition was not successfully loaded to output hive table"
    assert (
        output_table.where(where_clause).count() == input_df.count() + count_pre_partition
    ), "test partition was not successfully loaded to output hive table"
|
rt-phb/Spooq | docs/source/base_classes/create_extractor/init.py | from jdbc import JDBCExtractorIncremental, JDBCExtractorFullLoad
from json_files import JSONExtractor
from csv_extractor import CSVExtractor
__all__ = [
"JDBCExtractorIncremental",
"JDBCExtractorFullLoad",
"JSONExtractor",
"CSVExtractor",
]
|
rt-phb/Spooq | tests/unit/transformer/test_threshold_cleaner.py | from builtins import str
from builtins import object
import pytest
from chispa.dataframe_comparer import assert_df_equality
from pyspark.sql import types as sql_types
from pyspark.sql import functions as F
from pyspark.sql import Row
import datetime as dt
from spooq2.transformer import ThresholdCleaner
class TestBasicAttributes(object):
    """Generic transformer attributes exposed by a ThresholdCleaner instance."""

    def test_has_logger(self):
        transformer = ThresholdCleaner()
        assert hasattr(transformer, "logger")

    def test_has_name(self):
        transformer = ThresholdCleaner()
        assert transformer.name == "ThresholdCleaner"

    def test_has_str_representation(self):
        transformer = ThresholdCleaner()
        assert str(transformer) == "Transformer Object of Class ThresholdCleaner"
class TestCleaning(object):
    """Threshold-based cleaning of numeric, string and temporal columns."""
    # fmt: off
    @pytest.fixture(scope="module")
    def input_df(self, spark_session):
        # Each row carries the same logical value in several datatypes; rows
        # 0, 3 and 4 fall outside the configured thresholds.
        input_data = [
            #ids #floats #integers #strings #timestamps #datetimes
            [0, 12.0, 12, "12", dt.datetime(1850,1,1, 12,0,0), dt.date(1850,1,1)],
            [1, 65.7, 65, "65", dt.datetime(2020,6,1, 12,0,0), dt.date(2020,6,1)],
            [2, 300.0, 300, "300", dt.datetime(2020,6,1, 15,0,0), dt.date(2020,6,15)],
            [4, 5000.0, 5000, "5000", dt.datetime(2020,6,1, 16,0,0), dt.date(2020,7,1)],
            [5, -75.0, -75, "-75", dt.datetime(9999,1,1, 12,0,0), dt.date(9999,1,1)],
        ]
        schema = sql_types.StructType(
            [
                sql_types.StructField("id", sql_types.IntegerType(), True),
                sql_types.StructField("floats", sql_types.DoubleType(), False),
                sql_types.StructField("integers", sql_types.LongType(), False),
                sql_types.StructField("strings", sql_types.StringType(), False),
                sql_types.StructField("timestamps", sql_types.TimestampType(), False),
                sql_types.StructField("datetimes", sql_types.DateType(), False),
            ]
        )
        return spark_session.createDataFrame(input_data, schema=schema)
    @pytest.fixture(scope="module")
    def thresholds(self):
        # min/max boundaries per column; the string max for "datetimes" is
        # part of the scenario under test
        return {
            "integers": {"min": 1, "max": 300},
            "floats": {"min": 1.0, "max": 300.0},
            "strings": {"min": "1", "max": "300"},
            "timestamps": {"min": dt.datetime(2020,6,1,12,0,0), "max": dt.datetime(2020,6,1,16,0,0)},
            "datetimes": {"min": dt.date(2020,6,1), "max": "2020-7-1"},
        }
    @pytest.fixture(scope="module")
    def expected_result(self):
        # out-of-range values are expected to be replaced by None
        return {
            "integers": [ 12, 65, 300, None, None],
            "floats": [ 12.0, 65.7, 300.0, None, None],
            "strings": ["12", "65", "300", None, None],
            "timestamps": [None, dt.datetime(2020,6,1, 12,0,0), dt.datetime(2020,6,1, 15,0,0),
                           dt.datetime(2020,6,1, 16,0,0), None],
            "datetimes": [None, dt.date(2020,6,1), dt.date(2020,6,15), dt.date(2020,7,1), None],
        }
    # fmt: on
    @pytest.mark.parametrize("column_name", ["integers", "floats", "timestamps", "datetimes"])
    def test_clean_supported_format(self, column_name, input_df, thresholds, expected_result):
        """Supported datatypes are cleansed without changing the schema."""
        thresholds_to_test = {column_name: thresholds[column_name]}
        transformer = ThresholdCleaner(thresholds=thresholds_to_test)
        df_cleaned = transformer.transform(input_df)
        result = [x[column_name] for x in df_cleaned.collect()]
        expected = expected_result[column_name]
        assert result == expected
        assert input_df.columns == df_cleaned.columns
    @pytest.mark.parametrize("column_name", ["strings"])
    def test_raise_exception_for_unsupported_format(self, column_name, input_df, thresholds):
        """String columns are not supported and raise a ValueError."""
        thresholds_to_test = dict([k_v1 for k_v1 in list(thresholds.items()) if k_v1[0] == column_name])
        transformer = ThresholdCleaner(thresholds_to_test)
        with pytest.raises(ValueError):
            transformer.transform(input_df).count()
    def test_dynamic_default_value(self, spark_session):
        """A Column expression can serve as the replacement for out-of-range values."""
        input_df = spark_session.createDataFrame([
            Row(id=1, num=1),
            Row(id=2, num=2),
            Row(id=3, num=100),
            Row(id=4, num=4),
            Row(id=5, num=-1024),
        ])
        thresholds_to_test = dict(num=dict(min=0, max=99, default=F.col("id") * -1))
        output_df = ThresholdCleaner(thresholds_to_test).transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            Row(id=1, num=1),
            Row(id=2, num=2),
            Row(id=3, num=-3),
            Row(id=4, num=4),
            Row(id=5, num=-5),
        ])
        assert_df_equality(expected_output_df, output_df)
|
rt-phb/Spooq | docs/source/base_classes/create_loader/init.py | from loader import Loader
from hive_loader import HiveLoader
from parquet import ParquetLoader
__all__ = [
"Loader",
"HiveLoader",
"ParquetLoader",
]
|
rt-phb/Spooq | docs/source/base_classes/create_extractor/test_csv.py | <filename>docs/source/base_classes/create_extractor/test_csv.py<gh_stars>0
import pytest
from spooq2.extractor import CSVExtractor
@pytest.fixture()
def default_extractor():
    """CSVExtractor wired to the sample input file."""
    extractor = CSVExtractor(input_path="data/input_data.csv")
    return extractor
class TestBasicAttributes(object):
    """Generic attributes every extractor instance has to provide."""

    def test_logger_should_be_accessible(self, default_extractor):
        assert hasattr(default_extractor, "logger")

    def test_name_is_set(self, default_extractor):
        assert default_extractor.name == "CSVExtractor"

    def test_str_representation_is_correct(self, default_extractor):
        # `unicode` does not exist on Python 3 (NameError); `str` behaves
        # identically here and matches the rest of the repository's tests.
        assert str(default_extractor) == "Extractor Object of Class CSVExtractor"
class TestCSVExtraction(object):
    """Behaviour of CSVExtractor.extract().

    The original methods were missing ``self``: inside a test class, pytest
    passes the instance as the first positional argument, so the fixture
    parameter received the test instance instead of the fixture value.
    """

    def test_count(self, default_extractor):
        """Converted DataFrame has the same count as the input data"""
        expected_count = 312
        actual_count = default_extractor.extract().count()
        assert expected_count == actual_count

    def test_schema(self, default_extractor):
        """Converted DataFrame has the expected schema"""
        # Placeholder from the documentation skeleton — implement before use.
        do_some_stuff()
        assert expected == actual
|
rt-phb/Spooq | tests/data/convert_json_files.py | <gh_stars>1-10
from random import randint
from pyspark.sql import functions as F

# NOTE(review): this script references a `spark` (SparkSession) global that is
# not defined here — it is presumably meant to run inside a pyspark shell;
# confirm before running it standalone.

SCHEMA_VERSION = "1"
INPUT_FILE_NAME = "schema_v{nr}.json".format(nr=SCHEMA_VERSION)
# (input column, output column) pairs to parse into UTC timestamps
DATE_COLUMNS_TO_CONVERT = [("attributes.birthday", "birthday")]

# 1) plain text copies of the raw JSON lines
rdd_text = spark.sparkContext.textFile(INPUT_FILE_NAME)
rdd_text.coalesce(2).saveAsTextFile(
    "schema_v{nr}/textFiles".format(nr=SCHEMA_VERSION))

# 2) sequence files keyed by a random integer, value = JSON line as bytes
rdd_seq = rdd_text.map(lambda x:
                       (randint(1000, 100000), bytearray(x, "utf-8")))
rdd_seq.coalesce(2).saveAsSequenceFile(
    "schema_v{nr}/sequenceFiles".format(nr=SCHEMA_VERSION))

# 3) parquet files with the configured date columns converted from local
#    (Europe/Vienna) strings to UTC timestamps
df = spark.read.json(rdd_text)
for input_col, output_col in DATE_COLUMNS_TO_CONVERT:
    df = df.withColumn(
        output_col,
        F.to_utc_timestamp(timestamp=F.to_timestamp(df[input_col]),
                           tz="Europe/Vienna"))
df.coalesce(2).write.parquet(
    "schema_v{nr}/parquetFiles".format(nr=SCHEMA_VERSION))
|
rt-phb/Spooq | src/spooq2/transformer/newest_by_group.py | <reponame>rt-phb/Spooq
from __future__ import absolute_import
from builtins import str
from pyspark.sql.window import Window # noqa: F401
from pyspark.sql.functions import row_number, when
from .transformer import Transformer
class NewestByGroup(Transformer):
    """
    Groups, orders and selects first element per group.

    Example
    -------
    >>> transformer = NewestByGroup(
    >>>     group_by=["first_name", "last_name"],
    >>>     order_by=["created_at_ms", "version"]
    >>> )

    Parameters
    ----------
    group_by : :any:`str` or :any:`list` of :any:`str`, (Defaults to ['id'])
        List of attributes to be used within the Window Function as Grouping Arguments.
    order_by : :any:`str` or :any:`list` of :any:`str`, (Defaults to ['updated_at', 'deleted_at'])
        List of attributes to be used within the Window Function as Ordering Arguments.
        All columns will be sorted in **descending** order.

    Raises
    ------
    :any:`exceptions.AttributeError`
        If any Attribute in :py:data:`group_by` or :py:data:`order_by` is not contained in the
        input DataFrame.

    Note
    ----
    PySpark's :py:class:`~pyspark.sql.Window` function is used internally.
    The first row (:py:meth:`~pyspark.sql.functions.row_number`) per window will be selected and returned.
    """

    def __init__(self, group_by=None, order_by=None):
        super(NewestByGroup, self).__init__()
        # None sentinels replace the previous mutable default arguments
        # (list defaults are shared across all calls); the documented
        # defaults are applied here instead.
        if group_by is None:
            group_by = ["id"]
        if order_by is None:
            order_by = ["updated_at", "deleted_at"]
        # a single column name is accepted as well and wrapped into a list
        self.group_by = list(group_by) if isinstance(group_by, list) else [group_by]
        self.order_by = list(order_by) if isinstance(order_by, list) else [order_by]
        self.logger.debug("group by columns: " + str(self.group_by))
        self.logger.debug("order by columns: " + str(self.order_by))

    def transform(self, input_df):
        """
        Reduces the input DataFrame to the newest record per group.

        Parameters
        ----------
        input_df : :py:class:`pyspark.sql.DataFrame`
            Input DataFrame

        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            DataFrame containing only the first row of each window partition.
        """
        self.logger.debug(
            """{grp_by} used for grouping, {ord_by} used for ordering""".format(
                grp_by=self.group_by, ord_by=self.order_by
            )
        )
        window = self._construct_window_function(input_df, self.group_by, self.order_by)
        return (
            input_df.select("*", row_number().over(window).alias("row_nr"))
            .where("row_nr = 1")
            .drop("row_nr")
        )

    def _construct_window_function(self, input_df, group_by, order_by):
        """Constructs a window function based on the given input params"""
        group_by_query = [input_df[col] for col in group_by]
        # descending with NULLs last, so records missing the ordering
        # attribute never win over records that have it
        order_by_query = [input_df[col].desc_nulls_last() for col in order_by]
        return Window.partitionBy(group_by_query).orderBy(order_by_query)
|
rt-phb/Spooq | src/spooq2/transformer/transformer.py | """
Transformers take a :py:class:`pyspark.sql.DataFrame` as an input, transform it accordingly
and return a PySpark DataFrame.
Each Transformer class has to have a `transform` method which takes no arguments
and returns a PySpark DataFrame.
Possible transformation methods can be **Selecting the most up to date record by id**,
**Exploding an array**, **Filter (on an exploded array)**, **Apply basic threshold cleansing** or
**Map the incoming DataFrame to at provided structure**.
"""
from builtins import object
import logging
class Transformer(object):
    """
    Base class all Spooq transformers derive from.

    Attributes
    ----------
    name : :any:`str`
        The concrete subclass' name (``type(self).__name__``).
    logger : :any:`logging.Logger`
        Shared "spooq2" logger used by all transformer instances.
    """

    def __init__(self):
        # the subclass name doubles as the transformer's display name
        self.name = self.__class__.__name__
        self.logger = logging.getLogger("spooq2")

    def transform(self, input_df):
        """
        Performs a transformation on a DataFrame.

        Parameters
        ----------
        input_df : :py:class:`pyspark.sql.DataFrame`
            Input DataFrame

        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            Transformed DataFrame.

        Note
        ----
        Only the input DataFrame is passed here; all other parameters are
        provided when the transformer object is initialized.
        """
        raise NotImplementedError("This method has to be implemented in the subclasses")

    def __str__(self):
        return "Transformer Object of Class " + self.name
|
rt-phb/Spooq | docs/source/base_classes/create_loader/test_parquet.py | import pytest
from pyspark.sql.dataframe import DataFrame
from spooq2.loader import ParquetLoader
@pytest.fixture(scope="module")
def output_path(tmpdir_factory):
    """Module-scoped temporary directory for the parquet output."""
    tmp_dir = tmpdir_factory.mktemp("parquet_output")
    return str(tmp_dir)
@pytest.fixture(scope="module")
def default_loader(output_path):
    """ParquetLoader with default settings, partitioned by gender."""
    return ParquetLoader(
        compression_codec=None,
        explicit_partition_values=None,
        partition_by="attributes.gender",
        path=output_path,
    )
@pytest.fixture(scope="module")
def input_df(spark_session):
    """Sample user data (schema v1) read from the shared test fixtures."""
    source_path = "../data/schema_v1/parquetFiles"
    return spark_session.read.parquet(source_path)
@pytest.fixture(scope="module")
def loaded_df(default_loader, input_df, spark_session, output_path):
    """Persists input_df via the loader and reads the result back from disk."""
    default_loader.load(input_df)
    persisted_df = spark_session.read.parquet(output_path)
    return persisted_df
class TestBasicAttributes(object):
    """Generic attributes every loader instance has to provide."""

    def test_logger_should_be_accessible(self, default_loader):
        assert hasattr(default_loader, "logger")

    def test_name_is_set(self, default_loader):
        assert default_loader.name == "ParquetLoader"

    def test_str_representation_is_correct(self, default_loader):
        # `unicode` does not exist on Python 3 (NameError); `str` behaves
        # identically here and matches the rest of the repository's tests.
        assert str(default_loader) == "loader Object of Class ParquetLoader"
class TestParquetLoader(object):
    """Persisting a DataFrame as parquet files.

    Fixes: test methods in a class need ``self`` (pytest passes the instance
    as first positional arg), and the bodies referenced the undefined names
    ``output_df`` and ``loaded`` instead of the ``loaded_df`` fixture.
    """

    def test_count_did_not_change(self, loaded_df, input_df):
        """Persisted DataFrame has the same number of records than the input DataFrame"""
        assert input_df.count() == loaded_df.count() and input_df.count() > 0

    def test_schema_is_unchanged(self, loaded_df, input_df):
        """Loaded DataFrame has the same schema as the input DataFrame"""
        assert loaded_df.schema == input_df.schema
|
rt-phb/Spooq | tests/unit/transformer/test_flattener.py | <filename>tests/unit/transformer/test_flattener.py
import pytest
from chispa.dataframe_comparer import assert_df_equality
from pyspark.sql import Row
import datetime
from spooq2.transformer import Flattener
@pytest.fixture
def flattener():
    """Flattener configured with technical (non-pretty) column names."""
    transformer = Flattener(pretty_names=False)
    return transformer
def assert_mapping_equality(mapping_1, mapping_2, spark):
    """Compare two flattener mappings; on mismatch fail with a DataFrame diff.

    Returns True when the mappings are equal. Otherwise the mappings are
    converted to DataFrames so chispa produces a readable row-by-row diff.
    """
    if mapping_1 == mapping_2:
        return True
    columns = ["name", "source", "type"]
    df_left = spark.createDataFrame(mapping_1, columns)
    df_right = spark.createDataFrame(mapping_2, columns)
    assert_df_equality(df_left, df_right)
class TestBasicAttributes:
    """Generic transformer attributes exposed by the Flattener."""

    def test_logger_should_be_accessible(self, flattener):
        assert hasattr(flattener, "logger")

    def test_name_is_set(self, flattener):
        assert flattener.name == "Flattener"

    def test_str_representation_is_correct(self, flattener):
        representation = str(flattener)
        assert representation == "Transformer Object of Class Flattener"
class TestAlreadyFlatDataFrames:
    """DataFrames without nested structures pass through essentially unchanged."""
    def test_single_column(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            string_val="Hello World"
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            ("Hello World", )], schema=["string_val"])
        assert_df_equality(expected_output_df, output_df)
    def test_multiple_columns_of_same_datatype(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            int_val_1=4789,
            int_val_2=4790,
            int_val_3=4791
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (4789, 4790, 4791)], schema=["int_val_1", "int_val_2", "int_val_3"])
        assert_df_equality(expected_output_df, output_df)
    def test_multiple_columns_of_different_datatype(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            int_val=4789,
            string_val="Hello World",
            date_val=datetime.date(2021, 1, 14)
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (4789, "Hello World", datetime.date(2021, 1, 14))], schema=["int_val", "string_val", "date_val"])
        assert_df_equality(expected_output_df, output_df)
    def test_multiple_columns_of_different_datatype_keeping_original_columns(self, spark_session):
        input_df = spark_session.createDataFrame([Row(
            int_val=4789,
            string_val="Hello World",
            date_val=datetime.date(2021, 1, 14)
        )])
        flattener = Flattener(keep_original_columns=True)
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            Row(original_columns=Row(int_val=4789, string_val="Hello World", date_val=datetime.date(2021, 1, 14)),
                int_val=4789, string_val="Hello World", date_val=datetime.date(2021, 1, 14))])
        # createDataFrame infers nullable=True; the Flattener emits this
        # struct as non-nullable, so align the expected schema
        expected_output_df.schema["original_columns"].nullable = False
        assert_df_equality(output_df, expected_output_df)
class TestDataFrameContainingArrays:
    """Array columns are exploded; scalar columns are repeated per exploded row."""
    def test_single_array(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            array_val=[4789, 4790, 4791]
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (4789, ), (4790, ), (4791, )], schema=["array_val"])
        assert_df_equality(expected_output_df, output_df)
    def test_single_array_with_other_columns(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            array_val=[4789, 4790, 4791],
            timestamp_val=datetime.datetime(2021, 1, 14, 8, 10, 14)
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (datetime.datetime(2021, 1, 14, 8, 10, 14), 4789),
            (datetime.datetime(2021, 1, 14, 8, 10, 14), 4790),
            (datetime.datetime(2021, 1, 14, 8, 10, 14), 4791)],
            schema=["timestamp_val", "array_val"])
        assert_df_equality(expected_output_df, output_df)
    def test_single_array_with_other_columns_keeping_original_columns(self, spark_session):
        input_df = spark_session.createDataFrame([Row(
            array_val=[4789, 4790, 4791],
            timestamp_val=datetime.datetime(2021, 1, 14, 8, 10, 14)
        )])
        flattener = Flattener(keep_original_columns=True)
        output_df = flattener.transform(input_df)
        # the (un-exploded) original row is repeated for every exploded element
        expected_output_df = spark_session.createDataFrame([
            Row(original_columns=Row(array_val=[4789, 4790, 4791], timestamp_val=datetime.datetime(2021, 1, 14, 8, 10, 14)),
                timestamp_val=datetime.datetime(2021, 1, 14, 8, 10, 14), array_val=4789),
            Row(original_columns=Row(array_val=[4789, 4790, 4791], timestamp_val=datetime.datetime(2021, 1, 14, 8, 10, 14)),
                timestamp_val=datetime.datetime(2021, 1, 14, 8, 10, 14), array_val=4790),
            Row(original_columns=Row(array_val=[4789, 4790, 4791], timestamp_val=datetime.datetime(2021, 1, 14, 8, 10, 14)),
                timestamp_val=datetime.datetime(2021, 1, 14, 8, 10, 14), array_val=4791),
        ])
        expected_output_df.schema["original_columns"].nullable = False
        assert_df_equality(expected_output_df, output_df)
    def test_multiple_arrays(self, spark_session, flattener):
        # two independent arrays produce the cartesian product of their elements
        input_df = spark_session.createDataFrame([Row(
            array_val_1=[4789, 4790, 4791],
            array_val_2=["How", "Are", "You", "?"]
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (4789, "How"),
            (4789, "Are"),
            (4789, "You"),
            (4789, "?"),
            (4790, "How"),
            (4790, "Are"),
            (4790, "You"),
            (4790, "?"),
            (4791, "How"),
            (4791, "Are"),
            (4791, "You"),
            (4791, "?")],
            schema=["array_val_1", "array_val_2"])
        assert_df_equality(expected_output_df, output_df)
    def test_multiple_arrays_with_other_columns(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            array_val_1=[4789, 4790, 4791],
            array_val_2=["How", "Are", "You", "?"],
            double_val=43.102
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (43.102, 4789, "How"),
            (43.102, 4789, "Are"),
            (43.102, 4789, "You"),
            (43.102, 4789, "?"),
            (43.102, 4790, "How"),
            (43.102, 4790, "Are"),
            (43.102, 4790, "You"),
            (43.102, 4790, "?"),
            (43.102, 4791, "How"),
            (43.102, 4791, "Are"),
            (43.102, 4791, "You"),
            (43.102, 4791, "?")],
            schema=["double_val", "array_val_1", "array_val_2"])
        assert_df_equality(expected_output_df, output_df)
    def test_array_nested_in_array(self, spark_session, flattener):
        # nested arrays are fully exploded; mixed element types end up as strings
        input_df = spark_session.createDataFrame([Row(
            array_val=[["Here's", "My", "Number", ":"], [555, 127, 53, 90]],
            string_val="How are you?"
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            ("How are you?", "Here's"),
            ("How are you?", "My"),
            ("How are you?", "Number"),
            ("How are you?", ":"),
            ("How are you?", "555"),
            ("How are you?", "127"),
            ("How are you?", "53"),
            ("How are you?", "90")],
            schema=["string_val", "array_val"])
        assert_df_equality(expected_output_df, output_df)
    def test_array_nested_in_struct(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            struct_val=Row(array_val=[4789, 4790, 4791],
                           string_val="How are you?")
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            ("How are you?", 4789),
            ("How are you?", 4790),
            ("How are you?", 4791)],
            schema=["struct_val_string_val", "struct_val_array_val"])
        assert_df_equality(expected_output_df, output_df)
    def test_struct_nested_in_array(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            array_val=[Row(int_val=4789,
                           string_val="Hello Darkness",
                           date_val=datetime.date(2021, 1, 14)),
                       Row(int_val=4790,
                           string_val="My Old Friend",
                           date_val=datetime.date(2021, 1, 15))],
            double_val=43.102
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (43.102, 4789, "Hello Darkness", datetime.date(2021, 1, 14)),
            (43.102, 4790, "My Old Friend", datetime.date(2021, 1, 15))],
            schema=["double_val", "array_val_int_val",
                    "array_val_string_val", "array_val_date_val"])
        assert_df_equality(expected_output_df, output_df)
class TestDataFrameContainingStructs:
    """Struct columns are flattened; names are prefixed with the struct path."""
    def test_single_struct_single_attribute(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            struct_val=Row(int_val=4789)
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (4789, )], schema=["struct_val_int_val"])
        assert_df_equality(expected_output_df, output_df)
    def test_single_struct_multiple_attributes(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            struct_val=Row(int_val=4789, string_val="Hello World")
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (4789, "Hello World")], schema=["struct_val_int_val", "struct_val_string_val"])
        assert_df_equality(expected_output_df, output_df)
    def test_nested_struct_attributes(self, spark_session, flattener):
        # four levels of nesting -> the full path is joined with underscores
        input_df = spark_session.createDataFrame([Row(
            struct_val_1=Row(
                struct_val_2=Row(
                    struct_val_3=Row(
                        struct_val_4=Row(int_val=4789),
                        long_val=478934243342334),
                    string_val="Hello"),
                double_val=43.12),
            timestamp_val=datetime.datetime(2021, 1, 1, 12, 30, 15)
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame(
            [(4789, 478934243342334, "Hello", 43.12, datetime.datetime(2021, 1, 1, 12, 30, 15))],
            schema=["struct_val_1_struct_val_2_struct_val_3_struct_val_4_int_val",
                    "struct_val_1_struct_val_2_struct_val_3_long_val",
                    "struct_val_1_struct_val_2_string_val",
                    "struct_val_1_double_val",
                    "timestamp_val"]
        )
        assert_df_equality(expected_output_df, output_df)
class TestComplexRecipes:
    """End-to-end flattening of a realistic nested document (the "donut" sample)."""
    @pytest.fixture(scope="class")
    def input_df(self, spark_session):
        """Taken from https://opensource.adobe.com/Spry/samples/data_region/JSONDataSetSample.html"""
        return spark_session.createDataFrame([Row(
            batters=Row(
                batter=[Row(id="1001", type="Regular"),
                        Row(id="1002", type="Chocolate"),
                        Row(id="1003", type="Blueberry"),
                        Row(id="1004", type="Devil's Food")]),
            id="0001",
            name="Cake",
            ppu=0.55,
            topping=[Row(id="5001", type="None"),
                     Row(id="5002", type="Glazed"),
                     Row(id="5005", type="Sugar"),
                     Row(id="5007", type="Powdered Sugar"),
                     Row(id="5006", type="Chocolate with Sprinkles"),
                     Row(id="5003", type="Chocolate"),
                     Row(id="5004", type="Maple")],
            type="donut",
        )])
    @pytest.fixture(scope="class")
    def expected_output_data(self):
        # cartesian product: 4 batters x 7 toppings = 28 flattened rows
        return [
            ("0001", "Cake", 0.55, "donut", "1001", "Regular", "5001", "None" ),
            ("0001", "Cake", 0.55, "donut", "1001", "Regular", "5002", "Glazed" ),
            ("0001", "Cake", 0.55, "donut", "1001", "Regular", "5005", "Sugar" ),
            ("0001", "Cake", 0.55, "donut", "1001", "Regular", "5007", "Powdered Sugar" ),
            ("0001", "Cake", 0.55, "donut", "1001", "Regular", "5006", "Chocolate with Sprinkles" ),
            ("0001", "Cake", 0.55, "donut", "1001", "Regular", "5003", "Chocolate" ),
            ("0001", "Cake", 0.55, "donut", "1001", "Regular", "5004", "Maple" ),
            ("0001", "Cake", 0.55, "donut", "1002", "Chocolate", "5001", "None" ),
            ("0001", "Cake", 0.55, "donut", "1002", "Chocolate", "5002", "Glazed" ),
            ("0001", "Cake", 0.55, "donut", "1002", "Chocolate", "5005", "Sugar" ),
            ("0001", "Cake", 0.55, "donut", "1002", "Chocolate", "5007", "Powdered Sugar" ),
            ("0001", "Cake", 0.55, "donut", "1002", "Chocolate", "5006", "Chocolate with Sprinkles" ),
            ("0001", "Cake", 0.55, "donut", "1002", "Chocolate", "5003", "Chocolate" ),
            ("0001", "Cake", 0.55, "donut", "1002", "Chocolate", "5004", "Maple" ),
            ("0001", "Cake", 0.55, "donut", "1003", "Blueberry", "5001", "None" ),
            ("0001", "Cake", 0.55, "donut", "1003", "Blueberry", "5002", "Glazed" ),
            ("0001", "Cake", 0.55, "donut", "1003", "Blueberry", "5005", "Sugar" ),
            ("0001", "Cake", 0.55, "donut", "1003", "Blueberry", "5007", "Powdered Sugar" ),
            ("0001", "Cake", 0.55, "donut", "1003", "Blueberry", "5006", "Chocolate with Sprinkles" ),
            ("0001", "Cake", 0.55, "donut", "1003", "Blueberry", "5003", "Chocolate" ),
            ("0001", "Cake", 0.55, "donut", "1003", "Blueberry", "5004", "Maple" ),
            ("0001", "Cake", 0.55, "donut", "1004", "Devil's Food", "5001", "None" ),
            ("0001", "Cake", 0.55, "donut", "1004", "Devil's Food", "5002", "Glazed" ),
            ("0001", "Cake", 0.55, "donut", "1004", "Devil's Food", "5005", "Sugar" ),
            ("0001", "Cake", 0.55, "donut", "1004", "Devil's Food", "5007", "Powdered Sugar" ),
            ("0001", "Cake", 0.55, "donut", "1004", "Devil's Food", "5006", "Chocolate with Sprinkles" ),
            ("0001", "Cake", 0.55, "donut", "1004", "Devil's Food", "5003", "Chocolate" ),
            ("0001", "Cake", 0.55, "donut", "1004", "Devil's Food", "5004", "Maple" )]
    @pytest.fixture(scope="class")
    def expected_output_df(self, expected_output_data, spark_session):
        # technical (full-path) column names
        return spark_session.createDataFrame(
            expected_output_data,
            schema=["id", "name", "ppu", "type", "batters_batter_id", "batters_batter_type",
                    "topping_id", "topping_type"])
    @pytest.fixture(scope="class")
    def expected_output_df_pretty(self, expected_output_data, spark_session):
        # shortened ("pretty") column names
        return spark_session.createDataFrame(
            expected_output_data,
            schema=["id", "name", "ppu", "type", "batter_id", "batter_type",
                    "topping_id", "topping_type"])
    @pytest.fixture(scope="class")
    def expected_output_df_pretty_with_original_columns(self, expected_output_data, input_df, spark_session):
        original_columns = input_df.first()
        expected_output_data_with_original_columns = [
            (original_columns, *row)
            for row
            in expected_output_data
        ]
        output_df = spark_session.createDataFrame(
            expected_output_data_with_original_columns,
            schema=["original_columns", "id", "name", "ppu", "type", "batter_id", "batter_type",
                    "topping_id", "topping_type"])
        output_df.schema["original_columns"].nullable = False
        return output_df
    def test_donut(self, input_df, expected_output_df, flattener):
        output_df = flattener.transform(input_df)
        assert_df_equality(expected_output_df, output_df)
    def test_pretty_donut(self, input_df, expected_output_df_pretty):
        flattener = Flattener(pretty_names=True)
        output_df = flattener.transform(input_df)
        assert_df_equality(expected_output_df_pretty, output_df)
    def test_pretty_donut_with_original_columns(self, input_df, expected_output_df_pretty_with_original_columns):
        flattener = Flattener(pretty_names=True, keep_original_columns=True)
        output_df = flattener.transform(input_df)
        assert_df_equality(expected_output_df_pretty_with_original_columns, output_df)
class TestPrettyColumnNames:
    """pretty_names=True drops path prefixes unless that would cause duplicates."""
    @pytest.fixture
    def flattener(self):
        # overrides the module-level fixture for this class only
        return Flattener(pretty_names=True)
    def test_simple_renames(self, flattener, spark_session):
        input_df = spark_session.createDataFrame([Row(
            struct_val=Row(int_val=4789, string_val="Hello World")
        )])
        expected_output_df = spark_session.createDataFrame([
            (4789, "Hello World")], schema=["int_val", "string_val"])
        output_df = flattener.transform(input_df)
        assert_df_equality(output_df, expected_output_df)
    def test_duplicated_column_names(self, flattener, spark_session):
        # name clashes keep the longer, prefixed name for the second struct
        input_df = spark_session.createDataFrame([Row(
            struct_val=Row(int_val=4789, string_val="Hello World"),
            struct_val_2=Row(int_val=4790, string_val="How are you?")
        )])
        expected_output_df = spark_session.createDataFrame([
            (4789, "Hello World", 4790, "How are you?")], schema=["int_val", "string_val", "struct_val_2_int_val", "struct_val_2_string_val"])
        output_df = flattener.transform(input_df)
        assert_df_equality(output_df, expected_output_df)
    def test_nested_struct_attributes(self, flattener, spark_session):
        input_df = spark_session.createDataFrame([Row(
            struct_val_1=Row(
                struct_val_2=Row(
                    struct_val_3=Row(
                        struct_val_4=Row(int_val=4789),
                        int_val=4790),
                    string_val="Hello"),
                double_val=43.12),
            timestamp_val=datetime.datetime(2021, 1, 1, 12, 30, 15)
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame(
            [(4789, 4790, "Hello", 43.12, datetime.datetime(2021, 1, 1, 12, 30, 15))],
            schema=["int_val", "struct_val_3_int_val", "string_val", "double_val", "timestamp_val"]
        )
        assert_df_equality(expected_output_df, output_df)
    def test_multiple_arrays_with_other_columns(self, flattener, spark_session):
        input_df = spark_session.createDataFrame([Row(
            array_val_1=[4789, 4790, 4791],
            array_val_2=["How", "Are", "You", "?"],
            double_val=43.102
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (43.102, 4789, "How"),
            (43.102, 4789, "Are"),
            (43.102, 4789, "You"),
            (43.102, 4789, "?"),
            (43.102, 4790, "How"),
            (43.102, 4790, "Are"),
            (43.102, 4790, "You"),
            (43.102, 4790, "?"),
            (43.102, 4791, "How"),
            (43.102, 4791, "Are"),
            (43.102, 4791, "You"),
            (43.102, 4791, "?")],
            schema=["double_val", "array_val_1", "array_val_2"])
        assert_df_equality(expected_output_df, output_df)
    def test_array_nested_in_array(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            array_val=[["Here's", "My", "Number", ":"], [555, 127, 53, 90]],
            string_val="How are you?"
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            ("How are you?", "Here's"),
            ("How are you?", "My"),
            ("How are you?", "Number"),
            ("How are you?", ":"),
            ("How are you?", "555"),
            ("How are you?", "127"),
            ("How are you?", "53"),
            ("How are you?", "90")],
            schema=["string_val", "array_val"])
        assert_df_equality(expected_output_df, output_df)
    def test_array_nested_in_struct(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            struct_val=Row(array_val=[4789, 4790, 4791],
                           string_val="How are you?")
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            ("How are you?", 4789),
            ("How are you?", 4790),
            ("How are you?", 4791)],
            schema=["string_val", "array_val"])
        assert_df_equality(expected_output_df, output_df)
    def test_struct_nested_in_array(self, spark_session, flattener):
        input_df = spark_session.createDataFrame([Row(
            array_val=[Row(int_val=4789,
                           string_val="Hello Darkness",
                           date_val=datetime.date(2021, 1, 14)),
                       Row(int_val=4790,
                           string_val="My Old Friend",
                           date_val=datetime.date(2021, 1, 15))],
            double_val=43.102
        )])
        output_df = flattener.transform(input_df)
        expected_output_df = spark_session.createDataFrame([
            (43.102, 4789, "Hello Darkness", datetime.date(2021, 1, 14)),
            (43.102, 4790, "My Old Friend", datetime.date(2021, 1, 15))],
            schema=["double_val", "int_val", "string_val", "date_val"])
        assert_df_equality(expected_output_df, output_df)
class TestKeepOriginalColumns:
    """Behaviour of the ``keep_original_columns`` flag of the Flattener."""

    @pytest.fixture
    def flattener(self):
        # Fresh instance per test with the flag under test enabled.
        return Flattener(keep_original_columns=True)

    def test_simple_struct(self, flattener, spark_session):
        """The untouched input columns are packed into a single
        ``original_columns`` struct next to the flattened columns."""
        input_df = spark_session.createDataFrame([Row(
            struct_val=Row(int_val=4789, string_val="Hello World")
        )])
        expected_output_df = spark_session.createDataFrame([Row(
            original_columns=Row(struct_val=Row(int_val=4789, string_val="Hello World")),
            int_val=4789,
            string_val="Hello World"
        )])
        # createDataFrame produces a nullable struct, but the transformer's
        # packed column is non-nullable — align the schemas so the comparison
        # checks only the data.  (NOTE(review): assumes the Flattener builds
        # original_columns via a non-nullable struct expression — confirm.)
        expected_output_df.schema["original_columns"].nullable = False
        output_df = flattener.transform(input_df)
        assert_df_equality(output_df, expected_output_df)
|
rt-phb/Spooq | tests/unit/transformer/test_enum_cleaner.py | import pytest
from pyspark.sql import Row
from pyspark.sql import functions as F, types as T
from chispa.dataframe_comparer import assert_df_equality
import datetime as dt
from spooq2.transformer import EnumCleaner
class TestBasicAttributes(object):
    """The EnumCleaner transformer exposes the common Transformer attributes."""

    def test_has_logger(self):
        transformer = EnumCleaner()
        assert hasattr(transformer, "logger")

    def test_has_name(self):
        transformer = EnumCleaner()
        assert transformer.name == "EnumCleaner"

    def test_has_str_representation(self):
        transformer = EnumCleaner()
        assert str(transformer) == "Transformer Object of Class EnumCleaner"
class TestExceptionsRaisedAndDefaultParametersApplied:
    """Validation errors and fallback defaults of the cleaning definitions."""

    @pytest.fixture(scope="class")
    def input_df(self, spark_session):
        # Single string column "b"; "negative" is the value to be cleansed.
        return spark_session.createDataFrame([Row(b="positive"), Row(b="negative"), Row(b="positive")])

    @pytest.fixture(scope="class")
    def expected_output_df(self, spark_session):
        # "negative" is not among the allowed elements -> replaced by None.
        return spark_session.createDataFrame([Row(b="positive"), Row(b=None), Row(b="positive")])

    def test_missing_elements_list(self, input_df):
        """Missing elements attribute in the cleaning definition dict raises an exception"""
        cleaning_definition = dict(b=dict(mode="allow", default="cleansed!"))
        with pytest.raises(ValueError) as excinfo:
            EnumCleaner(cleaning_definitions=cleaning_definition).transform(input_df)
        assert "Enumeration-based cleaning requires a non-empty list of elements per cleaning rule!" in str(excinfo.value)
        assert "Spooq did not find such a list for column: b" in str(excinfo.value)

    def test_empty_elements_list(self, input_df):
        """An empty elements attribute in the cleaning definition dict raises an exception"""
        cleaning_definition = dict(b=dict(elements=[], mode="allow", default="cleansed!"))
        with pytest.raises(ValueError) as excinfo:
            EnumCleaner(cleaning_definitions=cleaning_definition).transform(input_df)
        assert "Enumeration-based cleaning requires a non-empty list of elements per cleaning rule!" in str(excinfo.value)
        assert "Spooq did not find such a list for column: b" in str(excinfo.value)

    def test_missing_mode_defaults_to_allow(self, input_df, expected_output_df):
        """Missing 'mode' attribute is set to the default: 'allow'"""
        cleaning_definition = dict(b=dict(elements=["positive"], default=None))
        output_df = EnumCleaner(cleaning_definitions=cleaning_definition).transform(input_df)
        assert_df_equality(expected_output_df, output_df)

    def test_default_value_defaults_to_none(self, input_df, expected_output_df):
        """Missing 'default' attribute is set to the default: None"""
        cleaning_definition = dict(b=dict(elements=["positive"], mode="allow"))
        output_df = EnumCleaner(cleaning_definitions=cleaning_definition).transform(input_df)
        assert_df_equality(expected_output_df, output_df)
class TestCleaningOfStrings:
    """Allow- and disallow-mode cleansing on string columns."""

    def test_active_inactive_status_allowed(self, spark_session):
        """Only 'active' and 'inactive' allowed, other values are set to 'cleansed!' (except None)"""
        input_df = spark_session.createDataFrame([
            Row(id=1, status="active"),
            Row(id=2, status=""),
            Row(id=3, status="off"),
            Row(id=4, status="inactive"),
            Row(id=5, status=None),
            Row(id=6, status="aktiv"),
        ])
        expected_output_df = spark_session.createDataFrame([
            Row(id=1, status="active"),
            Row(id=2, status="cleansed!"),
            Row(id=3, status="cleansed!"),
            Row(id=4, status="inactive"),
            Row(id=5, status=None),
            Row(id=6, status="cleansed!"),
        ])
        cleaning_definition = dict(status=dict(elements=["active", "inactive"], mode="allow", default="cleansed!"))
        output_df = EnumCleaner(cleaning_definitions=cleaning_definition).transform(input_df)
        assert_df_equality(expected_output_df, output_df)

    def test_active_inactive_status_disallowed(self, spark_session):
        """'off', '' and None values are not allowed and set to 'inactive' (except for None -> works as expected)"""
        input_df = spark_session.createDataFrame([
            Row(id=1, status="active"),
            Row(id=2, status=""),
            Row(id=3, status="off"),
            Row(id=4, status="inactive"),
            Row(id=5, status=None),
            Row(id=6, status="aktiv"),
        ])
        expected_output_df = spark_session.createDataFrame([
            Row(id=1, status="active"),
            Row(id=2, status="inactive"),
            Row(id=3, status="inactive"),
            Row(id=4, status="inactive"),
            Row(id=5, status=None),
            Row(id=6, status="aktiv"),
        ])
        # NOTE: None is listed in `elements` but row 5 keeps its NULL --
        # `isin()` never matches NULL, so NULL inputs always pass through.
        cleaning_definition = dict(status=dict(elements=["off", "", None], mode="disallow", default="inactive"))
        output_df = EnumCleaner(cleaning_definitions=cleaning_definition).transform(input_df)
        assert_df_equality(expected_output_df, output_df)

    def test_nullify_almost_null_fields(self, spark_session):
        """Sets values to None which are semantically but not syntactically NULL"""
        input_df = spark_session.createDataFrame([
            Row(id=1, status="active"),
            Row(id=2, status=""),
            Row(id=3, status="None"),
            Row(id=4, status="inactive"),
            Row(id=5, status=None),
            Row(id=6, status="NULL"),
        ])
        expected_output_df = spark_session.createDataFrame([
            Row(id=1, status="active"),
            Row(id=2, status=None),
            Row(id=3, status=None),
            Row(id=4, status="inactive"),
            Row(id=5, status=None),
            Row(id=6, status=None),
        ])
        # "default" is omitted -> cleansed values fall back to None.
        cleaning_definition = dict(status=dict(elements=["", "None", "NULL"], mode="disallow"))
        output_df = EnumCleaner(cleaning_definitions=cleaning_definition).transform(input_df)
        assert_df_equality(expected_output_df, output_df)

    def test_keep_nulls(self, spark_session):
        """Allow only some elements and Null input values are ignored (works as expected)"""
        input_df = spark_session.createDataFrame([
            Row(id=1, sex="f"),
            Row(id=2, sex=""),
            Row(id=3, sex="m"),
            Row(id=4, sex="x"),
            Row(id=5, sex=None),
            Row(id=6, sex="Don't want to tell"),
        ])
        expected_output_df = spark_session.createDataFrame([
            Row(id=1, sex="f"),
            Row(id=2, sex="cleansed!"),
            Row(id=3, sex="m"),
            Row(id=4, sex="x"),
            Row(id=5, sex=None),
            Row(id=6, sex="cleansed!"),
        ])
        cleaning_definition = dict(sex=dict(elements=["f", "m", "x"], mode="allow", default="cleansed!"))
        output_df = EnumCleaner(cleaning_definitions=cleaning_definition).transform(input_df)
        assert_df_equality(expected_output_df, output_df)
class TestCleaningOfIntegers:
    """Enum cleansing also works for integer columns (type is preserved)."""

    def test_version_numbers_allowed(self, spark_session):
        """Only the numbers 112 and 212 are allowed, other values are set to -1"""
        input_df = spark_session.createDataFrame([
            Row(id=1, version=112),
            Row(id=2, version=None),
            Row(id=3, version=212),
            Row(id=4, version=220),
            Row(id=5, version=-112),
            Row(id=6, version=0),
        ])
        expected_output_df = spark_session.createDataFrame([
            Row(id=1, version=112),
            Row(id=2, version=None),
            Row(id=3, version=212),
            Row(id=4, version=-1),
            Row(id=5, version=-1),
            Row(id=6, version=-1),
        ])
        cleaning_definition = dict(version=dict(elements=[112, 212], mode="allow", default=-1))
        output_df = EnumCleaner(cleaning_definitions=cleaning_definition).transform(input_df)
        assert_df_equality(expected_output_df, output_df)

    def test_version_numbers_disallowed(self, spark_session):
        """The numbers -112 and 0 are not allowed and set to -1"""
        input_df = spark_session.createDataFrame([
            Row(id=1, version=112),
            Row(id=2, version=None),
            Row(id=3, version=212),
            Row(id=4, version=220),
            Row(id=5, version=-112),
            Row(id=6, version=0),
        ])
        expected_output_df = spark_session.createDataFrame([
            Row(id=1, version=112),
            Row(id=2, version=None),
            Row(id=3, version=212),
            Row(id=4, version=220),
            Row(id=5, version=-1),
            Row(id=6, version=-1),
        ])
        cleaning_definition = dict(version=dict(elements=[-112, 0], mode="disallow", default=-1))
        output_df = EnumCleaner(cleaning_definitions=cleaning_definition).transform(input_df)
        assert_df_equality(expected_output_df, output_df)
class TestDynamicDefaultValues:
    """Column expressions (not only literals) can serve as replacement defaults."""

    @pytest.fixture(scope="class")
    def input_df(self, spark_session):
        return spark_session.createDataFrame([
            Row(id=1, status="active"),
            Row(id=2, status=""),
            Row(id=3, status="off"),
            Row(id=4, status="inactive"),
            Row(id=5, status=None),
            Row(id=6, status="aktiv"),
        ])

    def test_current_date(self, input_df, spark_session):
        """Substitute the cleansed values with the current date"""
        cleaning_definitions = dict(status=dict(elements=["active", "inactive"], default=F.current_date()))
        # NOTE(review): comparing against date.today() can flake if the test
        # run crosses midnight between Spark's evaluation and this call.
        expected_output_df = spark_session.createDataFrame([
            Row(id=1, status="active"),
            Row(id=2, status=str(dt.date.today())),
            Row(id=3, status=str(dt.date.today())),
            Row(id=4, status="inactive"),
            Row(id=5, status=None),
            Row(id=6, status=str(dt.date.today())),
        ])
        output_df = EnumCleaner(cleaning_definitions=cleaning_definitions).transform(input_df)
        assert_df_equality(expected_output_df, output_df)

    def test_column_reference(self, input_df, spark_session):
        """Substitute the cleansed values with the calculated string based on another column"""
        # The replacement is derived from the "id" column of the same row.
        default_value_func = (F.col("id") * 10).cast(T.StringType())
        cleaning_definitions = dict(status=dict(elements=["active", "inactive"], default=default_value_func))
        expected_output_df = spark_session.createDataFrame([
            Row(id=1, status="active"),
            Row(id=2, status="20"),
            Row(id=3, status="30"),
            Row(id=4, status="inactive"),
            Row(id=5, status=None),
            Row(id=6, status="60"),
        ])
        output_df = EnumCleaner(cleaning_definitions=cleaning_definitions).transform(input_df)
        assert_df_equality(expected_output_df, output_df)
|
rt-phb/Spooq | src/spooq2/transformer/enum_cleaner.py | import sys
import pyspark.sql.functions as F, types as T
from pyspark.sql.column import Column
from .transformer import Transformer
class EnumCleaner(Transformer):
    """
    Cleanses a dataframe based on lists of allowed|disallowed values.

    Example
    -------
    >>> transformer = EnumCleaner(
    >>>     cleaning_definitions={
    >>>         "status": {
    >>>             "elements": ["active", "inactive"],
    >>>         },
    >>>         "version": {
    >>>             "elements": ["", "None", "none", "null", "NULL"],
    >>>             "mode": "disallow",
    >>>             "default": None
    >>>         },
    >>>     }
    >>> )

    Parameters
    ----------
    cleaning_definitions : :py:class:`dict`
        Dictionary containing column names and respective cleansing rules

    Note
    ----
    Following cleansing rule attributes per column are supported:

    * elements, mandatory - :class:`list`
        A list of elements which will be used to allow or reject (based on mode) values from the input DataFrame.
    * mode, allow|disallow, defaults to 'allow' - :any:`str`
        "allow" will set all values which are NOT in the list (ignoring NULL) to the default value.
        "disallow" will set all values which ARE in the list (ignoring NULL) to the default value.
    * default, defaults to None - :class:`~pyspark.sql.column.Column` or any primitive Python value
        If a value gets cleansed it gets replaced with the provided default value.

    Returns
    -------
    :any:`pyspark.sql.DataFrame`
        The transformed DataFrame

    Raises
    ------
    :any:`exceptions.ValueError`
        Enumeration-based cleaning requires a non-empty list of elements per cleaning rule!
        Spooq did not find such a list for column: {column_name}
    :any:`exceptions.ValueError`
        Only the following modes are supported by EnumCleaner: 'allow' and 'disallow'.

    Warning
    -------
    None values are explicitly ignored as input values because `F.lit(None).isin(["elem1", "elem2"])` will neither
    return True nor False but None.
    If you want to replace Null values you should use the method ~pyspark.sql.DataFrame.fillna from Spark.
    """

    def __init__(self, cleaning_definitions=None):
        super().__init__()
        # BUG FIX: the previous signature used a mutable ``{}`` default
        # argument, which is shared across all instances; use a None
        # sentinel instead (backward compatible: omitting the argument
        # still yields an empty definition dict).
        self.cleaning_definitions = cleaning_definitions if cleaning_definitions is not None else {}
        self.logger.debug("Enumeration List: " + str(self.cleaning_definitions))

    def transform(self, input_df):
        self.logger.debug("input_df Schema: " + input_df._jdf.schema().treeString())

        for column_name, cleaning_definition in list(self.cleaning_definitions.items()):
            self.logger.debug(f"Cleaning Definition for Column {column_name}: {str(cleaning_definition)}")
            elements = cleaning_definition.get("elements", None)
            if not elements:
                # Single message string (the original passed two args to
                # ValueError, producing a tuple representation).
                raise ValueError(
                    "Enumeration-based cleaning requires a non-empty list of elements per cleaning rule!"
                    f"\nSpooq did not find such a list for column: {column_name}"
                )
            mode = cleaning_definition.get("mode", "allow")
            substitute = cleaning_definition.get("default", None)
            # Remember the input type so the conditional rewrite keeps it.
            data_type = input_df.schema[column_name].dataType
            if not isinstance(substitute, Column):
                substitute = F.lit(substitute)

            # Deduplicated allow/disallow handling: only the mapping of
            # "listed"/"unlisted" to "keep"/"substitute" differs.
            if mode == "allow":
                value_if_listed, value_if_unlisted = F.col(column_name), substitute
            elif mode == "disallow":
                value_if_listed, value_if_unlisted = substitute, F.col(column_name)
            else:
                raise ValueError(
                    "Only the following modes are supported by EnumCleaner: 'allow' and 'disallow'."
                )

            # NULL inputs pass through untouched because ``isin`` evaluates
            # to NULL (neither True nor False) for them; see class Warning.
            input_df = input_df.withColumn(
                column_name,
                F.when(F.col(column_name).isNull(), F.lit(None))
                .otherwise(
                    F.when(F.col(column_name).isin(elements), value_if_listed)
                    .otherwise(value_if_unlisted)
                )
                .cast(data_type)
            )

        return input_df
|
rt-phb/Spooq | src/spooq2/extractor/jdbc.py | <gh_stars>0
from __future__ import absolute_import
from builtins import str
from past.builtins import basestring
import pandas as pd
from copy import copy
from pyspark.sql import SparkSession
from pyspark.sql.functions import min as sql_min
from pyspark.sql.functions import max as sql_max
from pyspark.sql.types import StructField, StructType
from pyspark.sql.types import IntegerType, StringType
from .extractor import Extractor
class JDBCExtractor(Extractor):
    """Base class for JDBC extractors: validates connection options and owns a SparkSession."""

    def __init__(self, jdbc_options, cache=True):
        super(JDBCExtractor, self).__init__()
        self._assert_jdbc_options(jdbc_options)
        self.jdbc_options = jdbc_options
        self.cache = cache
        self.spark = (
            SparkSession.Builder()
            .enableHiveSupport()
            .appName("spooq2.extractor: {nm}".format(nm=self.name))
            .getOrCreate()
        )

    def _load_from_jdbc(self, query, jdbc_options, cache=True):
        """Run *query* through Spark's JDBC reader configured by *jdbc_options*.

        BUG FIX: the ``jdbc_options`` parameter was previously shadowed by
        ``copy(self.jdbc_options)`` and therefore ignored.  All current
        callers pass ``self.jdbc_options``, so behaviour is unchanged for
        them; the copy still protects the caller's dict from the injected
        "dbtable" key.
        """
        jdbc_options = copy(jdbc_options)
        # Wrap the query so it can be used where a table name is expected.
        jdbc_options["dbtable"] = "({q}) as table_statement".format(q=query)
        source_df = self.spark.read.format("jdbc").options(**jdbc_options).load()
        if cache:
            source_df.cache()
        return source_df

    def _assert_jdbc_options(self, jdbc_options):
        """Ensure all mandatory JDBC options are present and provided as strings."""
        for key in ["url", "driver", "user", "password"]:
            assert key in jdbc_options, key + " is missing from the jdbc_options."
            assert isinstance(jdbc_options[key], basestring), key + " has to be provided as a string object."
class JDBCExtractorFullLoad(JDBCExtractor):
    """
    Connects to a JDBC Source and fetches the data defined by the provided Query.

    Examples
    --------
    >>> import spooq2.extractor as E
    >>>
    >>> extractor = E.JDBCExtractorFullLoad(
    >>>     query="select id, first_name, last_name, gender, created_at from test_db.users",
    >>>     jdbc_options={
    >>>         "url": "jdbc:postgresql://localhost/test_db",
    >>>         "driver": "org.postgresql.Driver",
    >>>         "user": "read_only",
    >>>         "password": "<PASSWORD>",
    >>>     },
    >>> )
    >>>
    >>> extracted_df = extractor.extract()
    >>> type(extracted_df)
    pyspark.sql.dataframe.DataFrame

    Parameters
    ----------
    query : :any:`str`
        Defines the actual query sent to the JDBC Source. This has to be a valid SQL query
        with respect to the source system (e.g., T-SQL for Microsoft SQL Server).
    jdbc_options : :class:`dict`, optional
        A set of parameters to configure the connection to the source:

        * **url** (:any:`str`) - A JDBC URL of the form jdbc:subprotocol:subname.
          e.g., jdbc:postgresql://localhost:5432/dbname
        * **driver** (:any:`str`) - The class name of the JDBC driver to use to connect to this URL.
        * **user** (:any:`str`) - Username to authenticate with the source database.
        * **password** (:any:`str`) - Password to authenticate with the source database.

        See :meth:`pyspark.sql.DataFrameReader.jdbc` and
        https://spark.apache.org/docs/2.4.3/sql-data-sources-jdbc.html for more information.
    cache : :any:`bool`, defaults to :any:`True`
        Defines, whether to :meth:`~pyspark.sql.DataFrame.cache` the dataframe, after it is loaded.
        Otherwise the Extractor will reload all data from the source system each time an action is
        performed on the DataFrame.

    Raises
    ------
    :any:`exceptions.AssertionError`:
        All jdbc_options values need to be present as string variables.
    """

    def __init__(self, query, jdbc_options, cache=True):
        super(JDBCExtractorFullLoad, self).__init__(jdbc_options=jdbc_options, cache=cache)
        self.query = query

    def extract(self):
        """
        This is the Public API Method to be called for all classes of Extractors

        Parameters
        ----------

        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            PySpark dataframe from the input JDBC connection.
        """
        return self._load_from_jdbc(self.query, jdbc_options=self.jdbc_options, cache=self.cache)
class JDBCExtractorIncremental(JDBCExtractor):
    """
    Connects to a JDBC Source and fetches the data with respect to boundaries.
    The boundaries are inferred from the partition to load and logs from previous loads
    stored in the ``spooq2_values_table``.

    Examples
    --------
    >>> import spooq2.extractor as E
    >>>
    >>> # Boundaries derived from previously logged extractions => ("2020-01-31 03:29:59", False)
    >>>
    >>> extractor = E.JDBCExtractorIncremental(
    >>>     partition="20200201",
    >>>     jdbc_options={
    >>>         "url": "jdbc:postgresql://localhost/test_db",
    >>>         "driver": "org.postgresql.Driver",
    >>>         "user": "read_only",
    >>>         "password": "<PASSWORD>",
    >>>     },
    >>>     source_table="users",
    >>>     spooq2_values_table="spooq2_jdbc_log_users",
    >>> )
    >>>
    >>> extractor._construct_query_for_partition(extractor.partition)
    select * from users where updated_at > "2020-01-31 03:29:59"
    >>>
    >>> extracted_df = extractor.extract()
    >>> type(extracted_df)
    pyspark.sql.dataframe.DataFrame

    Parameters
    ----------
    partition : :any:`int` or :any:`str`
        Partition to extract. Needed for logging the incremental load in
        the ``spooq2_values_table``.
    jdbc_options : :class:`dict`, optional
        A set of parameters to configure the connection to the source:

        * **url** (:any:`str`) - A JDBC URL of the form jdbc:subprotocol:subname.
          e.g., jdbc:postgresql://localhost:5432/dbname
        * **driver** (:any:`str`) - The class name of the JDBC driver to use to connect to this URL.
        * **user** (:any:`str`) - Username to authenticate with the source database.
        * **password** (:any:`str`) - Password to authenticate with the source database.

        See :meth:`pyspark.sql.DataFrameReader.jdbc` and
        https://spark.apache.org/docs/2.4.3/sql-data-sources-jdbc.html for more information.
    source_table : :any:`str`
        Defines the tablename of the source to be loaded from. For example 'purchases'.
        This is necessary to build the query.
    spooq2_values_table : :any:`str`
        Defines the Hive table where previous and future loads of a specific source table
        are logged. This is necessary to derive boundaries for the current partition.
    spooq2_values_db : :any:`str`, optional
        Defines the Database where the ``spooq2_values_table`` is stored.
        Defaults to `'spooq2_values'`.
    spooq2_values_partition_column : :any:`str`, optional
        The column name which is used for the boundaries.
        Defaults to `'updated_at'`.
    cache : :any:`bool`, defaults to :any:`True`
        Defines, whether to :meth:`~pyspark.sql.DataFrame.cache` the dataframe, after it is
        loaded. Otherwise the Extractor will reload all data from the source system again,
        if a second action upon the dataframe is performed.

    Raises
    ------
    :any:`exceptions.AssertionError`:
        All jdbc_options values need to be present as string variables.
    """

    def __init__(
        self,
        partition,
        jdbc_options,
        source_table,
        spooq2_values_table,
        spooq2_values_db="spooq2_values",
        spooq2_values_partition_column="updated_at",
        cache=True,
    ):
        super(JDBCExtractorIncremental, self).__init__(jdbc_options)
        self.partition = partition
        self.source_table = source_table
        self.spooq2_values_table = spooq2_values_table
        self.spooq2_values_db = spooq2_values_db
        self.spooq2_values_partition_column = spooq2_values_partition_column
        self.cache = cache

    def extract(self):
        """Extract the partition's data and log its boundaries for future loads.

        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            PySpark dataframe from the input JDBC connection, restricted to the
            boundaries of the requested partition.
        """
        query = self._construct_query_for_partition(partition=self.partition)
        loaded_df = self._load_from_jdbc(query, self.jdbc_options, cache=self.cache)
        self._update_boundaries_for_current_partition_on_table(
            loaded_df,
            self.spooq2_values_db,
            self.spooq2_values_table,
            self.partition,
            self.spooq2_values_partition_column,
        )
        return loaded_df

    def _construct_query_for_partition(self, partition):
        """Constructs and returns a predicated Query :any:`str` depending on the `partition`

        Based on the partition and previous loading logs (`spooq2_values_table`),
        boundaries will be calculated and injected in the where clause of the query.

        Parameters
        ----------
        partition : Integer or :any:`str`

        Returns
        -------
        :any:`str`
            Complete Query :any:`str` to be used for JDBC Connections
        """
        select_statement = "select *"
        where_clause = ""
        lower_bound, upper_bound = self._get_boundaries_for_import(partition)

        def _fix_boundary_value_syntax(boundary):
            """If a boundary value is not a number, it has to be quoted for correct syntax."""
            try:
                boundary = int(boundary)
            except ValueError:
                boundary = '"{bnd}"'.format(bnd=boundary)
            return boundary

        if lower_bound and upper_bound:
            where_clause = "where {chk_col} > {low_bnd} and {chk_col} <= {up_bnd}".format(
                chk_col=self.spooq2_values_partition_column,
                low_bnd=_fix_boundary_value_syntax(lower_bound),
                up_bnd=_fix_boundary_value_syntax(upper_bound),
            )
        elif lower_bound:
            where_clause = "where {chk_col} > {low_bnd}".format(
                chk_col=self.spooq2_values_partition_column, low_bnd=_fix_boundary_value_syntax(lower_bound),
            )
        elif upper_bound:
            where_clause = "where {chk_col} <= {up_bnd}".format(
                chk_col=self.spooq2_values_partition_column, up_bnd=_fix_boundary_value_syntax(upper_bound),
            )

        query = "{select} from {tbl} {where}".format(select=select_statement, tbl=self.source_table, where=where_clause)
        # Collapse repeated whitespace (e.g. when the where clause is empty).
        return " ".join(query.split())

    def _get_boundaries_for_import(self, partition):
        """
        Returns the lower and upper boundaries to be used in the where clause of the query.
        This information is deducted from the ``partition`` parameter and previous loading logs
        (persisted in `spooq2_values_table`).

        Parameters
        ----------
        partition : :py:class:`int` or :any:`str`

        Returns
        -------
        Tuple of :any:`str`
            Values of the tuple can also be `False`
        """
        pd_df = self._get_previous_boundaries_table_as_pd(self.spooq2_values_db, self.spooq2_values_table)
        partition = int(partition)
        table_is_empty = pd_df.empty
        partition_exists = not pd_df.loc[pd_df["dt"] == partition].empty
        succeeding_partition_exists = not pd_df.loc[pd_df["dt"] > partition].empty
        preceding_partition_exists = not pd_df.loc[pd_df["dt"] < partition].empty

        if table_is_empty:
            """First import ever, starting from zero"""
            return False, False
        elif partition_exists:
            """Partition to insert already exists (reload / backfill)"""
            return self._get_lower_and_upper_bounds_from_current_partition(pd_df, partition)
        else:
            """Partition to insert does not yet exist (new day to insert)"""
            if preceding_partition_exists and not succeeding_partition_exists:
                """No equal or newer partitions exist (newest partition will be imported). Default Case"""
                return (
                    self._get_upper_bound_from_preceding_partition(pd_df, partition),
                    False,
                )
            elif not preceding_partition_exists and succeeding_partition_exists:
                """No older or equal partitions exist"""
                return (
                    False,
                    self._get_lower_bound_from_succeeding_partition(pd_df, partition),
                )
            elif preceding_partition_exists and succeeding_partition_exists:
                """At least one older, no equal and at least one newer partitions exist"""
                return (
                    self._get_upper_bound_from_preceding_partition(pd_df, partition),
                    self._get_lower_bound_from_succeeding_partition(pd_df, partition),
                )
            else:
                raise Exception(
                    """
                    ERROR: Something weird happened...
                    There was a logical problem getting the correct boundaries
                    from the spooq2value table!
                    Please debug me ;-)
                """
                )

    def _get_previous_boundaries_table_as_pd(self, spooq2_values_db, spooq2_values_table):
        """
        Converts the previous_boundaries_table and returns a Pandas Dataframe.

        Parameters
        ----------
        spooq2_values_db : :any:`str`
        spooq2_values_table : :any:`str`

        Returns
        -------
        Pandas Dataframe
            Content of `spooq2_values_table` from `spooq2_values_db`
        """
        return self._get_previous_boundaries_table(spooq2_values_db, spooq2_values_table).toPandas()

    def _get_previous_boundaries_table(self, spooq2_values_db, spooq2_values_table):
        """
        Fetches and returns a DataFrame containing the logs of previous loading
        jobs (`spooq2_values_table`) of this entity

        Parameters
        ----------
        spooq2_values_db : :any:`str`
        spooq2_values_table : :any:`str`

        Returns
        -------
        PySpark DataFrame
            Content of `spooq2_values_table` from `spooq2_values_db`
        """
        table_name = "{db}.{tbl}".format(db=spooq2_values_db, tbl=spooq2_values_table)
        self.logger.info("Loading Spooq2Values Table from {name}".format(name=table_name))
        df = self.spark.table(table_name)
        try:
            self.spooq2_values_partition_column
        except AttributeError:
            # Defensive fallback: derive the partition column from the log
            # table itself. NOTE(review): __init__ always sets the attribute,
            # so this branch looks unreachable — confirm before removing.
            self.spooq2_values_partition_column = df.select("partition_column").distinct().collect()[0].partition_column
        return df

    @staticmethod
    def _get_lower_bound_from_succeeding_partition(pd_df, partition):
        # First value of the oldest partition that is newer than `partition`.
        succeeding_pd_df = pd_df.loc[pd_df["dt"] > partition]
        return succeeding_pd_df.sort_values("dt", ascending=1).iloc[0].first_value

    @staticmethod
    def _get_upper_bound_from_preceding_partition(pd_df, partition):
        # Last value of the newest partition that is older than `partition`.
        preceding_pd_df = pd_df.loc[pd_df["dt"] < partition]
        return preceding_pd_df.sort_values("dt", ascending=0).iloc[0].last_value

    @staticmethod
    def _get_lower_and_upper_bounds_from_current_partition(pd_df, partition):
        current_pd_df = pd_df.loc[pd_df["dt"] == partition].iloc[0]
        return (current_pd_df.first_value, current_pd_df.last_value)

    def _get_lowest_boundary_from_df(self, df, spooq2_values_partition_column):
        """Return the minimum non-null value of the partition column in ``df``.

        BUG FIX: the previous implementation built this exact expression as a
        string and ran it through ``eval``, which was a needless risk and
        failed for column names that are not valid Python attribute names.
        """
        return (
            df.na.drop(how="any", subset=[spooq2_values_partition_column])
            .select(sql_min(df[spooq2_values_partition_column]).alias("minimum"))
            .collect()[0]
            .minimum
        )

    def _get_highest_boundary_from_df(self, df, spooq2_values_partition_column):
        """Return the maximum non-null value of the partition column in ``df``.

        BUG FIX: rewritten without ``eval`` (see _get_lowest_boundary_from_df).
        """
        return (
            df.na.drop(how="any", subset=[spooq2_values_partition_column])
            .select(sql_max(df[spooq2_values_partition_column]).alias("maximum"))
            .collect()[0]
            .maximum
        )

    def _update_boundaries_for_current_partition_on_table(
        self, df, spooq2_values_db, spooq2_values_table, partition, spooq2_values_partition_column
    ):
        """Compute the loaded partition's min/max and persist them to the log table."""
        lowest_boundary = self._get_lowest_boundary_from_df(df, spooq2_values_partition_column)
        highest_boundary = self._get_highest_boundary_from_df(df, spooq2_values_partition_column)
        self._write_boundaries_to_hive(
            lowest_boundary,
            highest_boundary,
            spooq2_values_db,
            spooq2_values_table,
            partition,
            spooq2_values_partition_column,
        )

    def _write_boundaries_to_hive(
        self,
        lowest_boundary,
        highest_boundary,
        spooq2_values_db,
        spooq2_values_table,
        partition,
        spooq2_values_partition_column,
    ):
        """Insert one log row (partition + boundaries) into the spooq2 values table."""
        self.spark.conf.set("hive.exec.dynamic.partition", "true")
        self.spark.conf.set("hive.exec.dynamic.partition.mode", "nonstrict")
        schema = StructType(
            [
                StructField("partition_column", StringType(), True),
                StructField("dt", IntegerType(), False),
                StructField("first_value", StringType(), False),
                StructField("last_value", StringType(), False),
            ]
        )
        input_data = [
            [
                str(spooq2_values_partition_column),
                int(partition),
                str(lowest_boundary),
                str(highest_boundary),
            ]
        ]
        df_output = self.spark.createDataFrame(input_data, schema=schema)
        # (Removed an unused "dt" kwarg from the format call below.)
        df_output.repartition(1).write.mode("overwrite").insertInto(
            "{db}.{tbl}".format(db=spooq2_values_db, tbl=spooq2_values_table)
        )
|
rt-phb/Spooq | setup.py | <reponame>rt-phb/Spooq<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import re

from setuptools import setup, find_packages

version_filename = 'src/spooq2/_version.py'
# Read the version without importing the package (importing would require
# its runtime dependencies).  BUG FIX: the file handle was previously
# opened without a context manager and never closed.
with open(version_filename, "rt") as version_file_handle:
    version_file = version_file_handle.read()
version_regex = r"^__version__ = ['\"]([^'\"]*)['\"]"
regex_result = re.search(version_regex, version_file, re.M)
if regex_result:
    version_string = regex_result.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (version_filename,))

with open('README.md') as readme_file:
    readme = readme_file.read()

with open('CHANGELOG.rst') as history_file:
    history = history_file.read()

requirements = [
    'pandas',
    'future'
]

setup(
    name='Spooq2',
    version=version_string,
    description="""
        Spooq helps to run basic ETL processes in Data Lakes based on Apache Spark.
        All extractors, transformers, and loaders are single components which can be mixed to one's liking.
        """,
    long_description=readme + '\n\n' + history,
    author="<NAME>",
    # BUG FIX: setuptools expects author_email as a string; multiple
    # addresses are given comma-separated, not as a list.
    author_email='<EMAIL>, <EMAIL>',
    url='https://github.com/Breaka84/Spooq',
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    include_package_data=True,
    install_requires=requirements,
    zip_safe=False,
    keywords=[
        'spooq', 'spark', 'hive', 'cloudera', 'hadoop', 'etl',
        'data ingestion', 'data wrangling', 'databricks', 'big data'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.7',
    ]
)
|
rt-phb/Spooq | docs/source/base_classes/create_transformer/init.py | <filename>docs/source/base_classes/create_transformer/init.py
from newest_by_group import NewestByGroup
from mapper import Mapper
from exploder import Exploder
from threshold_cleaner import ThresholdCleaner
from sieve import Sieve
from no_id_dropper import NoIdDropper
__all__ = [
"NewestByGroup",
"Mapper",
"Exploder",
"ThresholdCleaner",
"Sieve",
"NoIdDropper",
]
|
fyang3/narrative_chains | src/evaluation.py | <filename>src/evaluation.py
# testing narrative cloze
# Each pair is (context_chain, held_out_event): every event is a
# (verb, argument, dependency) triple, and the model must rank the held-out
# event given the chain (narrative cloze evaluation).
testing_pairs = [
    ([("receive", "clients", "nsubj"), ("download", "clients", "dobj")], ("make", "clients", "nsubj")),
    ([("fled", "gelman", "nsubj"), ("found", "gelman", "nsubj")], ("take", "gelman", "nsubj")),
    ([("am", "i", "nsubj"), ("did", "i", "nsubj"), ("think", "i", "nsubj")], ('believe', "i", "nsubj")),
    ([("bought", "team", "dobj"), ("included", "team", "dobj")], ("take", "team", "nsubj")),
    ([("heard", "parents", "nsubj"), ("talking", "parents", "nsubj")], ("choose", "parents", "nsubj")),
    ([("buy", 'stock', 'dobj'), ("lend", 'money', 'dobj')], ("struggle", 'edison', 'nsubj')),
    ([("advocated", 'league', 'nsubj'), ("fought", 'league', 'nsubj')], ("withdraw", 'league', 'nsubj')),
    ([("was", 'cranston', 'nsubj'), ("spent", 'cranston', 'nsubj'), ("fight", 'cranston', 'nsubj')], ("raise", 'cranston', 'nsubj')),
    ([("have", 'administration', 'nsubj'), ("convinced", 'administration', 'nsubj'), ("look", 'administration', 'nsubj')], ("push", 'administration', 'nsubj')),
    ([('hug', 'father', 'dobj'), ('tell', 'father', 'dobj')], ('love', 'father', 'dobj')),
    ([('be', 'i', 'nsubj'), ('get', 'i', 'nsubj'), ('have', 'i', 'nsubj')], ('call', 'i', 'nsubj'))
]
def get_position(predictions, correct):
    """Return the 1-based rank of *correct*'s verb within *predictions*.

    Only the verb (first tuple element) is compared.  If the verb does not
    occur at all, ``len(predictions)`` is returned as a worst-case rank.
    """
    target_verb = correct[0]
    for rank, prediction in enumerate(predictions, start=1):
        if prediction[0] == target_verb:
            return rank
    return len(predictions)
print("\nEvaluating Narrative Cloze Positions: ")
positions = list()
# NOTE(review): relies on predict() and verbs being defined by code loaded
# before this script -- confirm the import/execution order.
for chain, correct in testing_pairs:
    predictions = predict(chain)
    position = get_position(predictions, correct)
    positions.append(position)
    print("position: ", position)

# computing averages
average = sum(positions) / len(positions)
print("\naverage position: ", average)
# The adjusted average drops chains whose correct event was not found at all
# (position == len(verbs), i.e. the worst-case rank).
adjusted_average = sum([x for x in positions if x != len(verbs)]) / len([x for x in positions if x != len(verbs)])
print("adjusted average position: ", adjusted_average)
fyang3/narrative_chains | src/data.py | <reponame>fyang3/narrative_chains<gh_stars>1-10
# read file and clean input
# Read the corpus and strip its fixed framing.
# NOTE(review): slices off the first 13 and last 7 lines -- presumably a
# fixed header/footer in the corpus file; confirm against INPUT_FILE.
with open(INPUT_FILE) as f:
    text = " ".join(f.readlines()[13:-7])

# write events to output file, one (verb, argument, dependency) tuple per line
with open(OUTPUT_FILE, "w") as file:
    for event in ordered:
        file.write("\n" + str(event))

# TODO: pickle subjects, objects, coreference, total
# Simple attribute container for everything the model needs at query time.
class Model: pass

model = Model()
# Convert the outer defaultdicts to plain dicts so the pickle does not
# depend on their lambda default factories (lambdas are not picklable).
model.subjects, model.objects, model.coreference = dict(subjects), dict(objects), dict(coreference)
model.total, model.total_coreference = total, total_coreference

print("\nDumping Model")
with open("model.pickle", "wb") as file:
    pickle.dump(model, file)
print("successfully saved to model.pickle")
fyang3/narrative_chains | src/parse.py | <reponame>fyang3/narrative_chains
import neuralcoref
import spacy
from collections import defaultdict
# identify events
ordered = list()
subjects = defaultdict(lambda: defaultdict(int))
objects = defaultdict(lambda: defaultdict(int))
total = 0
# chunking text and parsing
spacy.prefer_gpu()
# Load the pipeline once and attach neuralcoref (previously reloaded on every
# chunk inside the loop, which is very slow and functionally identical).
nlp = spacy.load("en")
neuralcoref.add_to_pipe(nlp)
for i in range(0, MAX_LENGTH, CHUNK_LENGTH):
    chunk = text[i:i + CHUNK_LENGTH]
    print("\nchunk ", int(i / CHUNK_LENGTH))
    # resolve entities and gramatically parse
    print("parsing chunk")
    corpus = nlp(chunk)
    print("mining events")
    for token in corpus:
        if token.pos == spacy.symbols.VERB:
            for argument in token.children:
                # resolve argument coreference entity
                if argument._.in_coref:
                    # BUGFIX: this assignment was misspelled `esolved`, leaving
                    # `resolved` undefined (NameError) the first time a
                    # coreferent argument was seen.
                    resolved = argument._.coref_clusters[0].main.text
                else:
                    resolved = argument.text
                if argument.dep_ in {"nsubj", "nsubjpass"}:
                    subjects[token.lemma_.lower()][argument.text.lower()] += 1
                    ordered.append((token.lemma_, resolved.lower(), argument.dep_))
                    total += 1
                elif argument.dep_ in {"dobj", "iobj", "pobj", "obj"}:
                    objects[token.lemma_.lower()][argument.text.lower()] += 1
                    ordered.append((token.lemma_, resolved.lower(), argument.dep_))
                    total += 1
verbs = set(subjects.keys()) | set(objects.keys())
print("total verb count: ", len(verbs))
# Build the pairwise coreference matrix: coreference[v1][v2] counts arguments
# of v2 (subjects matched against subjects, objects against objects) that also
# occur with v1.
print("\nComputing Coreference Matrix")
coreference = defaultdict(lambda: defaultdict(int))
total_coreference = 0
for verb1 in verbs:
    # Hoist verb1's argument sets out of the inner loop (they are invariant).
    verb1_subjects = set(subjects[verb1].keys())
    verb1_objects = set(objects[verb1].keys())
    for verb2 in verbs:
        shared = sum(1 for argument in subjects[verb2] if argument in verb1_subjects)
        shared += sum(1 for argument in objects[verb2] if argument in verb1_objects)
        if shared:
            coreference[verb1][verb2] += shared
            total_coreference += shared
print("total coreference count: ", total_coreference)
fyang3/narrative_chains | src/models.py | <reponame>fyang3/narrative_chains
import math
# marginal probability of event: P(e)
def marginal(event):
    """Estimate P(e) as the verb's relative frequency over all mined events."""
    verb, dependency, dep_type = event
    verb_count = sum(subjects[verb].values()) + sum(objects[verb].values())
    return verb_count / total
# joint probability of two events
def joint(event1, event2):
    """Estimate P(e1, e2) from the symmetric coreference counts of their verbs."""
    verb1 = event1[0]
    verb2 = event2[0]
    pair_count = coreference[verb1][verb2] + coreference[verb2][verb1]
    return pair_count / total_coreference
# pointwise mutual information approximation of two events
def pmi(event1, event2):
    """PMI(e1, e2) = log P(e1, e2) - log P(e1) - log P(e2).

    Returns 0.0 when any probability estimate is zero (log undefined).
    """
    numerator = joint(event1, event2)
    marginal1, marginal2 = marginal(event1), marginal(event2)
    if marginal1 == 0 or marginal2 == 0 or numerator == 0:
        return 0.0
    # Simplified from math.exp(math.log(m1) + math.log(m2)), which is just
    # m1 * m2 computed through a needless (and lossier) exp/log round-trip.
    return math.log(numerator) - math.log(marginal1) - math.log(marginal2)
# chain prediction
def predict(chain):
    """Rank candidate verbs for extending *chain* by their summed PMI.

    chain: list of (verb, dependency, dep_type) events.
    Returns (verb, score) pairs sorted by score descending, with the chain's
    own verbs excluded from the candidates.
    """
    scores = dict()
    for verb in verbs:
        score = 0
        for event in chain:
            score += pmi(event, (verb, None, None))
        scores[verb] = score
    # BUGFIX: the exclusion set previously stored whole event tuples, so the
    # membership test against candidate verb *strings* never matched and chain
    # verbs were never filtered out of the ranking.
    chain_verbs = {event[0] for event in chain}
    cleaned_scores = {candidate: s for candidate, s in scores.items()
                      if candidate not in chain_verbs}
    ranked_scores = sorted(list(cleaned_scores.items()), key=lambda x: x[1], reverse=True)
    return ranked_scores
Seraphyx/reddit_explorer | setup.py | #!/usr/bin/env python
from setuptools import setup
from setuptools import find_packages
# Package metadata for pip/setuptools; all packages under the repo root are included.
setup(
    name='rexplore',
    version='1.0',
    description='Reddit API storer and explorer',
    author='Seraphyx',
    author_email='<EMAIL>',  # placeholder left by repository anonymization
    url='https://github.com/Seraphyx/reddit_explorer',
    packages = find_packages()
)
|
Seraphyx/reddit_explorer | rexplore/db/__init__.py | from .start import *
from .clean import *
from .insert import * |
Seraphyx/reddit_explorer | rexplore/db/start.py | import configparser
import _mysql
from _mysql_exceptions import *
from rexplore import db
'''
For documentation:
http://mysqlclient.readthedocs.io/user_guide.html?highlight=mysql_options#introduction
'''
def config(config_path):
    """Parse the INI file at *config_path* and return the ConfigParser."""
    parser = configparser.ConfigParser()
    parser.read(config_path)
    return parser
def read_sql(sql_path, **kwargs):
    """Read a SQL template and substitute each ``$name`` placeholder.

    Keyword argument names map to ``$name`` tokens in the file; values must be
    strings. The substituted query text is returned.
    """
    with open(sql_path, 'r') as sql_file:
        query = sql_file.read()
    for name, value in kwargs.items():
        query = query.replace('$' + name, value)
    return query
class initialize(object):
    """Thin wrapper over a raw `_mysql` connection configured from an INI file.

    Provides schema creation plus single-row inserts of reddit user/comment
    records. A fresh connection is opened (and closed) per operation.
    """

    def __init__(self, config_path):
        # Parse connection settings once up front.
        self.config = config(config_path)
        # self.con()

    def connect(self):
        """Open and return a new `_mysql` connection using the [MySql] section."""
        return _mysql.connect(
            host=self.config.get('MySql', 'host'),
            port=self.config.getint('MySql', 'port'),
            user=self.config.get('MySql', 'user'),
            passwd=self.config.get('MySql', 'password'),
            db=self.config.get('MySql', 'db'))

    def create_schema(self):
        """Create the `user` and `comment` tables from the bundled SQL files."""
        con = self.connect()
        print('=== Creating [%s.user] table' % self.config.get('MySql', 'db'))
        create_user = read_sql('sql/CREATE_user.sql')
        con.query(create_user)
        print('=== Creating [%s.comment] table' % self.config.get('MySql', 'db'))
        # Renamed from `create_user` (copy-paste misnomer): this is the comment DDL.
        create_comment = read_sql('sql/CREATE_comment.sql')
        con.query(create_comment)
        con.close()

    def insert_single(self, table, values_dict):
        """INSERT one row into *table*; dict keys are columns, values are cell values.

        NOTE(security): values are interpolated directly into the SQL string and
        string values are not escaped — unsafe for untrusted input. Prefer the
        escaping path used by insert_single_comment, or a parameterized API.
        """
        # Initialize
        con = self.connect()
        sql = "INSERT INTO " + self.config.get('MySql', 'db') + "." + table + " "
        first = True
        values = []
        # (A no-op `con.escape_string("'")` call, whose result was discarded,
        # has been removed here.)
        for col, value in values_dict.items():
            if first:
                sql += "(" + col
                first = False
            else:
                sql += ", " + col
            if isinstance(value, str):
                values.append("'" + value + "'")
            else:
                values.append(value)
        values = "(" + ', '.join(str(val) for val in values) + ");"
        sql += ") VALUES " + values
        self.run_sql(con, sql)
        con.close()

    def insert_single_comment(self, table, values_dict):
        """INSERT one comment row, escaping string values via `_mysql.escape_string`.

        NOTE(security): column names and the (escaped) values are still joined by
        %-formatting into the query text rather than bound as parameters.
        """
        list_key = [key for key in values_dict]
        list_val = [_mysql.escape_string(val).decode("""utf-8""", """ignore""") if isinstance(val, str) else val for key, val in values_dict.items()]
        list_val = ["""'""" + val + """'""" if isinstance(val, str) else val for val in list_val]
        # Initialize
        con = self.connect()
        query = ("""INSERT INTO """ + self.config.get('MySql', 'db') + """.""" + table + """ """ +
                 """(""" + """,""".join(["""%s"""] * len(list_key)) + """) """ +
                 """VALUES """ +
                 """(""" + """,""".join(["""%s"""] * len(list_val)) + """)""")
        query = (query % tuple(list_key + list_val))
        self.run_sql(con, query)
        con.close()

    def insert_user(self, user_obj):
        """Clean a praw Redditor and insert it into the `user` table."""
        user_values = db.clean_user(user_obj)
        self.insert_single('user', user_values)

    def insert_comment(self, comment_obj):
        """Clean a praw Comment and insert it into the `comment` table."""
        comment_values = db.clean_comment(comment_obj)
        self.insert_single_comment('comment', comment_values)

    def run_sql(self, connection, sql):
        """Run *sql* on *connection*; duplicate-key errors are printed, not raised."""
        try:
            return connection.query(sql)
        except IntegrityError as e:
            print(e)
if __name__ == '__main__':
    # Manual bootstrap: create the DB schema using the repo-local config file.
    init = initialize('../../config/config.ini')
    init.create_schema()
    # sq = read_sql('sql/CREATE_user.sql', **{
    # 'user_id': 'hello',
    # 'db':'date'})
    # print(sq)
Seraphyx/reddit_explorer | rexplore/initialize.py | <reponame>Seraphyx/reddit_explorer
import mysql
import configparser
def initialize(config_path):
    '''
    Import a config .ini file and return the parsed ConfigParser.

    It should have the following definition:
    '''
    config = configparser.ConfigParser()
    config.read(config_path)
    # BUGFIX: the parsed configuration was previously built and discarded
    # (implicit `return None`); callers now receive the parser. Callers that
    # ignored the return value are unaffected.
    return config
|
Seraphyx/reddit_explorer | rexplore/db/clean.py | <reponame>Seraphyx/reddit_explorer<filename>rexplore/db/clean.py
import collections
from functools import reduce
from datetime import datetime
def clean_user(user):
    '''
    Flatten a praw Redditor into an ordered dict of DB column values.
    arg:
        user: reddit.redditor('<username>')
    '''
    ts_format = '%Y-%m-%d %H:%M:%S'
    out = collections.OrderedDict()
    out['id'] = user.id
    out['name'] = user.name
    # out['icon_img'] = user.icon_img
    out['pull_ts'] = datetime.utcnow().strftime(ts_format)
    out['created_utc'] = datetime.utcfromtimestamp(user.created_utc).strftime(ts_format)
    out['link_karma'] = user.link_karma
    out['comment_karma'] = user.comment_karma
    out['is_employee'] = user.is_employee
    out['is_mod'] = user.is_mod
    out['verified'] = user.verified
    return out
def getattr_deep(obj, attr, default=''):
    """Resolve a dotted attribute path on *obj*, returning *default* on any miss.

    Only attributes present in the object's ``__dict__`` are followed (so
    properties/slots are treated as missing, matching the original behavior).
    """
    if obj is None:
        return default
    attr_list = attr.split('.')
    if attr_list[0] in vars(obj).keys():
        obj = getattr(obj, attr_list[0])
    else:
        return default
    if len(attr_list) == 1:
        return obj
    else:
        # BUGFIX: the recursive call previously dropped *default*, so deep
        # misses always returned '' instead of the caller-supplied default.
        return getattr_deep(obj, '.'.join(attr_list[1:]), default)
def clean_comment(comment):
    """Flatten a praw Comment into an ordered dict of DB column values.

    Missing or deleted attributes fall back to the defaults supplied to
    getattr_deep. (Three dead nested helpers — parse_author_id,
    parse_author_name and deepgetattr, all never called and containing leftover
    debug prints — have been removed; behavior is unchanged.)
    """
    out = collections.OrderedDict()
    out['id'] = comment.id
    out['author'] = getattr_deep(comment, 'author.name')
    out['author_id'] = getattr_deep(comment, 'author.id')
    out['name'] = getattr_deep(comment, 'name', '')
    out['parent_id'] = getattr_deep(comment, 'parent_id', '')
    out['link_id'] = getattr_deep(comment, 'link_id', '')
    # out['subreddit'] = comment.subreddit.display_name
    out['subreddit_id'] = getattr_deep(comment, 'subreddit.id', '')
    # out['permalink'] = comment.permalink
    out['pull_ts'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
    out['created_utc'] = datetime.utcfromtimestamp(comment.created_utc).strftime('%Y-%m-%d %H:%M:%S')
    out['depth'] = getattr_deep(comment, 'depth', 0)
    out['edited'] = getattr_deep(comment, 'edited', False)
    out['gilded'] = getattr_deep(comment, 'gilded', False)
    out['score'] = getattr_deep(comment, 'score', 0)
    out['ups'] = getattr_deep(comment, 'ups', 0)
    out['downs'] = getattr_deep(comment, 'downs', 0)
    out['controversiality'] = getattr_deep(comment, 'controversiality', 0)
    out['score_hidden'] = getattr_deep(comment, 'score_hidden', False)
    out['collapsed'] = getattr_deep(comment, 'collapsed', False)
    out['body'] = getattr_deep(comment, 'body')
    return out
if __name__ == '__main__':
    # Ad-hoc scratch checks of %s-placeholder string handling (not a test suite).
    v = ",".join(["%s"] * 3)
    print(v)
    v = v.split(',')
    print(v)
    print(len(v))
    print(v[1:])
    print(len(v[1:]))
    print('.'.join(v[1:]))
Seraphyx/reddit_explorer | example/example.py | <filename>example/example.py
import praw
import pprint
import configparser
import time
from pprint import pprint
from rexplore import db
import uuid
# See: https://github.com/wlindner/python-reddit-scraper/blob/master/scraper.py
'''
How to insert
User:
init = db.initialize('../config/config.ini')
init.insert_user(user)
'''
# uses environment variables
# http://praw.readthedocs.io/en/latest/getting_started/configuration/environment_variables.html
# Module-level praw client; requires a 'myapp' site in praw.ini or env vars.
reddit = praw.Reddit('myapp', user_agent='myapp user agent')
def config():
    """Read ./config.ini from the current working directory and return the parser."""
    cfg = configparser.ConfigParser()
    cfg.read('config.ini')
    return cfg
def main():
    """Print the logged-in redditor, then titles of 10 hot posts from r/learnpython."""
    print ('logged in to Reddit as: ' + str(reddit.user.me()))
    # NOTE(review): `subreddit` is assigned but never used below — leftover from
    # the commented-out submission loop; confirm before removing.
    subreddit = reddit.subreddit('Showerthoughts')
    # for index, submission in enumerate(subreddit.submissions()):
    # if index % 100 == 0 and index > 0: print(str(index) + ' inserted so far')
    # print(submission)
    for submission in reddit.subreddit('learnpython').hot(limit=10):
        print(submission.title)
def test():
    """Exploratory scratchpad: walks praw objects, then inserts one user and a
    full comment tree for submission 5623n2 into MySQL. Not an automated test;
    performs live network and database I/O."""
    print(vars(reddit))
    # (1) List of all popular subreddits
    subreddits = reddit.subreddits.popular()
    for subreddit in subreddits:
        print(subreddit)
    # (2) List of all posts in a subreddit
    subreddit = reddit.subreddit('overwatch')
    # (3) List of all users from the posts
    # (4) List of all posts from user
    user = reddit.redditor('Makirole')
    print('===============')
    # The string below is a reference dump of a praw Comment's attributes,
    # kept as documentation of the available fields.
    '''
    {'_fetched': True,
    '_info_params': {},
    '_mod': None,
    '_reddit': <praw.reddit.Reddit object at 0x0000026D6FC2DA20>,
    '_replies': <praw.models.comment_forest.CommentForest object at 0x0000026D7212CFD0>,
    '_submission': Submission(id='5623n2'),
    'approved_at_utc': None,
    'approved_by': None,
    'archived': True,
    'author': Redditor(name='chaoscontrol91'),
    'author_flair_css_class': None,
    'author_flair_text': None,
    'banned_at_utc': None,
    'banned_by': None,
    'body': 'I like this a lot better. ',
    'body_html': '<div class="md"><p>I like this a lot better. </p>\n</div>',
    'can_gild': True,
    'can_mod_post': False,
    'collapsed': False,
    'collapsed_reason': None,
    'controversiality': 0,
    'created': 1475744159.0,
    'created_utc': 1475715359.0,
    'depth': 0,
    'distinguished': None,
    'downs': 0,
    'edited': False,
    'gilded': 0,
    'id': 'd8ftv3d',
    'is_submitter': False,
    'likes': None,
    'link_id': 't3_5623n2',
    'mod_note': None,
    'mod_reason_by': None,
    'mod_reason_title': None,
    'mod_reports': [],
    'name': 't1_d8ftv3d',
    'num_reports': None,
    'parent_id': 't3_5623n2',
    'permalink': '/r/Overwatch/comments/5623n2/real_hanzo_with_a_twist/d8ftv3d/',
    'removal_reason': None,
    'report_reasons': None,
    'saved': False,
    'score': 8,
    'score_hidden': False,
    'stickied': False,
    'subreddit': Subreddit(display_name='Overwatch'),
    'subreddit_id': 't5_2u5kl',
    'subreddit_name_prefixed': 'r/Overwatch',
    'subreddit_type': 'public',
    'ups': 8,
    'user_reports': []}
    '''
    print(user._path)
    print('===============')
    # user_values = db.clean_user(user)
    # pprint(user_values)
    #===== Insert user
    init = db.initialize('../config/config.ini')
    init.insert_user(user)
    post = reddit.submission(id='5623n2')
    print('post=====================================================')
    print('post=====================================================')
    print('post=====================================================')
    print(post)
    pprint(vars(post))
    print(len(post.comments))
    print(post.comments[0])
    comment = post.comments[2]
    print('post=====================================================')
    print('post=====================================================')
    print('post=====================================================')
    pprint(vars(comment))
    #===== Insert Comment
    # init.insert_comment(comment)
    # print(comment.id)
    # for reply_i, reply in enumerate(comment._replies):
    # print(len(reply._replies))
    # print('\treply = %d [depth=%d][id=%s][parent=%s]' % (reply_i, reply.depth, reply.id, reply.parent_id))
    # for reply_2_i, reply_2 in enumerate(reply._replies):
    # print(len(reply_2._replies))
    # print('\t\treply = %d [depth=%d][id=%s][parent=%s]' % (reply_2_i, reply_2.depth, reply_2.id, reply_2.parent_id))
    # print(len(comment._replies))
    def comment_recursive(comment_obj):
        # Depth-first walk over the reply tree, inserting every reply.
        for reply_i, reply in enumerate(comment_obj._replies):
            print(('\t' * reply.depth) + 'reply = %d [depth=%d][id=%s][parent=%s][replies=%d]' % (reply_i, reply.depth, reply.id, reply.parent_id, len(reply._replies)))
            init.insert_comment(reply)
            if len(reply._replies) > 0:
                comment_recursive(reply)
    # Expand all "MoreComments" stubs so the walk sees the full tree.
    post.comments.replace_more(limit=None)
    for comment_i, comment in enumerate(post.comments):
        print("===== Comment #%d" % comment_i)
        comment_recursive(comment)
    # values_dict = db.clean_comment(comment)
    # print(values_dict)
    # for comment in post.comments:
    # print(comment)
    # pprint(vars(comment))
    # print('user.comments')
    # print(vars(user.comments).keys())
    # print('user.comments === LOOP')
    # for index, comment in enumerate(user.comments.new()):
    # print('============================= ' + str(index))
    # print(':::: %s' % comment.subreddit)
    # print(comment.body)
    # ckeys = vars(comment).keys()
if __name__ == '__main__':
    # Runs the exploratory test() routine; main() is kept available but disabled.
    # main()
    test()
    # Scratch notes from earlier experiments, kept for reference:
    # print(getattr({}, 'a', None))
    # key = uuid.uuid4()
    # print(key)
    # print(type(key))
    # print('inserting', repr(key.bytes))
    # print('INSERT INTO xyz (id) VALUES (%s)', key.bytes)
    # print("""A single line string literal""" == "A single line string literal")
    # print("""A single line stri"ng literal""")
    # test = {'a':1, 'b': 'wefwaf'}
    # test1 = [t for t, k in test.items()]
    # print(test1)
    # print(tuple(test1))
|
CharleyZhao123/graceful-few-shot | data/datasets/gb_100.py | <filename>data/datasets/gb_100.py
import os
import pickle
import random
from torch.utils.data import Dataset
from .datasets import dataset_register
# Train/validation split ratios used by GB100 for both split methods.
default_split = {
    'train': 0.7,
    'val': 0.3,
}
@dataset_register('gb-100')
class GB100(Dataset):
    """Binary good/bad ('gb') sample dataset loaded from a pickled feature pack.

    The pickle at <root_path>/gb_dataset.pickle holds keys 'data', 'feature',
    'imgname', 'origin_label', 'logits' and 'gb_label'; only 'data', 'feature'
    and 'gb_label' are used here. split_method 'random' draws a seeded random
    train/val split per label; anything else ('novel') splits by class blocks.
    """

    def __init__(self, root_path, split='train', split_method='novel', **kwargs):
        data_file_name = 'gb_dataset.pickle'
        with open(os.path.join(root_path, data_file_name), 'rb') as f:
            pack = pickle.load(f, encoding='latin1')
        # Image tensors already preprocessed with [Resize, ToTensor, normalize];
        # they can be fed directly into the network.
        default_data = pack['data']
        feature = pack['feature']
        gb_label = pack['gb_label']
        # Partition sample indices into good (label 1.0) and bad groups.
        g_index = []
        b_index = []
        for i, l in enumerate(gb_label):
            if l == 1.0:
                g_index.append(i)
            else:
                b_index.append(i)
        if split_method == 'random':
            # Randomly sample the train portion of each group (fixed seeds
            # keep the split reproducible across runs).
            random.seed(0)
            train_g_index = random.sample(g_index, int(
                len(g_index)*default_split['train']))
            val_g_index = list(set(g_index).difference(set(train_g_index)))
            random.seed(1)
            train_b_index = random.sample(b_index, int(
                len(b_index)*default_split['train']))
            val_b_index = list(set(b_index).difference(set(train_b_index)))
        else:
            # 'novel': first n of 64 classes for training, the rest for validation.
            # NOTE(review): assumes 100 samples per class stored contiguously
            # within each label group — confirm against the pickle layout.
            t_class_num = int(default_split['train'] * 64)  # n
            train_g_index = g_index[:100*t_class_num]
            val_g_index = g_index[100*t_class_num:]
            train_b_index = b_index[:100*t_class_num]
            val_b_index = b_index[100*t_class_num:]
        # Shared assembly (previously duplicated in both branches); the unused
        # v_class_num local has been dropped.
        train_index = train_g_index + train_b_index
        val_index = val_g_index + val_b_index
        if split == 'train':
            self.index_list = train_index
        else:
            self.index_list = val_index
        self.data = default_data
        self.feature = feature
        self.gb_label = gb_label

    def __len__(self):
        return len(self.index_list)

    def __getitem__(self, i):
        # Returns (preprocessed image tensor, feature vector, int gb label).
        index = self.index_list[i]
        return self.data[index], self.feature[index], int(self.gb_label[index])
if __name__ == '__main__':
    # Smoke test: load the validation split with the class-based ('novel') split
    # and print its size. Expected sizes are recorded below.
    gb_100 = GB100(
        root_path='/space1/zhaoqing/dataset/fsl/gb-100', split='val', split_method='novel')
    print(len(gb_100))
    # random
    # val 3840
    # train 8960
    # novel
    # val 4000
    # train 8800
|
CharleyZhao123/graceful-few-shot | train_gb_classify_network.py | import argparse
import torch
from models import build_model
from data import build_dataloader
import torch.nn.functional as F
import yaml
import utils
import os
from tqdm import tqdm
from tensorboardX import SummaryWriter
def main(config):
    """Train the good/bad classification network described by *config*.

    NOTE(review): reads the module-level `args` (set in the __main__ guard)
    for the save-directory name — main() cannot be called before argparse runs.
    """
    # ===== Prepare save directory, logging and tensorboard =====
    save_name = args.name
    save_path = os.path.join('./save/train_gb_classify_network', save_name)
    utils.ensure_path(save_path)
    utils.set_log_path(save_path)
    tb_writer = SummaryWriter(os.path.join(save_path, 'tensorboard'))
    yaml.dump(config, open(os.path.join(save_path, 'config.yaml'), 'w'))
    # ===== Prepare data and model =====
    # train data
    train_dataloader = build_dataloader(config['train_dataloader_args'])
    # val data
    val_dataloader = build_dataloader(config['val_dataloader_args'])
    # model
    gb_net_model = build_model(config['network_args'])
    utils.log('num params: {}'.format(utils.compute_n_params(gb_net_model)))
    # ===== Training =====
    # optimizer
    trainer_args = config['trainer_args']
    optimizer, lr_scheduler = utils.make_optimizer(gb_net_model.parameters(), trainer_args['optimizer_name'], **trainer_args['optimizer_args'])
    max_epoch = trainer_args['max_epoch']
    save_epoch = trainer_args['save_epoch']
    max_val_acc = 0.0
    timer_used = utils.Timer()
    timer_epoch = utils.Timer()
    # run training
    for epoch in range(1, max_epoch + 1):
        timer_epoch.s()
        aves_keys = ['train_loss', 'train_acc', 'val_loss', 'val_acc']
        aves = {k: utils.Averager() for k in aves_keys}
        gb_net_model.train()
        tb_writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
        for data, feature, gb_label in tqdm(train_dataloader, desc='train', leave=False):
            # Only the precomputed feature is used for training; `data` is unused here.
            feature = feature.cuda()
            gb_label = gb_label.cuda()
            _, gb_logits = gb_net_model(feature)
            ce_loss = F.cross_entropy(gb_logits, gb_label)
            gb_acc = utils.compute_acc(gb_logits, gb_label)
            optimizer.zero_grad()
            ce_loss.backward()
            optimizer.step()
            aves['train_loss'].add(ce_loss.item())
            aves['train_acc'].add(gb_acc)
            # Drop references so the autograd graph can be freed promptly.
            gb_logits = None
            ce_loss = None
        # validation pass (no gradients)
        gb_net_model.eval()
        for data, feature, gb_label in tqdm(val_dataloader, desc='val', leave=False):
            feature = feature.cuda()
            gb_label = gb_label.cuda()
            with torch.no_grad():
                _, gb_logits = gb_net_model(feature)
                ce_loss = F.cross_entropy(gb_logits, gb_label)
                gb_acc = utils.compute_acc(gb_logits, gb_label)
            aves['val_loss'].add(ce_loss.item())
            aves['val_acc'].add(gb_acc)
        if lr_scheduler is not None:
            lr_scheduler.step()
        # Collapse Averager objects to plain floats for logging.
        # (Original comment asked "是否多余?" — "is this redundant?")
        for k, v in aves.items():
            aves[k] = v.item()
        # Write logs and save checkpoints.
        t_epoch = utils.time_str(timer_epoch.t())
        t_used = utils.time_str(timer_used.t())
        t_estimate = utils.time_str(timer_used.t() / epoch * max_epoch)
        epoch_str = str(epoch)
        log_str = 'epoch {}, train {:.4f}|{:.4f}'.format(
            epoch_str, aves['train_loss'], aves['train_acc'])
        tb_writer.add_scalars('loss', {'train': aves['train_loss']}, epoch)
        tb_writer.add_scalars('acc', {'train': aves['train_acc']}, epoch)
        log_str += ', val {:.4f}|{:.4f}'.format(aves['val_loss'], aves['val_acc'])
        tb_writer.add_scalars('loss', {'val': aves['val_loss']}, epoch)
        tb_writer.add_scalars('acc', {'val': aves['val_acc']}, epoch)
        log_str += ', {} {}/{}'.format(t_epoch, t_used, t_estimate)
        utils.log(log_str)
        # Unwrap DataParallel before saving so the state dict keys are stable.
        if config.get('_parallel'):
            model_ = gb_net_model.module
        else:
            model_ = gb_net_model
        training = config['trainer_args']
        save_obj = {
            'file': __file__,
            'config': config,
            'model_sd': model_.state_dict(),
            'training': training,
        }
        torch.save(save_obj, os.path.join(save_path, 'epoch-last.pth'))
        if (save_epoch is not None) and epoch % save_epoch == 0:
            torch.save(save_obj, os.path.join(
                save_path, 'epoch-{}.pth'.format(epoch)))
        if aves['val_acc'] > max_val_acc:
            max_val_acc = aves['val_acc']
            torch.save(save_obj, os.path.join(save_path, 'max-val-acc.pth'))
        tb_writer.flush()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/train_gb_classify_network.yaml')
    parser.add_argument('--name', default='train_gb_classify_network')
    parser.add_argument('--gpu', default='0')
    # NOTE: `args` is also read as a module-level global inside main().
    args = parser.parse_args()
    config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
    # More than one GPU id enables the '_parallel' (DataParallel) path.
    if len(args.gpu.split(',')) > 1:
        config['_parallel'] = True
        config['_gpu'] = args.gpu
    utils.set_gpu(args.gpu)
    main(config)
|
CharleyZhao123/graceful-few-shot | models/network/base_pretrain_network.py | import torch.nn as nn
from ..model_init import model_register, build_model
@model_register('base-pretrain-network')
class BasePretrainNetwork(nn.Module):
    """Encoder followed by a classifier head, used for pre-training.

    The classifier input width is taken from the encoder's `out_dim`
    attribute — presumably every registered encoder exposes it; confirm.
    """

    def __init__(self, encoder_name='resnet12', encoder_args={}, encoder_load_para={},
                 classifier_name='nn-classifier', classifier_args={}, classifier_load_para={}):
        super().__init__()
        # Build the encoder first so its output width can size the classifier.
        self.encoder = build_model(
            encoder_name, encoder_args, encoder_load_para)
        classifier_args['in_dim'] = self.encoder.out_dim
        self.classifier = build_model(
            classifier_name, classifier_args, classifier_load_para)

    def forward(self, x):
        # Returns both the embedding and the classification logits.
        feature = self.encoder(x)
        logits = self.classifier(feature)
        return feature, logits
|
CharleyZhao123/graceful-few-shot | data/dataloader_init.py | from torch.utils.data import DataLoader
from . import datasets
from . import samplers
import sys
sys.path.append('..')
from utils import check_args
# Fallback arguments merged into any user-supplied dataloader config.
default_dataloader_args = {
    # dataset name ('img-mini-imagenet', 'mini-imagenet')
    'dataset_name': 'img-mini-imagenet',
    'dataset_args': {
        'split': 'train',  # dataset split ('train', 'val', 'test')
        'augment': 'default'
    },
    # sampler name ('default-sampler', 'metatasks-sampler', 'sequential-sampler')
    'sampler_name': 'default-sampler',
    'batch_size': 128,  # batch size (36, 48, 64, 128) (1, 4)
}
# Fallback episode settings for the meta-task sampler.
default_sampler_args = {
    'batch_num': 200,
    'shot_num': 1,
    'way_num': 5,
    'query_num': 15
}
def build_dataloader(dataloader_args=default_dataloader_args):
    '''
    Build a DataLoader.
    Input: dataloader_args (missing keys are filled from default_dataloader_args)
    Output: a DataLoader matching the given configuration
    '''
    # ===== Fill in any missing required arguments from the defaults =====
    dataloader_args = check_args(default_dataloader_args, dataloader_args)
    # ===== Build the dataset =====
    dataset_name = dataloader_args['dataset_name']
    dataset_args = dataloader_args['dataset_args']
    dataset = datasets.make(dataset_name, dataset_args)
    # ===== Build the sampler and DataLoader =====
    if dataloader_args['sampler_name'] == 'metatasks-sampler':
        if not dataloader_args.get('sampler_args'):
            # BUGFIX: this fallback previously assigned default_dataloader_args,
            # which has none of the batch_num/way_num/shot_num/query_num keys
            # and made the sampler construction below raise KeyError.
            sampler_args = default_sampler_args
        else:
            sampler_args = check_args(
                default_sampler_args, dataloader_args['sampler_args'])
        sampler = samplers.MetatasksSampler(dataset.label, sampler_args['batch_num'],
                                            sampler_args['way_num'], sampler_args['shot_num'] +
                                            sampler_args['query_num'],
                                            ep_per_batch=dataloader_args['batch_size'])
        dataloader = DataLoader(
            dataset, batch_sampler=sampler, num_workers=8, pin_memory=True)
    elif dataloader_args['sampler_name'] == 'sequential-sampler':
        dataloader = DataLoader(
            dataset, dataloader_args['batch_size'], shuffle=False, num_workers=8, pin_memory=True)
    else:
        dataloader = DataLoader(
            dataset, dataloader_args['batch_size'], shuffle=True, num_workers=8, pin_memory=False)
    return dataloader
if __name__ == '__main__':
    # Smoke test: build a test-split dataloader with mostly-default arguments.
    dataloader_args = {
        'dataset_args': {
            'name': 'img-mini-imagenet',
            'split': 'test',
        },
        'batch_size': 64,
    }
    build_dataloader(dataloader_args)
|
CharleyZhao123/graceful-few-shot | data/samplers/__init__.py | <reponame>CharleyZhao123/graceful-few-shot
from .metatasks_sampler import MetatasksSampler |
CharleyZhao123/graceful-few-shot | models/model_init.py | # from utils import check_args
import torch
import clip
import sys
sys.path.append('..')
# Global registry mapping model names to their classes.
model_list = {}


def model_register(name):
    """Class decorator: register the decorated class under *name* in model_list."""
    def add_to_registry(cls):
        model_list[name] = cls
        return cls
    return add_to_registry
def build_model(model_name, model_args={}, model_load_para={}, **kwargs):
    '''
    Build a neural-network model.
    Input: model name, constructor args, and checkpoint-loading options.
    Output: a model initialised from stored parameters or randomly.
    '''
    # ===== Build the model =====
    # Restore a full model directly from a whole-model checkpoint (not recommended).
    if model_load_para.get('load'):
        model_para = torch.load(model_load_para['load'])
        model = model_list[model_para['model']](**model_para['model_args'])
        model.load_state_dict(model_para['model_sd'])
    elif model_name == 'clip':
        # CLIP has its own loader and is returned immediately
        # (clip.load already places it on the chosen device).
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model, _ = clip.load('ViT-B/32', device)
        return model
    # Otherwise build a randomly initialised model.
    else:
        model = model_list[model_name](**model_args)
    # ===== Optionally load sub-module parameters =====
    # Each branch expects a specific checkpoint layout — presumably checkpoints
    # produced by this project's training scripts; confirm before reuse.
    if model_load_para.get('load_encoder'):
        encoder_para = torch.load(model_load_para['load_encoder'])
        encoder = model_list[encoder_para['model']](**encoder_para['model_args']).encoder
        model.encoder.load_state_dict(encoder.state_dict())
    if model_load_para.get('load_classifier'):
        classifier_para = torch.load(model_load_para['load_classifier'])
        classifier = model_list[classifier_para['model']](**classifier_para['model_args']).classifier
        model.classifier.load_state_dict(classifier.state_dict())
    if model_load_para.get('load_pure_encoder'):
        # Checkpoint is a bare state dict for the whole model.
        encoder_para = torch.load(model_load_para['load_pure_encoder'])
        model.load_state_dict(encoder_para)
    # Load parameters saved by the legacy project: strip the "encoder." prefix
    # from state-dict keys before loading.
    if model_load_para.get('load_old_encoder'):
        model_para = torch.load(model_load_para['load_old_encoder'])['model_sd']
        encoder_dict = {}
        for k, v in model_para.items():
            if k[:8] == "encoder.":
                k = k[8:]
            encoder_dict[k] = v
        if 'encoder' in dir(model):
            model.encoder.load_state_dict(encoder_dict)
        else:
            model.load_state_dict(encoder_dict)
    # ===== Misc =====
    if torch.cuda.is_available():
        model.cuda()
    return model
if __name__ == '__main__':
    # Scratch usage example: a network-args dict and a manual checkpoint load.
    network_args = {
        'model_name': 'resnet12',
        'model_args': {},
        'model_load_para':
        {
            'load_encoder': '/space1/zhaoqing/code/graceful-few-shot/models/backbone/pretrained/resnet18-f37072fd.pth',
        },
        'similarity_method': 'cos'  # 'cos', 'sqr'
    }
    model_para = torch.load('/space1/zhaoqing/code/few_shot_meta_baseline/save/pre_meta_2_stage/linear/metabasepre2/max-tva.pth')['model_sd']
|
CharleyZhao123/graceful-few-shot | models/network/mva_network.py | <reponame>CharleyZhao123/graceful-few-shot<gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from models import model_register, build_model
import utils
import utils.few_shot as fs
@model_register('mva-network')
class MVANetwork(nn.Module):
    """Few-shot classifier: an encoder plus a multi-view attention (MVA) module
    that builds per-query prototypes from the support set.

    With mva_args['update'] set, the MVA is fine-tuned per episode on "fake"
    tasks constructed from the support set alone (see train_mva).
    """

    def __init__(self, encoder_name='resnet12', encoder_args={}, encoder_load_para={},
                 mva_name='dot-attention', mva_args={'update': False}, mva_load_para={}, task_info={},
                 similarity_method='cos'):
        super().__init__()
        # Build sub-modules.
        self.encoder = build_model(
            encoder_name, encoder_args, encoder_load_para)
        self.mva = build_model(mva_name, mva_args, mva_load_para)
        # Episode/task configuration.
        self.batch_size = task_info['batch_size']
        self.shot_num = task_info['shot_num']
        self.way_num = task_info['way_num']
        self.query_num = task_info['query_num']
        # Misc.
        self.similarity_method = similarity_method
        self.mva_name = mva_name
        self.mva_update = mva_args['update']

    def get_logits(self, proto_feat, query_feat):
        '''
        Compute the final classification logits.
        proto_feat: [T, Q, W, dim], query_feat: [T, Q, dim] -> logits [T*Q, W]
        '''
        if self.similarity_method == 'cos':
            proto_feat = F.normalize(
                proto_feat, dim=-1).permute(0, 1, 3, 2)  # [T, Q, dim, W]
            query_feat = F.normalize(
                query_feat, dim=-1).unsqueeze(-2)  # [T, Q, 1, dim]
            logits = torch.matmul(query_feat, proto_feat)  # [T, Q, 1, W]
        elif self.similarity_method == 'sqr':
            # BUGFIX: this branch used to `pass`, leaving `logits` unbound and
            # crashing below with UnboundLocalError; fail loudly instead.
            raise NotImplementedError("similarity_method 'sqr' is not implemented")
        logits = logits.squeeze(-2).view(-1, self.way_num)
        return logits

    def feature_augmentation(self, fkey, w_index, s_index, aug_type='none'):
        '''
        Augment / replace the support feature at (way w_index, shot s_index).
        NOTE(review): an unknown aug_type falls through and returns None — confirm
        callers only pass the five types handled here.
        '''
        if aug_type == 'none':
            query_feat = fkey[0, w_index, s_index, :].clone().detach()
            return query_feat
        elif aug_type == 'zero':
            # Replace the held-out shot with an all-zero feature.
            query_feat = fkey[0, w_index, s_index, :].clone().detach()
            zero_feat = torch.zeros_like(query_feat)
            return zero_feat
        elif aug_type == 'random':
            # Add small Gaussian noise to the held-out shot.
            query_feat = fkey[0, w_index, s_index, :].clone().detach()
            random_mask = torch.randn_like(query_feat).cuda()
            random_feat = query_feat + random_mask * 0.07
            return random_feat
        elif aug_type == 'way_mean':
            # Mean over all shots of the way.
            way_feat = fkey[0, w_index, :, :].clone().detach()  # [W, dim]
            way_mean_feat = torch.mean(way_feat, dim=0)
            return way_mean_feat
        elif aug_type == 'way_other_mean':
            # Mean over the other shots of the way (excluding s_index).
            way_feat = fkey[0, w_index, :, :].clone().detach()  # [W, dim]
            index = [i for i in range(self.shot_num)]
            index.remove(s_index)
            index = torch.tensor(index, dtype=torch.long).cuda()
            way_other_feat = torch.index_select(way_feat, 0, index)
            way_other_mean_feat = way_other_feat.mean(dim=0)
            return way_other_mean_feat

    def build_fake_trainset(self, key, choice_type='random', choice_num=3, aug_type='none', epoch=0):
        '''
        Build a fake training task from the support set (key) alone.
        Input: key: [1, W, S, dim]
        Output:
            fkey: [1, W, S, dim]  support set with held-out shots augmented
            fquery: [1, Q, dim]   held-out shots used as queries
            flabel: [Q, W]        way indices for the queries
            Q = W x choice_num
        NOTE(review): only choice_type == 'random' defines choice_id_list; any
        other value raises NameError below — confirm intended.
        '''
        # Setup.
        way_num = key.shape[1]
        shot_num = key.shape[2]
        dim = key.shape[3]
        fquery = []
        fkey = key.clone().detach()
        flabel = []
        # Generate held-out shot indices per way (seeded for reproducibility).
        if choice_type == 'random':
            choice_id_list = []
            for w in range(way_num):
                random.seed(epoch+w)
                w_choice_id_list = random.sample(
                    range(0, shot_num), choice_num)
                choice_id_list.append(w_choice_id_list)  # [[...], [], ...]
        # Assemble the fake task from choice_id_list.
        for w_index, w_choice_id_list in enumerate(choice_id_list):
            for s_index in w_choice_id_list:
                # print(w_index, ' ', s_index)
                # Use the held-out key feature as a query.
                query_feat = fkey[0, w_index, s_index, :].clone().detach()
                fquery.append(query_feat)
                # Its label is the way index.
                query_label = w_index
                flabel.append(query_label)
                # Augment the corresponding original key feature.
                aug_feat = self.feature_augmentation(
                    fkey, w_index, s_index, aug_type)
                fkey[0, w_index, s_index, :] = aug_feat
        fquery = torch.stack(fquery, dim=0).unsqueeze(0).cuda()
        flabel = torch.tensor(flabel, dtype=torch.long).cuda()
        return fkey, fquery, flabel

    def train_mva(self, key, epoch_num=30, enhance_threshold=0.0, enhance_top=10):
        '''
        Fine-tune the MVA on fake tasks built from the support set (key).
        epoch_num: number of regular training epochs; each epoch builds a
            different (easier or harder) fake task.
        enhance_threshold: if an epoch's accuracy falls below this threshold,
            the same epoch is repeated ("enhanced"); 0.0 disables repetition.
        enhance_top: maximum number of repetitions (inclusive) per epoch.
        '''
        # Key hyper-parameters.
        aug_type = 'zero'
        choice_num = 1
        lr = 1e-2
        l1 = True
        # Optimizer.
        optimizer = torch.optim.SGD(self.mva.parameters(), lr=lr,
                                    momentum=0.9, dampening=0.9, weight_decay=0)
        # Training (gradients must be re-enabled: callers may be in no_grad mode).
        with torch.enable_grad():
            if enhance_threshold == 0.0:
                for epoch in range(1, epoch_num+1):
                    fkey, fquery, flabel = self.build_fake_trainset(
                        key, choice_num=choice_num, aug_type=aug_type, epoch=epoch)
                    optimizer.zero_grad()
                    proto_feat = self.mva(fquery, fkey)
                    logits = self.get_logits(proto_feat, fquery)
                    loss = F.cross_entropy(logits, flabel)
                    # L1 regularization on the MVA parameters.
                    if l1:
                        l1_reg = 0.0
                        for param in self.mva.parameters():
                            l1_reg += torch.sum(torch.abs(param))
                        # (Removed a leftover debug `print(l1_reg)` here.)
                        loss += 0.01 * l1_reg
                    acc = utils.compute_acc(logits, flabel)
                    loss.backward()
                    optimizer.step()
                    print('mva train epoch: {} acc={:.2f} loss={:.2f}'.format(
                        epoch, acc, loss))
            else:
                epoch = 1
                enhance_num = 0
                while epoch < epoch_num + 1:
                    fkey, fquery, flabel = self.build_fake_trainset(
                        key, choice_num=choice_num, aug_type=aug_type, epoch=epoch)
                    optimizer.zero_grad()
                    proto_feat = self.mva(fquery, fkey)
                    logits = self.get_logits(proto_feat, fquery)
                    loss = F.cross_entropy(logits, flabel)
                    acc = utils.compute_acc(logits, flabel)
                    loss.backward()
                    optimizer.step()
                    print('mva train epoch: {} acc={:.2f} loss={:.2f}'.format(
                        epoch, acc, loss))
                    if acc < enhance_threshold:
                        # Repeat the epoch until accuracy clears the threshold
                        # or the repetition budget is exhausted.
                        enhance_num += 1
                        if enhance_num > enhance_top:
                            enhance_num = 0
                            epoch += 1
                        else:
                            print('mva train epoch enhance time: {}'.format(
                                enhance_num))
                    else:
                        enhance_num = 0
                        epoch += 1
        print("mva train done.")

    def forward(self, image):
        # ===== Extract features =====
        if 'encode_image' in dir(self.encoder):
            # CLIP-style encoders expose encode_image.
            feature = self.encoder.encode_image(image).float()
        else:
            feature = self.encoder(image)
        # ===== Split features into support and query =====
        # shot_feat: [T, W, S, dim], query_feat: [T, Q, dim]
        shot_feat, query_feat = fs.split_shot_query(
            feature, self.way_num, self.shot_num, self.query_num, self.batch_size)
        # ===== Optional per-episode MVA fine-tuning =====
        # When MVA update mode is enabled, each batch must contain exactly one task.
        if self.mva_update:
            self.train_mva(key=shot_feat, epoch_num=10,
                           enhance_threshold=0.0, enhance_top=10)
        # ===== Run the MVA to obtain per-query prototypes =====
        # proto_feat: [T, Q, W, dim]
        proto_feat = self.mva(query_feat, shot_feat)
        # ===== Final classification logits =====
        logits = self.get_logits(proto_feat, query_feat)
        return logits
|
CharleyZhao123/graceful-few-shot | models/component/__init__.py | from .multi_view_attention import DotAttention |
CharleyZhao123/graceful-few-shot | poor_python.py | <filename>poor_python.py
import pickle
import torch
import os

# Sanity check: multiplying a feature tensor by per-way identity matrices
# must leave the features unchanged.
key = torch.rand(4, 15, 5, 512)
print(key[0, 0, 0, 0:10])

eye_base = torch.eye(512)
eye_repeat = eye_base.unsqueeze(0).repeat(15, 1, 1)
weight = eye_repeat

# matmul broadcasts weight [15, 512, 512] over key [4, 15, 5, 512].
# (The original also computed torch.tensordot(key, weight, dims=([3], [1]))
# into the same name; that dead store has been removed.)
new_key = torch.matmul(key, weight)
print(new_key.shape)
print(new_key[0, 0, 0, 0:10])
|
CharleyZhao123/graceful-few-shot | clip_fsl.py | <filename>clip_fsl.py
import torch
import clip
from PIL import Image

# Zero-shot CLIP similarity demo: score ten images against one text prompt.
# NOTE(review): the variable names (dog/cow/goat/rabbit/pig...) do not match
# the files loaded -- every path below points at a cat image; verify intent.
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
dog_image = preprocess(Image.open(
    "/space1/zhaoqing/dataset/fsl/animals/cat_g/cat_g_100.jpg")).unsqueeze(0).to(device)
cow_image = preprocess(Image.open(
    "/space1/zhaoqing/dataset/fsl/animals/cat_g/cat_g_55.jpg")).unsqueeze(0).to(device)
cat_image = preprocess(Image.open(
    "/space1/zhaoqing/dataset/fsl/animals/cat_g/cat_g_5.jpg")).unsqueeze(0).to(device)
goat_image = preprocess(Image.open(
    "/space1/zhaoqing/dataset/fsl/animals/cat_g/cat_g_40.jpg")).unsqueeze(0).to(device)
rabbit_image = preprocess(Image.open(
    "/space1/zhaoqing/dataset/fsl/animals/cat_g/cat_g_19.jpg")).unsqueeze(0).to(device)
# NOTE(review): pig_image loads the same file as rabbit_image -- confirm.
pig_image = preprocess(Image.open(
    "/space1/zhaoqing/dataset/fsl/animals/cat_g/cat_g_19.jpg")).unsqueeze(0).to(device)
pig_image1 = preprocess(Image.open(
    "/space1/zhaoqing/dataset/fsl/animals/cat_w/cat_w_20.jpg")).unsqueeze(0).to(device)
pig_image2 = preprocess(Image.open(
    "/space1/zhaoqing/dataset/fsl/animals/cat_w/cat_w_40.jpg")).unsqueeze(0).to(device)
pig_image3 = preprocess(Image.open(
    "/space1/zhaoqing/dataset/fsl/animals/cat_t/cat_t_0.png")).unsqueeze(0).to(device)
pig_image4 = preprocess(Image.open(
    "/space1/zhaoqing/dataset/fsl/animals/cat_s/cat_s_0.png")).unsqueeze(0).to(device)
text = clip.tokenize(["A photo of a cat"]).to(device)
with torch.no_grad():
    dog_image_features = model.encode_image(dog_image)
    cow_image_features = model.encode_image(cow_image)
    cat_image_features = model.encode_image(cat_image)
    goat_image_features = model.encode_image(goat_image)
    rabbit_image_features = model.encode_image(rabbit_image)
    pig_image_features = model.encode_image(pig_image)
    pig_image1_features = model.encode_image(pig_image1)
    pig_image2_features = model.encode_image(pig_image2)
    pig_image3_features = model.encode_image(pig_image3)
    pig_image4_features = model.encode_image(pig_image4)
    # [5, 512]
    image_features = torch.cat((dog_image_features, cow_image_features, cat_image_features, goat_image_features, rabbit_image_features,
                                pig_image_features, pig_image1_features, pig_image2_features, pig_image3_features, pig_image4_features), dim=0)
    text_features = model.encode_text(text)
    # L2-normalize so the dot product below is a cosine similarity.
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    # NOTE(review): softmax over a single text prompt always yields 1.0 per
    # row; the raw pre-softmax similarities may be what was intended.
    similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
    print(similarity)
# values, indices = similarity[0]
# logits_per_image, logits_per_text = model(image, text)
# probs = logits_per_image.softmax(dim=-1).cpu().numpy()
# print("Label probs:", probs)  # prints: [[0.9927937 0.00421068 0.00299572]]
|
CharleyZhao123/graceful-few-shot | models/classifier/classifiers.py | <filename>models/classifier/classifiers.py
import math
import torch
import torch.nn as nn
import models
import utils
import sys
sys.path.append('..')
from models import model_register
@model_register('linear-classifier')
class LinearClassifier(nn.Module):
    """A single fully-connected layer mapping features to class scores."""

    def __init__(self, in_dim, class_num):
        super().__init__()
        # Attribute name kept as `linear` for state_dict compatibility.
        self.linear = nn.Linear(in_dim, class_num)

    def forward(self, x):
        logits = self.linear(x)
        return logits
@model_register('nn-classifier')
class NNClassifier(nn.Module):
    """Nearest-prototype classifier with one learnable prototype per class."""

    def __init__(self, in_dim, class_num, metric='cos', temp=None):
        super().__init__()
        # Attribute name kept as `proto` for state_dict compatibility.
        self.proto = nn.Parameter(torch.empty(class_num, in_dim))
        nn.init.kaiming_uniform_(self.proto, a=math.sqrt(5))
        if temp is None:
            # Cosine similarity gets a learnable temperature; other metrics
            # default to a fixed scale of 1.0.
            temp = nn.Parameter(torch.tensor(10.)) if metric == 'cos' else 1.0
        self.metric = metric
        self.temp = temp

    def forward(self, x):
        # At eval time the temperature is pinned to 1.0.
        scale = self.temp if self.training else 1.0
        return utils.compute_logits(x, self.proto, self.metric, scale)
|
CharleyZhao123/graceful-few-shot | data/datasets/datasets.py | import os
DEFAULT_ROOT = '/space1/zhaoqing/dataset/fsl'

# Registry mapping a dataset name to its class.
datasets = {}


def dataset_register(name):
    """Class decorator that registers a dataset class under *name*."""
    def decorator(cls):
        datasets[name] = cls
        return cls
    return decorator
def make(dataset_name, dataset_args):
    """Instantiate a registered dataset.

    If ``root_path`` is missing (or None) it defaults to
    ``DEFAULT_ROOT/<dataset_name>``. Note: the default is written back into
    the caller's ``dataset_args`` dict.
    """
    if dataset_args.get('root_path') is None:
        dataset_args['root_path'] = os.path.join(DEFAULT_ROOT, dataset_name)
    dataset_cls = datasets[dataset_name]
    return dataset_cls(**dataset_args)
|
CharleyZhao123/graceful-few-shot | models/classifier/__init__.py | from .classifiers import LinearClassifier
from .classifiers import NNClassifier |
CharleyZhao123/graceful-few-shot | models/old_model/classifier.py | import torch.nn as nn
import models
import sys
sys.path.append('..')
from models import model_register
@model_register('classifier')
class Classifier(nn.Module):
    """An encoder followed by a classification head."""

    def __init__(self, encoder, encoder_args,
                 classifier, classifier_args):
        super().__init__()
        self.encoder = models.make(encoder, **encoder_args)
        # The head's input width comes from the encoder; its class count is
        # passed in under the legacy key `n_classes`.
        classifier_args['in_dim'] = self.encoder.out_dim
        classifier_args['class_num'] = classifier_args.pop('n_classes')
        self.classifier = models.make(classifier, **classifier_args)

    def forward(self, x):
        return self.classifier(self.encoder(x))
CharleyZhao123/graceful-few-shot | models/network/gb_classify_network.py | import torch.nn as nn
import models
import sys
sys.path.append('..')
from models import model_register
@model_register('gb-classify-network')
class GBClassifyNetwork(nn.Module):
    """Encoder + classifier network that can also consume precomputed features."""

    def __init__(self, encoder_name, encoder_args, classifier_name, classifier_args):
        super().__init__()
        # skip_encoder: bypass the encoder and feed precomputed features in directly.
        self.skip_encoder = encoder_args['skip']
        if self.skip_encoder:
            # Feature width when features are precomputed.
            # NOTE(review): hard-coded 512 -- presumably CLIP ViT-B/32 features; confirm.
            classifier_args['in_dim'] = 512
        else:
            self.encoder = models.make(encoder_name, **encoder_args)
            classifier_args['in_dim'] = self.encoder.out_dim
        self.classifier = models.make(classifier_name, **classifier_args)

    def forward(self, x):
        feature = x if self.skip_encoder else self.encoder(x)
        logits = self.classifier(feature)
        return feature, logits
|
CharleyZhao123/graceful-few-shot | models/network/__init__.py | <filename>models/network/__init__.py<gh_stars>1-10
from .base_pretrain_network import BasePretrainNetwork
from .gb_classify_network import GBClassifyNetwork
from .mva_network import MVANetwork
from .patch_mva_network import PatchMVANetwork |
CharleyZhao123/graceful-few-shot | utils/check_arguments.py | <filename>utils/check_arguments.py
def check_args(default_args, input_args):
    '''
    Check that all required default parameters are present and fill in any
    that are missing.

    Nested dict values are merged recursively: defaults supply missing keys
    while values present in ``input_args`` take precedence.

    Args:
        default_args: dict of default parameters
        input_args: dict of user-supplied parameters

    Returns:
        new_args: the merged parameter dict. Unlike the previous version,
        neither input dict is mutated.
    '''
    # Work on a shallow copy so the caller's dict is never modified.
    merged_input = dict(input_args)
    # Handle dict-valued entries separately (recursive merge).
    for k, v in default_args.items():
        if isinstance(v, dict):
            # Truthiness check kept on purpose: an explicit None/empty value
            # falls back to the full default sub-dict.
            if merged_input.get(k):
                merged_input[k] = check_args(v, merged_input[k])
            else:
                merged_input[k] = v
    # Merge: identical keys in the input override the defaults.
    new_args = default_args.copy()
    new_args.update(merged_input)
    return new_args
if __name__ == '__main__':
    # Demo: merge a partial dataloader config with its defaults and print it.
    default_dataloader_args = {
        'dataset_args': {
            'name': 'img-mini-imagenet',  # dataset name ('img-mini-imagenet', 'mini-imagenet')
            'split': 'train',  # split name ('train', 'val', 'test')
            'augment': 'default'
        },
        'sampler': 'default-sampler',  # sampler name ('default-sampler', 'meta-sampler')
        'batch_size': '128',  # batch size (36, 48, 64, 128) (1, 4)
    }
    dataloader_args = {
        'dataset_args': {
            'name': 'img-mini-imagenet',
            'split': 'test',
        },
        'batch_size': '64',
    }
    new_args = check_args(default_dataloader_args, dataloader_args)
    print(new_args)
CharleyZhao123/graceful-few-shot | models/backbone/__init__.py | from .resnet12 import Resnet12
from .resnet12 import Resnet12_wide
from .resnet18 import Resnet18 |
CharleyZhao123/graceful-few-shot | train_for_mushi.py | <reponame>CharleyZhao123/graceful-few-shot
import os
import argparse
import utils
utils.set_gpu('6')
import torch
from models import build_model
from data import build_dataloader
import torch.nn.functional as F
import yaml
from tqdm import tqdm
from tensorboardX import SummaryWriter
def main(config):
    """Train a classifier and track validation accuracy on sim and true data.

    Side effects: writes config.yaml, logs, tensorboard events and
    checkpoints under ./save/train_for_mushi/<args.name>. Reads the
    module-level ``args`` for the run name.
    """
    # ===== Prepare output directory and logging =====
    save_name = args.name
    save_path = os.path.join('./save/train_for_mushi', save_name)
    utils.ensure_path(save_path)
    utils.set_log_path(save_path)
    tb_writer = SummaryWriter(os.path.join(save_path, 'tensorboard'))
    yaml.dump(config, open(os.path.join(save_path, 'config.yaml'), 'w'))
    # ===== Fix the random seed =====
    utils.set_seed(1)
    # ===== Prepare data and model =====
    # mixed (sim + true) train data
    mix_train_dataloader = build_dataloader(
        config['mix_train_dataloader_args'])
    # sim train data
    sim_train_dataloader = build_dataloader(
        config['sim_train_dataloader_args'])
    # true train data
    true_train_dataloader = build_dataloader(
        config['true_train_dataloader_args'])
    # final train data (only the true-data loader is actually used below)
    train_dataloader = true_train_dataloader
    # sim val data
    sim_val_dataloader = build_dataloader(config['sim_val_dataloader_args'])
    # true val data
    true_val_dataloader = build_dataloader(config['true_val_dataloader_args'])
    # model
    network_args = config['network_args']
    pretrain_model = build_model(network_args['model_name'], network_args['model_args'])
    utils.log('num params: {}'.format(utils.compute_n_params(pretrain_model)))
    # ===== Training =====
    # optimizer
    trainer_args = config['trainer_args']
    optimizer, lr_scheduler = utils.make_optimizer(pretrain_model.parameters(
    ), trainer_args['optimizer_name'], **trainer_args['optimizer_args'])
    max_epoch = trainer_args['max_epoch']
    save_epoch = trainer_args['save_epoch']
    # Track the best epoch on each validation set.
    max_sim_val_acc = 0.0
    max_sim_val_acc_epoch = 0
    max_true_val_acc = 0.0
    max_true_val_acc_epoch = 0
    timer_used = utils.Timer()
    timer_epoch = utils.Timer()
    # Run the training loop
    for epoch in range(1, max_epoch + 1):
        timer_epoch.s()
        aves_keys = ['train_loss', 'train_acc', 'sim_val_loss',
                     'sim_val_acc', 'true_val_loss', 'true_val_acc']
        aves = {k: utils.Averager() for k in aves_keys}
        pretrain_model.train()
        tb_writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
        for image, label in tqdm(train_dataloader, desc='train', leave=False):
            image = image.cuda()
            label = label.cuda()
            _, logits = pretrain_model(image)
            ce_loss = F.cross_entropy(logits, label)
            acc = utils.compute_acc(logits, label)
            optimizer.zero_grad()
            ce_loss.backward()
            optimizer.step()
            aves['train_loss'].add(ce_loss.item())
            aves['train_acc'].add(acc)
            # Drop references so the autograd graph can be freed promptly.
            logits = None
            ce_loss = None
        # Validation
        # sim
        pretrain_model.eval()
        for image, label in tqdm(sim_val_dataloader, desc='sim val', leave=False):
            image = image.cuda()
            label = label.cuda()
            with torch.no_grad():
                _, logits = pretrain_model(image)
                ce_loss = F.cross_entropy(logits, label)
                acc = utils.compute_acc(logits, label)
            aves['sim_val_loss'].add(ce_loss.item())
            aves['sim_val_acc'].add(acc)
        # true
        pretrain_model.eval()
        for image, label in tqdm(true_val_dataloader, desc='true val', leave=False):
            image = image.cuda()
            label = label.cuda()
            with torch.no_grad():
                _, logits = pretrain_model(image)
                ce_loss = F.cross_entropy(logits, label)
                acc = utils.compute_acc(logits, label)
            aves['true_val_loss'].add(ce_loss.item())
            aves['true_val_acc'].add(acc)
        if lr_scheduler is not None:
            lr_scheduler.step()
        # Collapse the averagers into plain floats. (Possibly redundant?)
        for k, v in aves.items():
            aves[k] = v.item()
        # Log and save checkpoints
        t_epoch = utils.time_str(timer_epoch.t())
        t_used = utils.time_str(timer_used.t())
        t_estimate = utils.time_str(timer_used.t() / epoch * max_epoch)
        epoch_str = str(epoch)
        log_str = 'epoch {}, train {:.4f}|{:.4f}'.format(
            epoch_str, aves['train_loss'], aves['train_acc'])
        tb_writer.add_scalars('loss', {'train': aves['train_loss']}, epoch)
        tb_writer.add_scalars('acc', {'train': aves['train_acc']}, epoch)
        log_str += ', sim val {:.4f}|{:.4f}'.format(
            aves['sim_val_loss'], aves['sim_val_acc'])
        tb_writer.add_scalars(
            'sim loss', {'sim val': aves['sim_val_loss']}, epoch)
        tb_writer.add_scalars(
            'sim acc', {'sim val': aves['sim_val_acc']}, epoch)
        log_str += ', true val {:.4f}|{:.4f}'.format(
            aves['true_val_loss'], aves['true_val_acc'])
        tb_writer.add_scalars(
            'true loss', {'true val': aves['true_val_loss']}, epoch)
        tb_writer.add_scalars(
            'true acc', {'true val': aves['true_val_acc']}, epoch)
        log_str += ', {} {}/{}'.format(t_epoch, t_used, t_estimate)
        utils.log(log_str)
        # Unwrap DataParallel before saving so state_dict keys stay stable.
        if config.get('_parallel'):
            model_ = pretrain_model.module
        else:
            model_ = pretrain_model
        training = config['trainer_args']
        save_obj = {
            'file': __file__,
            'config': config,
            'model_sd': model_.state_dict(),
            'training': training,
        }
        torch.save(save_obj, os.path.join(save_path, 'epoch-last.pth'))
        if (save_epoch is not None) and epoch % save_epoch == 0:
            torch.save(save_obj, os.path.join(
                save_path, 'epoch-{}.pth'.format(epoch)))
        # Keep the best checkpoint per validation set.
        if aves['sim_val_acc'] > max_sim_val_acc:
            max_sim_val_acc = aves['sim_val_acc']
            max_sim_val_acc_epoch = epoch
            torch.save(save_obj, os.path.join(
                save_path, 'max-sim-val-acc.pth'))
        if aves['true_val_acc'] > max_true_val_acc:
            max_true_val_acc = aves['true_val_acc']
            max_true_val_acc_epoch = epoch
            torch.save(save_obj, os.path.join(
                save_path, 'max-true-val-acc.pth'))
        tb_writer.flush()
    log_str = 'max sim val epoch: {}, acc: {}'.format(
        max_sim_val_acc_epoch, max_sim_val_acc)
    log_str += '\n'
    log_str += 'max true val epoch: {}, acc: {}'.format(
        max_true_val_acc_epoch, max_true_val_acc)
    utils.log(log_str)
if __name__ == '__main__':
    # CLI entry point: load the YAML config and launch training.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/train_for_mushi.yaml')
    parser.add_argument('--name', default='train_for_mushi')
    parser.add_argument('--gpu', default='0')
    args = parser.parse_args()
    config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
    # More than one GPU id enables data-parallel mode.
    if len(args.gpu.split(',')) > 1:
        config['_parallel'] = True
        config['_gpu'] = args.gpu
    # NOTE(review): utils.set_gpu('6') already ran at import time; this
    # call overrides it with the CLI value.
    utils.set_gpu(args.gpu)
    main(config)
|
CharleyZhao123/graceful-few-shot | data/datasets/__init__.py | from .datasets import make
from .img_mini_imagenet import ImgMiniImageNet
from .gb_100 import GB100
from .mushi import Mushi
from .cub import CUB
|
CharleyZhao123/graceful-few-shot | test_mva.py | <reponame>CharleyZhao123/graceful-few-shot
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '7'
import argparse
import torch
import torch.nn.functional as F
import numpy as np
import yaml
import utils
import utils.few_shot as fs
from tqdm import tqdm
import scipy.stats
from models import build_model
from data import build_dataloader
def mean_confidence_interval(data, confidence=0.95):
    """Half-width of the two-sided *confidence* interval of the sample mean."""
    samples = 1.0 * np.array(data)
    dof = len(samples) - 1
    std_err = scipy.stats.sem(samples)
    # Two-sided Student-t critical value.
    t_critical = scipy.stats.t.ppf((1 + confidence) / 2., dof)
    return std_err * t_critical
def main(config):
    """Evaluate the MVA network on sampled few-shot tasks and log accuracy."""
    # ===== Prepare output directory and logging =====
    save_name = args.name
    save_path = os.path.join('./save/test_mva', save_name)
    utils.ensure_path(save_path)
    utils.set_log_path(save_path)
    yaml.dump(config, open(os.path.join(save_path, 'config.yaml'), 'w'))
    # ===== Prepare data and model =====
    # dataloader
    test_dataloader = build_dataloader(config['test_dataloader_args'])
    # model
    network_args = config['network_args']
    model = build_model(network_args['model_name'], network_args['model_args'])
    # Keep a pristine MVA copy so every task starts from the same initial
    # MVA weights, unaffected by adaptation on earlier tasks.
    mva_name = network_args['model_args']['mva_name']
    mva_args = network_args['model_args']['mva_args']
    if mva_args.get('update'):
        update_mva = mva_args['update']
    else:
        update_mva = False
    origin_mva_model = build_model(mva_name, mva_args)
    utils.log('num params: {}'.format(utils.compute_n_params(model)))
    # Task setup (episode shape)
    test_dataloader_args = config['test_dataloader_args']
    task_per_batch = test_dataloader_args['batch_size']
    test_sampler_args = test_dataloader_args['sampler_args']
    way_num = test_sampler_args['way_num']
    shot_num = test_sampler_args['shot_num']
    query_num = test_sampler_args['query_num']
    # ===== Fix the random seed =====
    utils.set_seed(1)
    # ===== Testing =====
    test_epochs = args.test_epochs
    aves_keys = ['test_loss', 'test_acc']
    aves = {k: utils.Averager() for k in aves_keys}
    test_acc_list = []
    model.eval()
    for epoch in range(1, test_epochs + 1):
        for image, _, _ in tqdm(test_dataloader, leave=False):
            image = image.cuda()  # No Patch: [320, 3, 80, 80]; Patch: [320, 10, 3, 80, 80]
            # Reload the pristine MVA parameters before each batch of tasks.
            if update_mva:
                model.mva.load_state_dict(origin_mva_model.state_dict())
            with torch.no_grad():
                # [320, 5]: 320 = 4 x (5 x (1 + 15))
                logits = model(image)
                label = fs.make_nk_label(
                    way_num, query_num, task_per_batch).cuda()
                loss = F.cross_entropy(logits, label)
                acc = utils.compute_acc(logits, label)
            aves['test_loss'].add(loss.item(), len(image))
            aves['test_acc'].add(acc, len(image))
            test_acc_list.append(acc)
        log_str = 'test epoch {}: acc={:.2f} +- {:.2f} (%), loss={:.4f}'.format(
            epoch,
            aves['test_acc'].item() * 100,
            mean_confidence_interval(test_acc_list) * 100,
            aves['test_loss'].item()
        )
        utils.log(log_str)
if __name__ == '__main__':
    # CLI entry point: load the YAML config and run the evaluation.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/test_mva_network_sampling.yaml')
    parser.add_argument('--name', default='test_mva_network')
    parser.add_argument('--test-epochs', type=int, default=1)
    parser.add_argument('--gpu', default='0')
    args = parser.parse_args()
    config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
    # More than one GPU id enables data-parallel mode.
    if len(args.gpu.split(',')) > 1:
        config['_parallel'] = True
        config['_gpu'] = args.gpu
    utils.set_gpu(args.gpu)
    main(config)
|
CharleyZhao123/graceful-few-shot | data/datasets/mushi.py | import os
import pickle
from PIL import Image
import json
import torch
import clip
from torch.utils.data import Dataset
from torchvision import transforms
from .datasets import dataset_register
default_split = {
'train': 0.7,
'val': 0.3,
}
name2label = {
'person': 0,
'tank': 3,
'carrier': 2,
'armored': 1,
'car': 4,
'radar': 5,
'launch': 6
}
@dataset_register('mushi')
class Mushi(Dataset):
    """Mushi dataset wrapping simulated and/or real images.

    ``type`` selects the split strategy:
        'sim_data'  - train/val both drawn from simulated images
        'true_data' - train/val both drawn from real images
        'mix_data'  - train mixes sim+true, val is real only
    """

    def __init__(self, root_path, split='train', augment='default',
                 type='sim_data', shot_num=70, query_num=15, return_items=2, **kwargs):
        # NOTE(review): `type` shadows the builtin; kept for interface compatibility.
        self.root_path = root_path
        self.split = split
        self.type = type
        # ===== Load and arrange data according to the dataset type =====
        if self.type == 'sim_data':
            self.image, self.label = self.get_sim_data(shot_num, query_num)
        elif self.type == 'true_data':
            self.image, self.label = self.get_true_data(shot_num, query_num)
        elif self.type == 'mix_data':
            self.image, self.label = self.get_mix_data(shot_num, query_num)
        # ===== Preprocessing =====
        image_size = 224  # 80
        norm_params = {'mean': [0.485, 0.456, 0.406],
                       'std': [0.229, 0.224, 0.225]}
        normalize = transforms.Normalize(**norm_params)
        self.default_transform = transforms.Compose([
            transforms.Resize([224, 224]),
            transforms.ToTensor(),
            normalize,
        ])
        if augment == 'crop':
            self.transform = transforms.Compose([
                transforms.RandomResizedCrop(image_size),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        elif augment == 'resize':
            self.transform = transforms.Compose([
                transforms.Resize([image_size, image_size]),  # transforms.Resize(image_size)
                # transforms.RandomCrop(image_size, padding=8),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        elif augment == 'clip':
            # Use CLIP's own preprocessing pipeline.
            device = "cuda" if torch.cuda.is_available() else "cpu"
            _, preprocess = clip.load('ViT-B/32', device)
            self.transform = preprocess
        else:
            self.transform = self.default_transform

        def convert_raw(x):
            # Undo normalization (e.g. for visualization).
            mean = torch.tensor(norm_params['mean']).view(3, 1, 1).type_as(x)
            std = torch.tensor(norm_params['std']).view(3, 1, 1).type_as(x)
            return x * std + mean
        self.convert_raw = convert_raw
        # Misc
        self.return_items = return_items

    def get_sim_data(self, shot_num, query_num):
        '''
        Train split: simulated data.
        Val split: simulated data.
        '''
        # Load the simulated dataset's info json file.
        info_json_file = os.path.join(self.root_path, 'dataset_info.json')
        with open(info_json_file, 'r') as json_f:
            info_dict = json.load(json_f)
        # Arrange data
        train_image = []
        train_label = []
        val_image = []
        val_label = []
        father_info = info_dict['fathers']
        for f in father_info:
            f_class_num = f['num']
            f_label = int(name2label[f['name']])
            # Temporarily skip radar and launch.
            if f_label == 5 or f_label == 6:
                continue
            # Assign to the training split (capped at shot_num per class).
            f_train_num = int(f_class_num*default_split['train'])
            f_train_image = f['images'][:f_train_num][:shot_num]
            f_train_label = ([f_label] * f_train_num)[:shot_num]
            train_image += f_train_image
            train_label += f_train_label
            # Assign to the validation split (capped at query_num per class).
            f_val_num = f_class_num - f_train_num
            f_val_image = f['images'][f_train_num:][:query_num]
            f_val_label = ([f_label] * f_val_num)[:query_num]
            val_image += f_val_image
            val_label += f_val_label
        if self.split == 'train':
            image = train_image
            label = train_label
        else:
            image = val_image
            label = val_label
        return image, label

    def get_true_data(self, shot_num, query_num):
        '''
        Train split: real data.
        Val split: real data.
        '''
        true_floder_path = os.path.join(self.root_path, 'true')
        class_floder_list = os.listdir(true_floder_path)
        # Arrange data
        train_image = []
        train_label = []
        val_image = []
        val_label = []
        for c in class_floder_list:
            c_label = int(name2label[c])
            c_floder_path = os.path.join(true_floder_path, c)
            c_image_list = os.listdir(c_floder_path)
            # Sort numerically by filename stem for a deterministic order.
            c_image_list.sort(key=lambda x: int(x[:-4]))
            # print(c_image_list)
            # Make paths relative to root_path.
            c_image_list = [os.path.join('true', c, p) for p in c_image_list]
            c_class_num = len(c_image_list)
            # Assign to the training split (capped at shot_num per class).
            c_train_num = int(c_class_num*default_split['train'])
            c_train_image = c_image_list[:c_train_num][:shot_num]
            c_train_label = ([c_label] * c_train_num)[:shot_num]
            train_image += c_train_image
            train_label += c_train_label
            # Assign to the validation split (last query_num items per class).
            c_val_num = c_class_num - c_train_num
            c_val_image = c_image_list[c_train_num:][-query_num:]
            c_val_label = ([c_label] * c_val_num)[-query_num:]
            val_image += c_val_image
            val_label += c_val_label
        if self.split == 'train':
            image = train_image
            label = train_label
        else:
            image = val_image
            label = val_label
        return image, label

    def get_mix_data(self, shot_num, query_num):
        '''
        Train split: simulated and real data mixed at a fixed ratio.
        Val split: real data only.
        '''
        # Training split mixes both sources.
        if self.split == 'train':
            sim_true_rate = {
                'sim': 0.7,
                'true': 0.3
            }
            sim_shot_num = int(shot_num*sim_true_rate['sim'])
            true_shot_num = shot_num - sim_shot_num
            # Simulated data
            sim_image, sim_label = self.get_sim_data(sim_shot_num, query_num)
            # Real data
            true_image, true_label = self.get_true_data(
                true_shot_num, query_num)
            image = sim_image + true_image
            label = sim_label + true_label
        # Validation split is real data.
        else:
            image, label = self.get_true_data(shot_num, query_num)
        return image, label

    def __len__(self):
        return len(self.image)

    def __getitem__(self, index):
        # Map label-annotation paths to the corresponding origin images.
        image_path = self.image[index].replace(
            'label', 'origin').replace('json', 'png')
        image_path = os.path.join(self.root_path, image_path)
        # print(image_path)
        image = Image.open(image_path).convert('RGB')
        if self.return_items == 3:
            # Third item is a placeholder name for samplers that expect one.
            fake_data = 'fake_name'
            return self.transform(image), self.label[index], fake_data
        else:
            return self.transform(image), self.label[index]
if __name__ == '__main__':
    # Smoke test: load the real-data validation split and print one sample.
    mushi = Mushi(
        root_path='/space1/zhaoqing/dataset/fsl/mushi', split='val', type='true_data')
    print(mushi.__getitem__(0))
    # sim: train: 402, val: 178;
    # true v1: train: 407 val: 174;
    # true v2: train: 403 val: 171;
    print(len(mushi))
|
CharleyZhao123/graceful-few-shot | data/datasets/cub.py | import os
import pickle
from PIL import Image
import torch
import clip
from torch.nn.functional import normalize
from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np
from .datasets import dataset_register
@dataset_register('cub')
class CUB(Dataset):
    """CUB few-shot dataset read from a CSV split file.

    Each CSV row is ``filename,wnid``; integer labels are assigned densely
    in the order new wnids are encountered.
    """

    def __init__(self, root_path, split='train', return_items=2, **kwargs):
        split_file = '{}.csv'.format(split)
        IMAGE_PATH = os.path.join(root_path)
        SPLIT_PATH = os.path.join(root_path, 'split', split_file)
        # Skip the CSV header line.
        lines = [x.strip() for x in open(SPLIT_PATH, 'r').readlines()][1:]
        data = []
        label = []
        lb = -1
        self.wnids = []
        if split == 'train':
            lines.pop(5846)  # this image is broken
        for l in lines:
            context = l.split(',')
            name = context[0]
            wnid = context[1]
            path = os.path.join(IMAGE_PATH, name)
            # A new dense class id every time an unseen wnid appears.
            if wnid not in self.wnids:
                self.wnids.append(wnid)
                lb += 1
            data.append(path)
            label.append(lb)
        image_size = 84
        self.data = data
        self.label = label
        self.n_classes = np.unique(np.array(label)).shape[0]
        # NOTE: this local shadows torch.nn.functional.normalize imported at
        # module level (only within __init__).
        normalize = transforms.Normalize(
            np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
            np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))
        self.default_transform=transforms.Compose([
            transforms.Resize([92, 92]),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            normalize,
        ])
        augment=kwargs.get('augment')
        # NOTE(review): 'resize' applies RandomResizedCrop and 'crop' applies
        # Resize+RandomCrop -- the names look swapped; confirm intent.
        if augment == 'resize':
            self.transform=transforms.Compose([
                transforms.RandomResizedCrop(image_size),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        elif augment == 'crop':
            self.transform = transforms.Compose([
                transforms.Resize(image_size),
                transforms.RandomCrop(image_size, padding=8),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        elif augment == 'clip':
            # Use CLIP's own preprocessing pipeline.
            device = "cuda" if torch.cuda.is_available() else "cpu"
            _, preprocess = clip.load('ViT-B/32', device)
            self.transform = preprocess
        else:
            self.transform = self.default_transform
        # Misc
        self.return_items = return_items

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        path, label = self.data[i], self.label[i]
        image = self.transform(Image.open(path).convert('RGB'))
        if self.return_items == 3:
            # Third item is a placeholder name for samplers that expect one.
            return image, label, 'fake_name'
        else:
            return image, label
|
CharleyZhao123/graceful-few-shot | data/__init__.py | <reponame>CharleyZhao123/graceful-few-shot
from .dataloader_init import build_dataloader
from .samplers import MetatasksSampler
from .datasets import ImgMiniImageNet, Mushi, GB100, CUB
|
CharleyZhao123/graceful-few-shot | test_encoder_fsl.py | <filename>test_encoder_fsl.py
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '7'
import argparse
import torch
import torch.nn.functional as F
import numpy as np
import yaml
import utils
import utils.few_shot as fs
from tqdm import tqdm
import scipy.stats
from models import build_model
from data import build_dataloader
def mean_confidence_interval(data, confidence=0.95):
    """Half-width of the two-sided *confidence* interval of the sample mean."""
    samples = 1.0 * np.array(data)
    dof = len(samples) - 1
    std_err = scipy.stats.sem(samples)
    # Two-sided Student-t critical value.
    t_critical = scipy.stats.t.ppf((1 + confidence) / 2., dof)
    return std_err * t_critical
def main(config):
    """Few-shot evaluation of a plain encoder via prototype matching."""
    # ===== Prepare output directory and logging =====
    save_name = args.name
    save_path = os.path.join('./save/test_encoder_fsl', save_name)
    utils.ensure_path(save_path)
    utils.set_log_path(save_path)
    yaml.dump(config, open(os.path.join(save_path, 'config.yaml'), 'w'))
    # ===== Prepare data and model =====
    # dataloader
    test_dataloader = build_dataloader(config['test_dataloader_args'])
    # model
    network_args = config['network_args']
    model = build_model(network_args['model_name'], network_args['model_args'], network_args['model_load_para'])
    utils.log('num params: {}'.format(utils.compute_n_params(model)))
    # Task setup (episode shape)
    test_dataloader_args = config['test_dataloader_args']
    task_per_batch = test_dataloader_args['batch_size']
    test_sampler_args = test_dataloader_args['sampler_args']
    way_num = test_sampler_args['way_num']
    shot_num = test_sampler_args['shot_num']
    query_num = test_sampler_args['query_num']
    # ===== Fix the random seed =====
    utils.set_seed(1)
    # ===== Testing =====
    test_epochs = args.test_epochs
    aves_keys = ['test_loss', 'test_acc']
    aves = {k: utils.Averager() for k in aves_keys}
    test_acc_list = []
    model.eval()
    for epoch in range(1, test_epochs + 1):
        for image, _, _ in tqdm(test_dataloader, leave=False):
            image = image.cuda()  # [320, 3, 224, 224]
            with torch.no_grad():
                # [320, 512]: 320 = 4 x (5 x (1 + 15))
                # CLIP-style encoders expose encode_image instead of __call__.
                if 'encode_image' in dir(model):
                    image_feature = model.encode_image(image).float()
                else:
                    image_feature = model(image)
                # Split into shot and query features
                # x_shot: [4, 5, 1, 512]
                # x_query: [4, 75, 512]
                x_shot, x_query = fs.split_shot_query(
                    image_feature, way_num, shot_num, query_num, task_per_batch)
                # Compute similarities and logits
                if config['similarity_method'] == 'cos':
                    x_shot = x_shot.mean(dim=-2)
                    x_shot = F.normalize(x_shot, dim=-1)
                    x_query = F.normalize(x_query, dim=-1)
                    metric = 'dot'
                elif config['similarity_method'] == 'sqr':
                    x_shot = x_shot.mean(dim=-2)
                    metric = 'sqr'
                logits = utils.compute_logits(
                    x_query, x_shot, metric=metric, temp=1.0).view(-1, way_num)
                label = fs.make_nk_label(
                    way_num, query_num, task_per_batch).cuda()
                loss = F.cross_entropy(logits, label)
                acc = utils.compute_acc(logits, label)
            aves['test_loss'].add(loss.item(), len(image))
            aves['test_acc'].add(acc, len(image))
            test_acc_list.append(acc)
        log_str = 'test epoch {}: acc={:.2f} +- {:.2f} (%), loss={:.4f}'.format(
            epoch,
            aves['test_acc'].item() * 100,
            mean_confidence_interval(test_acc_list) * 100,
            aves['test_loss'].item()
        )
        utils.log(log_str)
if __name__ == '__main__':
    # CLI entry point: load the YAML config and run the evaluation.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/test_encoder_fsl.yaml')
    parser.add_argument('--name', default='test_encoder_fsl')
    parser.add_argument('--test-epochs', type=int, default=1)
    parser.add_argument('--gpu', default='0')
    args = parser.parse_args()
    config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
    # More than one GPU id enables data-parallel mode.
    if len(args.gpu.split(',')) > 1:
        config['_parallel'] = True
        config['_gpu'] = args.gpu
    utils.set_gpu(args.gpu)
    main(config)
|
CharleyZhao123/graceful-few-shot | models/component/multi_view_attention.py | '''
可复用多视角注意力模型:
Simple-Query:
基础形式
输入: 一组Query向量和一组Key向量. Q: [T, Q, dim], K: [T, W, S, dim];
输出: 针对Query向量中每个Query的一个Key的聚合. O: [T, Q, W, dim]
示例: 每个batch有4个task的5-way 5-shot 15-query任务:
输入: Q: [4, 5x15, 512], K: [4, 5, 5, 512]
输出: O: [4, 5x15, 5, 512]
Multi-Query:
拓展形式, 暂不考虑
'''
from models import model_register
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import sys
sys.path.append('..')
@model_register('dot-attention')
class DotAttention(nn.Module):
    '''
    Parameter-free dot-product attention.

    forward(query, key):
        query: [T, Q, dim], key: [T, W, S, dim]
        returns: [T, Q, W, dim] -- one aggregated support feature per
        (query, way) pair.
    '''

    def __init__(self, dim=512, use_scaling=False, similarity_method='cos', nor_type='l2_norm', **kargs):
        super().__init__()
        self.dim = dim
        self.use_scaling = use_scaling
        # Standard 1/sqrt(dim) attention scaling factor.
        self.scaling = torch.sqrt(torch.tensor(dim).float())
        self.similarity_method = similarity_method
        self.nor_type = nor_type

    def forward(self, query, key):
        # Reshape both tensors so they broadcast per (query, way) pair.
        query_num = query.shape[1]
        way_num = key.shape[1]
        key = key.unsqueeze(1).repeat(
            1, query_num, 1, 1, 1)  # [T, Q, W, S, dim]
        query = query.unsqueeze(2).repeat(
            1, 1, way_num, 1).unsqueeze(-2)  # [T, Q, W, 1, dim]
        # Query/key similarity: cosine or raw dot product.
        if self.similarity_method == 'cos':
            nor_query = F.normalize(query, dim=-1)
            nor_key = F.normalize(key, dim=-1)
            sim = torch.matmul(nor_query, nor_key.permute(
                0, 1, 2, 4, 3))  # [T, Q, W, 1, S]
        else:
            sim = torch.matmul(query, key.permute(
                0, 1, 2, 4, 3))  # [T, Q, W, 1, S]
        # Post-process the similarities.
        if self.use_scaling:
            sim = sim / self.scaling
        if self.nor_type == 'softmax':
            sim = F.softmax(sim, dim=-1)
        elif self.nor_type == 'l2_norm':
            sim = F.normalize(sim, dim=-1)
        # print(sim[0, 0, 0, 0, :])
        # Similarity-weighted sum over the shots.
        output = torch.matmul(sim, key).squeeze(-2)  # [T, Q, W, dim]
        return output
class LinearTrans(nn.Module):
    '''
    Learnable linear projection used for the Q/K/V transforms.

    w_type:
        eye_default: the weight is initialized as an identity matrix and is
            fully trainable
        eye_add: the weight is the sum of a fixed identity and a trainable
            matrix initialized to zeros (residual-style parametrization)
    '''

    def __init__(self, dim=512, way_num=5, out_type='query', w_type='eye_add'):
        super(LinearTrans, self).__init__()
        self.out_type = out_type
        self.w_type = w_type
        self.dim = dim
        self.way_num = way_num
        # Build the projection weight.
        if self.out_type == 'query':
            if self.w_type == 'eye_add':
                self.weight = nn.Parameter(torch.zeros((dim, dim)))
            else:  # eye_default
                eye_base = torch.eye(dim)
                self.weight = nn.Parameter(eye_base)  # [dim, dim]
        else:  # key, value
            if self.w_type == 'eye_add':
                self.weight = nn.Parameter(torch.zeros(way_num, dim, dim))
            else:  # eye_default
                eye_base = torch.eye(dim)
                eye_repeat = eye_base.unsqueeze(0).repeat(way_num, 1, 1)
                self.weight = nn.Parameter(eye_repeat)  # [way_num, dim, dim]

    def forward(self, in_feat):
        output_weight = self.weight
        if self.out_type == 'query':
            # in_feat: [1, Q, dim]
            if self.w_type == 'eye_add':
                # Residual parametrization: add a fixed identity. Use the
                # input's device instead of the previous hard-coded .cuda()
                # so CPU execution also works; identical behavior on GPU.
                output_weight = output_weight + torch.eye(self.dim).to(in_feat.device)
            out_feat = torch.tensordot(in_feat, output_weight, dims=([2], [0]))
        else:
            # in_feat: [1, W, S, dim]
            if self.w_type == 'eye_add':
                eye_base = torch.eye(self.dim)
                eye_repeat = eye_base.unsqueeze(0).repeat(self.way_num, 1, 1)
                # Same device fix as above (previously hard-coded .cuda()).
                output_weight = output_weight + eye_repeat.to(in_feat.device)
            out_feat = torch.matmul(in_feat, output_weight.unsqueeze(0))
        return out_feat
@model_register('w-attention')
class WAttention(nn.Module):
    '''
    Attention with learnable Q/K/V projection matrices.

    Input:  Q: [1, Q, dim], K: [1, W, S, dim]
    Output: per-(query, way) prototype features [1, Q, W, dim].
    '''
    def __init__(self, dim=512, use_scaling=False, similarity_method='cos',
                 nor_type='l2_norm', way_num=5, w_type='add', **kargs):
        super().__init__()
        self.dim = dim
        self.use_scaling = use_scaling
        self.scaling = torch.sqrt(torch.tensor(dim).float())
        self.similarity_method = similarity_method
        self.nor_type = nor_type
        self.way_num = way_num
        # Build the learnable Q/K/V projection matrices.
        self.query_trans = LinearTrans(self.dim, self.way_num, 'query', w_type)
        self.key_trans = LinearTrans(self.dim, self.way_num, 'key', w_type)
        self.value_trans = LinearTrans(self.dim, self.way_num, 'value', w_type)

    def forward(self, query, key):
        # ===== preparation =====
        query_num = query.shape[1]
        way_num = key.shape[1]
        # ===== linear projections =====
        new_query = self.query_trans(query)  # [1, Q, dim]
        new_key = self.key_trans(key)  # [1, W, S, dim]
        new_value = self.value_trans(key)  # [1, W, S, dim]
        # ===== build the prototype features =====
        # Rearrange tensors so they broadcast per (task, query, way).
        new_key = new_key.unsqueeze(1).repeat(
            1, query_num, 1, 1, 1)  # [T, Q, W, S, dim]
        new_value = new_value.unsqueeze(1).repeat(
            1, query_num, 1, 1, 1)  # [T, Q, W, S, dim]
        new_query = new_query.unsqueeze(2).repeat(
            1, 1, way_num, 1).unsqueeze(-2)  # [T, Q, W, 1, dim]
        # Query/key similarity.
        if self.similarity_method == 'cos':
            nor_query = F.normalize(new_query, dim=-1)
            nor_key = F.normalize(new_key, dim=-1)
            sim = torch.matmul(nor_query, nor_key.permute(
                0, 1, 2, 4, 3))  # [T, Q, W, 1, S]
        else:
            # BUGFIX: this branch previously did nothing (``pass``), so any
            # non-'cos' similarity_method crashed with an UnboundLocalError
            # on ``sim``. Use the raw dot product, mirroring DotAttention.
            sim = torch.matmul(new_query, new_key.permute(
                0, 1, 2, 4, 3))  # [T, Q, W, 1, S]
        # Post-process the similarity scores.
        if self.use_scaling:
            sim = sim / self.scaling
        if self.nor_type == 'softmax':
            sim = F.softmax(sim, dim=-1)
        elif self.nor_type == 'l2_norm':
            sim = F.normalize(sim, dim=-1)
        # Similarity-weighted sum of the projected values.
        proto_feat = torch.matmul(sim, new_value).squeeze(-2)  # [T, Q, W, dim]
        return proto_feat
if __name__ == '__main__':
    # Smoke test: 4 tasks, 75 queries, 5-way with 5 support samples, 512-d
    # features; expected output shape is [4, 75, 5, 512].
    query=torch.rand((4, 75, 512))
    key=torch.rand((4, 5, 5, 512))
    w_attention=WAttention()
    output = w_attention(query, key)
    print(output.shape)
|
CharleyZhao123/graceful-few-shot | models/__init__.py | <gh_stars>1-10
from .model_init import build_model, model_register
from .backbone import Resnet12, Resnet18
from .classifier import LinearClassifier, NNClassifier
from .component import DotAttention
from .network import GBClassifyNetwork, BasePretrainNetwork
from .old_model import Classifier |
CharleyZhao123/graceful-few-shot | utils/organize_dataset.py | import json
import csv
from json import encoder
def organize_dataset(input_info, input_dataset_info_path):
    '''
    Add a Chinese class-name field to the entries of
    save/dataset_info/train_class_info.json.

    Parameters
    ----------
    input_info : dict
        Maps a class id to its numeric label.
    input_dataset_info_path : str
        Path to a CSV whose rows look like (?, class_id, chinese_name).

    Returns
    -------
    dict
        Maps class id -> {'label': label, 'chinese_name': name}; the
        'chinese_name' key is omitted when the id is not found in the CSV.

    todo: values are currently stored as Unicode escapes; could be changed
    to the actual Chinese characters.
    '''
    # Read the CSV once up front. The original re-opened and re-scanned the
    # whole file for every class and never closed the handle. setdefault
    # keeps the FIRST occurrence of an id, matching the original's early
    # ``break`` behaviour when ids are duplicated.
    name_by_id = {}
    with open(input_dataset_info_path) as csv_file:
        for line in csv.reader(csv_file):
            name_by_id.setdefault(str(line[1]), line[2])
    output_info = {}
    for n, label in input_info.items():
        class_info = {
            "label": label
        }
        if str(n) in name_by_id:
            class_info['chinese_name'] = name_by_id[str(n)]
        output_info[n] = class_info
    return output_info
if __name__ == '__main__':
    # NOTE(review): paths are machine-specific absolute paths; consider
    # making them CLI arguments.
    input_info_path = '/space1/zhaoqing/code/graceful-few-shot/save/dataset_info/train_class_info.json'
    input_dataset_info_path = '/space1/zhaoqing/code/graceful-few-shot/save/imagenet_chinesename.csv'
    output_info_path = '/space1/zhaoqing/code/graceful-few-shot/save/dataset_info/train_class_info_plus.json'
    with open(input_info_path, 'r') as load_f:
        input_info = json.load(load_f)
    # Enrich the label mapping with Chinese names and write it back out.
    output_info = organize_dataset(input_info, input_dataset_info_path)
    json_str = json.dumps(output_info)
    with open(output_info_path, 'w') as json_f:
        json_f.write(json_str)
|
CharleyZhao123/graceful-few-shot | data/datasets/img_mini_imagenet.py | <reponame>CharleyZhao123/graceful-few-shot
import os
import pickle
from PIL import Image
import torch
import clip
from torch.utils.data import Dataset
from torchvision import transforms
from .datasets import dataset_register
@dataset_register('img-mini-imagenet')
class ImgMiniImageNet(Dataset):
    """miniImageNet dataset that reads raw image files.

    Expects ``root_path/images`` with the image files and
    ``root_path/split/{split}.csv`` with (filename, wnid) rows (first line
    is a header and is skipped).

    patch_type selects what __getitem__ returns:
      * 'none'     -- a single transformed image
      * 'sampling' -- a stack of the original image plus 19 random crops
      * 'gird'     -- NOTE(review): looks like a typo of 'grid'; the branch
                      is unimplemented and falls through returning None.
    """
    def __init__(self, root_path, split='train', patch_type='none',**kwargs):
        split_file = '{}.csv'.format(split)
        IMAGE_PATH = os.path.join(root_path, 'images')
        SPLIT_PATH = os.path.join(root_path, 'split', split_file)
        self.patch_type=patch_type # how the image is split into patches
        # Skip the CSV header line.
        lines = [x.strip() for x in open(SPLIT_PATH, 'r').readlines()][1:]
        data = []
        label = []
        lb = -1
        self.wnids = []
        for l in lines:
            name, wnid = l.split(',')
            path = os.path.join(IMAGE_PATH, name)
            if wnid not in self.wnids:
                # First time this class is seen: assign the next label.
                # NOTE(review): assumes rows are grouped by wnid; an
                # interleaved CSV would assign wrong labels -- confirm
                # against the split files.
                self.wnids.append(wnid)
                lb += 1
            data.append(path)
            label.append(lb)
        image_size = 80
        self.data = data
        self.label = label
        self.n_classes = max(self.label) + 1
        # ===== data augmentation setup =====
        norm_params = {'mean': [0.485, 0.456, 0.406],
                       'std': [0.229, 0.224, 0.225]}
        normalize = transforms.Normalize(**norm_params)
        # Transform used outside patch mode (also used for the base image
        # in 'sampling' mode).
        self.default_transform = transforms.Compose([
            transforms.Resize([80, 80]),
            transforms.ToTensor(),
            normalize,
        ])
        augment = kwargs.get('augment')
        if augment == 'resize':
            self.transform = transforms.Compose([
                transforms.RandomResizedCrop(image_size),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        elif augment == 'crop':
            self.transform = transforms.Compose([
                transforms.Resize(image_size),
                transforms.RandomCrop(image_size, padding=8),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        elif augment == 'clip':
            # Use CLIP's own preprocessing pipeline.
            device = "cuda" if torch.cuda.is_available() else "cpu"
            _, preprocess = clip.load('ViT-B/32', device)
            self.transform = preprocess
        else:
            self.transform = self.default_transform
        # Transform used in patch ('sampling') mode; overrides any
        # ``augment`` choice made above.
        if self.patch_type == 'sampling':
            image_size = 80
            self.transform = transforms.Compose([
                transforms.RandomResizedCrop(image_size),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
                # transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
                #                      np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))
            ])
        def convert_raw(x):
            # Undo the normalization (useful for visualization).
            mean = torch.tensor(norm_params['mean']).view(3, 1, 1).type_as(x)
            std = torch.tensor(norm_params['std']).view(3, 1, 1).type_as(x)
            return x * std + mean
        self.convert_raw = convert_raw
    def __len__(self):
        return len(self.data)
    def __getitem__(self, i):
        # NOTE(review): assumes every path ends in a fixed 21-character
        # filename -- verify against the split CSVs.
        imgname = self.data[i][-21:]
        if self.patch_type == 'none':
            img = Image.open(self.data[i]).convert('RGB')
            return self.transform(img), self.label[i], imgname
        elif self.patch_type == 'gird':
            # Unimplemented; falls through and returns None.
            pass
        elif self.patch_type == 'sampling':
            patch_list = []
            # First element of patch_list is the (deterministic) base image.
            patch_list.append(self.default_transform(Image.open(self.data[i]).convert('RGB')))
            # Remaining elements are randomly augmented patches.
            extra_patch_num = 19
            for _ in range(extra_patch_num):
                patch_list.append(self.transform(Image.open(self.data[i]).convert('RGB')))
            patch_list = torch.stack(patch_list, dim=0)  # [1+patch_num, 3, 80, 80]
            return patch_list, self.label[i], imgname
|
CharleyZhao123/graceful-few-shot | get_best_base_data.py | <reponame>CharleyZhao123/graceful-few-shot
import torch
import torch.nn as nn
import os
import yaml
import json
import pickle
import utils
import argparse
from data import build_dataloader
from models import build_model
from tqdm import tqdm
base_proto_f_path = 'visualization/saved_data/base_prototype_feature.pth'
base_proto_l_path = 'visualization/saved_data/base_prototype_label.pth'
def get_best_base_sample(config):
    '''
    For every base class, collect the images whose classifier logits are
    highest and lowest for that class and dump them as a training set for
    a good/bad binary classifier.

    Writes ``save/gb_dataset/gb-dataset-small.pickle`` containing, per
    class, the 100 best-scoring images (gb_label 1.0) and the 100
    worst-scoring images *of the same class* (gb_label 0.0).
    '''
    # ===== prepare data and model =====
    # nn classifier base good prototype (disabled)
    # base_proto_f = torch.load(base_proto_f_path)
    # base_proto_l = torch.load(base_proto_l_path)
    # dataloader
    train_dataloader = build_dataloader(config['train_dataloader_args'])
    # model
    model = build_model(config['network_args'])
    if config.get('_parallel'):
        model = nn.DataParallel(model)
    model.eval()
    # ===== record training-set image information =====
    # record or load the class-id / class-name mapping (disabled below)
    # classname_dict = {}
    # Record label, imgname and logits for every image.
    # NOTE(review): growing tensors with torch.cat inside the loop is
    # O(n^2); appending to lists and concatenating once would be faster.
    imgname_list = []
    label_tensor = torch.zeros([1])
    data_tensor = torch.zeros([1, 3, 80, 80]).cuda()
    feature_tensor = torch.zeros([1, 512]).cuda()
    logits_tensor = torch.zeros([1, 64]).cuda()
    for data, label, imgname in tqdm(train_dataloader, leave=False):
        data = data.cuda()  # [1, 3, 80, 80] -- assumes batch size 1
        if False:  # disabled one-off bookkeeping run
            # record the class-id / class-name mapping
            if imgname[0][0:9] not in classname_dict:
                classname_dict[imgname[0][0:9]] = label[0].item()
        with torch.no_grad():
            feature, logits = model(data)
        imgname_list.append(imgname[0])
        data_tensor = torch.cat((data_tensor, data), 0)
        label_tensor = torch.cat((label_tensor, label), 0)
        feature_tensor = torch.cat((feature_tensor, feature), 0)
        logits_tensor = torch.cat((logits_tensor, logits), 0)
    # Drop the all-zero seed row that bootstrapped the torch.cat loop.
    data_tensor = data_tensor[1:, :, :, :].cpu()  # [38400, 3, 80, 80]
    label_tensor = label_tensor[1:]  # [38400]
    feature_tensor = feature_tensor[1:, :].cpu()  # [38400, 512]
    logits_tensor = logits_tensor[1:, :].cpu()  # [38400, 64]
    # Build the pickle used to train the good/bad binary classifier:
    # 64 classes; per class the best 100 and the worst 100 images.
    gb_dataset = {
        'data': [],
        'feature': [],
        'imgname': [],
        'origin_label': [],
        'logits': [],
        'gb_label': []
    }
    for class_i in range(64):
        class_i = int(class_i)
        # NOTE(review): class_i_best / class_i_worst are unused leftovers
        # from the json-export code commented out below.
        class_i_best = {}
        class_i_worst = {}
        class_i_logits = logits_tensor[:, class_i]
        # Find the 100 best-scoring images (any ground-truth class).
        b_sorted_class_i_logits, b_index = torch.sort(
            class_i_logits, descending=True)
        for n in range(100):
            n_index = b_index[n]
            gb_dataset['data'].append(data_tensor[n_index, :, :, :].clone())
            gb_dataset['feature'].append(feature_tensor[n_index, :].clone())
            gb_dataset['imgname'].append(imgname_list[n_index])
            gb_dataset['origin_label'].append(label_tensor[n_index].item())
            gb_dataset['logits'].append(b_sorted_class_i_logits[n].item())
            gb_dataset['gb_label'].append(1.0)
        # Find the 100 worst-scoring images of the SAME class.
        w_sorted_class_i_logits, w_index = torch.sort(
            class_i_logits, descending=False)
        n = 0
        count_image = 0
        while count_image < 100:
            n_index = w_index[n]
            n_label = label_tensor[n_index].item()
            if int(n_label) == class_i:
                gb_dataset['data'].append(data_tensor[n_index, :, :, :].clone())
                gb_dataset['feature'].append(feature_tensor[n_index, :].clone())
                gb_dataset['imgname'].append(imgname_list[n_index])
                gb_dataset['origin_label'].append(label_tensor[n_index].item())
                gb_dataset['logits'].append(w_sorted_class_i_logits[n].item())
                gb_dataset['gb_label'].append(0.0)
                count_image += 1
            n += 1
    gb_dataset_file = open('save/gb_dataset/gb-dataset-small.pickle', 'wb')
    pickle.dump(gb_dataset, gb_dataset_file)
    gb_dataset_file.close()
    # # Produce json files for manual data inspection (disabled):
    # # store the 100 most / least similar images per class.
    # for class_i in range(64):
    #     class_i = int(class_i)
    #     class_i_best = {}
    #     class_i_worst = {}
    #     class_i_logits = logits_tensor[:, class_i]
    #     # find the 100 best images
    #     b_sorted_class_i_logits, b_index = torch.sort(class_i_logits, descending=True)
    #     for n in range(100):
    #         n_index = b_index[n]
    #         n_imgname = imgname_list[n_index]
    #         n_label = label_tensor[n_index].item()
    #         n_logits = b_sorted_class_i_logits[n].item()
    #         class_i_best[int(n)] = {
    #             'imgname': n_imgname,
    #             'label': int(n_label),
    #             'logits': n_logits
    #         }
    #     json_str = json.dumps(class_i_best)
    #     with open('save/dataset_info/best_images/train_class_' + str(class_i) + '_best.json', 'w') as json_file:
    #         json_file.write(json_str)
    #     # find the 100 worst same-class images
    #     w_sorted_class_i_logits, w_index = torch.sort(class_i_logits, descending=False)
    #     n = 0
    #     count_image = 0
    #     while count_image < 100:
    #         n_index = w_index[n]
    #         n_imgname = imgname_list[n_index]
    #         n_label = label_tensor[n_index].item()
    #         n_logits = w_sorted_class_i_logits[n].item()
    #         if int(n_label) == class_i:
    #             class_i_worst[int(count_image)] = {
    #                 'imgname': n_imgname,
    #                 'label': int(n_label),
    #                 'logits': n_logits
    #             }
    #             count_image += 1
    #         n += 1
    #     json_str = json.dumps(class_i_worst)
    #     with open('save/dataset_info/worst_images/train_class_' + str(class_i) + '_worst.json', 'w') as json_file:
    #         json_file.write(json_str)
    if False:  # disabled: companion of the bookkeeping branch above
        # record the class-id / class-name mapping
        json_str = json.dumps(classname_dict)
        with open('save/dataset_info/train_class_info.json', 'w') as json_file:
            json_file.write(json_str)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config', default='./configs/get_best_base_data.yaml')
    parser.add_argument('--name', default='best_base_data')
    parser.add_argument('--gpu', default='0')
    args = parser.parse_args()
    config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
    # Multiple comma-separated GPU ids enable DataParallel in the worker.
    if len(args.gpu.split(',')) > 1:
        config['_parallel'] = True
        config['_gpu'] = args.gpu
    utils.set_gpu(args.gpu)
    get_best_base_sample(config)
|
CharleyZhao123/graceful-few-shot | utils/few_shot.py | import torch
# 划分数据为meta-task形式
def split_shot_query(data, way, shot, query, task_per_batch=1):
    """Split a flat batch into meta-task support and query tensors.

    ``data`` must be laid out as
    [task_per_batch * way * (shot + query), *img_shape]. Returns
    ``x_shot`` of shape [task_per_batch, way, shot, *img_shape] and
    ``x_query`` of shape [task_per_batch, way * query, *img_shape].
    """
    sample_shape = data.shape[1:]
    grouped = data.view(task_per_batch, way, shot + query, *sample_shape)
    # First `shot` samples of every class form the support set,
    # the remaining `query` samples form the query set.
    x_shot = grouped[:, :, :shot].contiguous()
    x_query = grouped[:, :, shot:].contiguous()
    x_query = x_query.view(task_per_batch, way * query, *sample_shape)
    return x_shot, x_query
# 得到meta-task形式的label
def make_nk_label(n, k, task_per_batch=1):
    """Build meta-task labels: each class id 0..n-1 repeated k times,
    and the whole pattern tiled ``task_per_batch`` times."""
    per_task = torch.arange(n).repeat_interleave(k)
    return per_task.repeat(task_per_batch)
|
CharleyZhao123/graceful-few-shot | train_base_pretrain_network.py | <filename>train_base_pretrain_network.py
import argparse
import models
import torch
from models import build_model
from data import build_dataloader
import torch.nn.functional as F
import yaml
import utils
import os
from tqdm import tqdm
from tensorboardX import SummaryWriter
def main(config):
    """Supervised pre-training loop on the base classes.

    Trains the model with cross-entropy, validates every epoch, logs to
    tensorboard, and saves 'epoch-last', periodic 'epoch-N' and
    'max-val-acc' checkpoints.

    NOTE(review): reads the module-global ``args`` (set in the __main__
    block) for the run name instead of receiving it as a parameter --
    confirm before importing this function from elsewhere.
    """
    # ===== prepare output directory and logging =====
    save_name = args.name
    save_path = os.path.join('./save/train_base_pretrain_network', save_name)
    utils.ensure_path(save_path)
    utils.set_log_path(save_path)
    tb_writer = SummaryWriter(os.path.join(save_path, 'tensorboard'))
    yaml.dump(config, open(os.path.join(save_path, 'config.yaml'), 'w'))
    # ===== prepare data and model =====
    # train data
    train_dataloader = build_dataloader(config['train_dataloader_args'])
    # val data
    val_dataloader = build_dataloader(config['val_dataloader_args'])
    # model
    pretrain_model = build_model(config['network_args'])
    utils.log('num params: {}'.format(utils.compute_n_params(pretrain_model)))
    # ===== training =====
    # optimizer
    trainer_args = config['trainer_args']
    optimizer, lr_scheduler = utils.make_optimizer(pretrain_model.parameters(), trainer_args['optimizer_name'], **trainer_args['optimizer_args'])
    max_epoch = trainer_args['max_epoch']
    save_epoch = trainer_args['save_epoch']
    max_val_acc = 0.0
    timer_used = utils.Timer()
    timer_epoch = utils.Timer()
    # run the training loop
    for epoch in range(1, max_epoch + 1):
        timer_epoch.s()
        aves_keys = ['train_loss', 'train_acc', 'val_loss', 'val_acc']
        aves = {k: utils.Averager() for k in aves_keys}
        pretrain_model.train()
        tb_writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
        for image, label in tqdm(train_dataloader, desc='train', leave=False):
            image = image.cuda()
            label = label.cuda()
            _, logits = pretrain_model(image)
            ce_loss = F.cross_entropy(logits, label)
            acc = utils.compute_acc(logits, label)
            optimizer.zero_grad()
            ce_loss.backward()
            optimizer.step()
            aves['train_loss'].add(ce_loss.item())
            aves['train_acc'].add(acc)
            # Drop references so the graph can be freed between steps.
            logits = None
            ce_loss = None
        # validation
        pretrain_model.eval()
        for image, label in tqdm(val_dataloader, desc='val', leave=False):
            image = image.cuda()
            label = label.cuda()
            with torch.no_grad():
                _, logits = pretrain_model(image)
                ce_loss = F.cross_entropy(logits, label)
                acc = utils.compute_acc(logits, label)
            aves['val_loss'].add(ce_loss.item())
            aves['val_acc'].add(acc)
        if lr_scheduler is not None:
            lr_scheduler.step()
        # Collapse the running averages to plain floats.
        # (Original author asked "is this redundant?" -- it is needed:
        # the format strings below expect numbers, not Averager objects.)
        for k, v in aves.items():
            aves[k] = v.item()
        # write logs and save checkpoints
        t_epoch = utils.time_str(timer_epoch.t())
        t_used = utils.time_str(timer_used.t())
        t_estimate = utils.time_str(timer_used.t() / epoch * max_epoch)
        epoch_str = str(epoch)
        log_str = 'epoch {}, train {:.4f}|{:.4f}'.format(
            epoch_str, aves['train_loss'], aves['train_acc'])
        tb_writer.add_scalars('loss', {'train': aves['train_loss']}, epoch)
        tb_writer.add_scalars('acc', {'train': aves['train_acc']}, epoch)
        log_str += ', val {:.4f}|{:.4f}'.format(aves['val_loss'], aves['val_acc'])
        tb_writer.add_scalars('loss', {'val': aves['val_loss']}, epoch)
        tb_writer.add_scalars('acc', {'val': aves['val_acc']}, epoch)
        log_str += ', {} {}/{}'.format(t_epoch, t_used, t_estimate)
        utils.log(log_str)
        # Unwrap DataParallel before serializing the state dict.
        if config.get('_parallel'):
            model_ = pretrain_model.module
        else:
            model_ = pretrain_model
        training = config['trainer_args']
        save_obj = {
            'file': __file__,
            'config': config,
            'model_sd': model_.state_dict(),
            'training': training,
        }
        torch.save(save_obj, os.path.join(save_path, 'epoch-last.pth'))
        if (save_epoch is not None) and epoch % save_epoch == 0:
            torch.save(save_obj, os.path.join(
                save_path, 'epoch-{}.pth'.format(epoch)))
        if aves['val_acc'] > max_val_acc:
            max_val_acc = aves['val_acc']
            torch.save(save_obj, os.path.join(save_path, 'max-val-acc.pth'))
        tb_writer.flush()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/train_base_pretrain_network.yaml')
    parser.add_argument('--name', default='train_base_pretrain_network')
    parser.add_argument('--gpu', default='0')
    args = parser.parse_args()
    config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
    # Multiple comma-separated GPU ids enable DataParallel in main().
    if len(args.gpu.split(',')) > 1:
        config['_parallel'] = True
        config['_gpu'] = args.gpu
    utils.set_gpu(args.gpu)
    main(config)
|
jayway/iot-train-switch | switch.py | <reponame>jayway/iot-train-switch
#!/usr/bin/python
#
# Copyright 2017 <NAME>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the standard MIT license. See COPYING for more details.
import json
import time
import argparse
import Adafruit_PCA9685
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
class ShadowCallback(object):
    """Reacts to AWS IoT shadow delta updates by moving a servo.

    Drives a PCA9685 PWM controller: ``use_sidetrack`` True moves the servo
    to ``servoMin``, False to ``servoMax``, then the new state is reported
    back to the device shadow.
    """
    def __init__(self, deviceShadowInstance, servoChannel, servoFreq, servoMin, servoMax):
        self.instance = deviceShadowInstance
        self.servo_channel = servoChannel
        self.servo_min = servoMin
        self.servo_max = servoMax
        # Initialize the PWM hardware once per callback object.
        self.pwm = Adafruit_PCA9685.PCA9685()
        self.pwm.set_pwm_freq(servoFreq)
    def delta_callback(self, payload, responseStatus, token):
        # ``payload`` is the JSON delta document from AWS IoT.
        payload_dict = json.loads(payload)
        use_sidetrack = payload_dict["state"]["use_sidetrack"]
        servo_pos = self.servo_min if use_sidetrack else self.servo_max
        self.pwm.set_pwm(self.servo_channel, 0, servo_pos)
        # Report the applied state back so the shadow delta is cleared.
        response = json.dumps({"state":{"reported":payload_dict["state"]}})
        self.instance.shadowUpdate(response, None, 5)
def main(endpoint, root_ca_path, cert_path, key_path, thing_name, servo_channel, servo_freq, servo_min, servo_max):
    """Connect to AWS IoT over MQTT/TLS and serve shadow deltas forever.

    Never returns; the process must be killed to stop it.
    """
    # Configure IoT client
    shadow = AWSIoTMQTTShadowClient(thing_name)
    shadow.configureEndpoint(endpoint, 8883)
    shadow.configureCredentials(root_ca_path, key_path, cert_path)
    # Connect
    shadow.connect()
    # Create shadow handler (note: the ``shadow`` name is reused -- the
    # client object is replaced by its shadow handler here).
    shadow = shadow.createShadowHandlerWithName(thing_name, True)
    callback_handler = ShadowCallback(
        shadow, servo_channel, servo_freq, servo_min, servo_max)
    # Register callback
    shadow.shadowRegisterDeltaCallback(callback_handler.delta_callback)
    # Idle loop: all work happens in the SDK's callback threads.
    while True:
        time.sleep(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--endpoint", action="store", required=True, dest="endpoint", help="Your AWS IoT custom endpoint")
parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path")
parser.add_argument("-c", "--cert", action="store", required=True, dest="certPath", help="Certificate file path")
parser.add_argument("-k", "--key", action="store", required=True, dest="keyPath", help="Private key file path")
parser.add_argument("-n", "--thingName", action="store", required=True, dest="thingName", help="Targeted thing name")
parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False, help="Use MQTT over WebSocket")
parser.add_argument("-s", "--servoChannel", type=int, action="store", required=True, dest="servoChannel", help="The i2c channel the servo is connected to")
parser.add_argument("-f", "--servoFrequency", type=int, action="store", dest="servoFreq", default=50, help="The servo frequency")
parser.add_argument("-m", "--servoMin", type=int, action="store", dest="servoMin", default=230, help="The servo min value")
parser.add_argument("-x", "--servoMax", type=int, action="store", dest="servoMax", default=310, help="The servo max value")
args = parser.parse_args()
main(args.endpoint, args.rootCAPath, args.certPath, args.keyPath, args.thingName, args.servoChannel, args.servoFreq, args.servoMin, args.servoMax)
|
gregmuellegger/django-viewset | django_viewset/__init__.py | from .mixins import ViewSetView
from .utils import viewset_view
from .viewset import ViewSet, ModelViewSet
from .views import NamedView, URLView
__version__ = '0.1.1'
|
gregmuellegger/django-viewset | django_viewset/views.py | <filename>django_viewset/views.py
class NamedView(object):
    '''
    Marker object used by ViewSetMetaClass to collect attributes into a
    ViewSet's view registry.

    Despite its name it is not a view itself, but a thin wrapper around an
    actual class-based view.
    '''
    # Class-level counter so views keep their declaration order.
    creation_counter = 0

    def __init__(self, view, name=None, view_kwargs=None):
        self.view = view
        self.name = name
        # Remember (and advance) the global declaration counter.
        self.creation_counter = NamedView.creation_counter
        NamedView.creation_counter += 1
        if view_kwargs is None:
            view_kwargs = {}
        self.view_kwargs = view_kwargs

    def get_view_kwargs(self):
        return self.view_kwargs

    def __repr__(self):
        return '<{class_}: {name} ({view})>'.format(
            class_=type(self).__name__,
            name=self.name,
            view=self.view.__name__)
class URLView(NamedView):
    '''
    A named view wrapper that additionally carries the url pattern it
    should be mounted at.
    '''
    def __init__(self, url, view, name=None, view_kwargs=None):
        self.url = url
        super(URLView, self).__init__(
            view=view, name=name, view_kwargs=view_kwargs)
|
gregmuellegger/django-viewset | tests/test_viewset.py | <filename>tests/test_viewset.py<gh_stars>1-10
from django.views.generic import DetailView
from django.views.generic import UpdateView
from django_viewset import URLView
from django_viewset import ViewSet
def describe_viewset():
    """pytest-describe specs for ViewSet registration and URL export."""

    class SimpleViewSet(ViewSet):
        read = URLView(r'^(?P<pk>[0-9]+)/read/$', DetailView)
        update = URLView(r'^(?P<pk>[0-9]+)/update/$', UpdateView)

    def it_contains_has_correct_order():
        class ThreeViews(ViewSet):
            a = URLView(r'a', DetailView)
            c = URLView(r'c', DetailView)
            b = URLView(r'b', DetailView)

        viewset = ThreeViews()
        # BUGFIX: dict.keys() is a view object on Python 3 and never
        # compares equal to a list; materialize it so the assertion is
        # meaningful on both Python 2 and 3.
        assert list(viewset.views.keys()) == ['a', 'c', 'b']

    def it_contains_urlview():
        viewset = SimpleViewSet()
        assert len(viewset.views) == 2
        assert isinstance(viewset.views['read'], URLView)
        assert isinstance(viewset.views['update'], URLView)

    def it_creates_urlconf():
        viewset = SimpleViewSet()
        urls = viewset.get_urls()
        assert len(urls) == 2
        read_pattern = urls[0]
        assert read_pattern.name == 'read'
        assert read_pattern.regex.pattern == '^(?P<pk>[0-9]+)/read/$'
|
gregmuellegger/django-viewset | tests/test_syntax.py | <filename>tests/test_syntax.py
def test_syntax():
    """Smoke test: importing every package module must not raise
    (catches syntax errors and broken imports)."""
    import django_viewset
    import django_viewset.mixins
    import django_viewset.utils
    import django_viewset.views
    import django_viewset.viewset
|
gregmuellegger/django-viewset | tests/settings.py | import warnings
warnings.simplefilter('always')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
USE_I18N = True
USE_L10N = True
INSTALLED_APPS = [
'django_viewset',
'tests',
]
MIDDLEWARE_CLASSES = ()
STATIC_URL = '/static/'
SECRET_KEY = '0'
SITE_ID = 1
import django
if django.VERSION < (1, 6):
TEST_RUNNER = 'discover_runner.DiscoverRunner'
|
gregmuellegger/django-viewset | runtests.py | #!/usr/bin/env python
import pytest
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
# Adding current directory to ``sys.path``.
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
def runtests(*argv):
    """Run the test suite with pytest.

    Positional arguments are forwarded to pytest verbatim; without any,
    the ``tests`` package is run. Returns pytest's integer exit code so
    callers (e.g. CI wrappers) can propagate failures -- previously the
    code was silently discarded.
    """
    argv = list(argv) or [
        'tests',
    ]
    return pytest.main(argv)
if __name__ == '__main__':
    # Forward any CLI arguments (e.g. a specific test path) to pytest.
    runtests(*sys.argv[1:])
|
gregmuellegger/django-viewset | django_viewset/viewset.py | <reponame>gregmuellegger/django-viewset
from django.conf.urls import url
from django.utils.six import with_metaclass
from .compat import OrderedDict
from .views import NamedView, URLView
def add_view(viewset, named_view, attribute):
    """Register ``named_view`` on ``viewset`` under ``attribute``.

    The view is exposed both as an attribute and inside the viewset's
    ``views`` mapping; a missing or empty name defaults to the attribute
    name.
    """
    current_name = getattr(named_view, 'name', None)
    if not current_name:
        named_view.name = attribute
    viewset.views[attribute] = named_view
    setattr(viewset, attribute, named_view)
class ViewSetMetaClass(type):
    """Collects all NamedView attributes of a ViewSet subclass into its
    ``views`` registry, preserving declaration order."""
    def __new__(cls, name, bases, attrs):
        new_class = super(ViewSetMetaClass, cls).__new__(
            cls, name, bases, attrs)
        # Make a copy of that list. Otherwise we would have the same
        # instance on the parent and child class, which would result in
        # views being added to the parent that should only be defined on
        # the children.
        new_class.views = getattr(new_class, 'views', OrderedDict()).copy()
        views = [
            (attribute, value)
            for attribute, value in attrs.items()
            if isinstance(value, NamedView)]
        # Sort by creation counter so the registry reflects the order of
        # declaration in the class body.
        views.sort(key=lambda x: x[1].creation_counter)
        for attribute, view in views:
            add_view(new_class, view, attribute)
        return new_class
class ViewSet(with_metaclass(ViewSetMetaClass), object):
    '''
    This can be used as a mixin to provide a container for multiple views.
    Views are attached via attributes to the ViewSet. An example can look as
    follows::

        class CRUDSet(django_viewset.ViewSet):
            create_view = django_viewset.URLView(
                r'^create/$', CreateView, name='create')
            read_view = django_viewset.URLView(
                r'^(?P<pk>[0-9]+)/read/$', ReadView, name='detail')
            update_view = django_viewset.URLView(
                r'^(?P<pk>[0-9]+)/update/$', UpdateView, name='update')
            delete_view = django_viewset.URLView(
                r'^(?P<pk>[0-9]+)/delete/$', DeleteView, name='delete')

    The given URLView's can then be "exported" as urlpatterns via the
    ``get_urls`` method::

        crud_set = CRUDSet()
        urlpatterns = patterns('',
            url(r'^crud/', crud_set.get_urls())
        )
    '''

    urlname_separator = '-'

    def __init__(self, urlname_prefix=None):
        # Make a copy of the registry, to prevent changes taking any effect
        # on the class variable.
        self.views = self.views.copy()
        self.urlname_prefix = urlname_prefix

    def add_view(self, named_view, attribute):
        add_view(self, named_view, attribute)

    def get_view(self, name):
        """Return the registered view wrapper with the given name."""
        for view in self.views.values():
            if view.name == name:
                return view
        raise ValueError('Cannot find viewset view named {0}'.format(name))

    def get_view_default_kwargs(self, viewset_view):
        """Hook for subclasses: base kwargs for every view instantiation."""
        return {}

    def get_view_kwargs(self, viewset_view):
        '''
        This will compile a dictionary of all kwargs that shall be passed into
        the ``View.as_view`` constructor. There are a few places checked to
        collect these kwargs:

        1. The ``ViewSet.get_view_default_kwargs`` will be used.
        2. Check if the ``ViewSet`` instance has a method called
           ``get_<view-name>_view_kwargs`` and call this. The ``<view-name>``
           part is replaced with the ``viewset_view.name`` attribute of the
           passed in ``viewset_view``.
        3. If no method is found in point 2. then the passed
           ``viewset_view.get_view_kwargs`` method will be used.
        4. The ``viewset`` kwarg will be set to the current ViewSet instance,
           but only if the View has an attribute called ``viewset``. This
           prevents issues with django's class based views, since they will
           raise an error if you pass in a kwarg with a name for which the
           View has no existing attribute.

        What this means is basically: Only pass in the ``viewset`` kwarg if
        the View will accept it.
        '''
        kwargs = self.get_view_default_kwargs(viewset_view)
        # Allow kwargs to be overridden per view name.
        method_name = 'get_%s_view_kwargs' % viewset_view.name
        if hasattr(self, method_name):
            kwargs.update(getattr(self, method_name)(viewset_view))
        else:
            kwargs.update(viewset_view.get_view_kwargs())
        # Only set the viewset, if the view will take it as parameter.
        if hasattr(viewset_view.view, 'viewset'):
            kwargs['viewset'] = self
        return kwargs

    def get_view_instance(self, viewset_view):
        """Instantiate the wrapped class-based view via ``as_view``."""
        kwargs = self.get_view_kwargs(viewset_view)
        try:
            view_instance = viewset_view.view.as_view(**kwargs)
        except Exception as e:
            import sys
            from django.utils.six import reraise
            # Re-raise with a more helpful message while preserving the
            # original traceback. BUGFIX: the original used the
            # Python-2-only ``raise exc, None, tb`` statement (a
            # SyntaxError on Python 3) and a format string that mixed
            # automatic ``{}`` with manual ``{0}`` numbering (a ValueError
            # at runtime).
            message = (
                'Cannot instantiate viewset view "{0}.{1}". '
                'The error was: {2}'.format(
                    self.__class__.__name__, viewset_view.name, e))
            reraise(TypeError, TypeError(message), sys.exc_info()[2])
        return view_instance

    def get_view_urlname(self, viewset_view):
        """Build the url name, prefixed with ``urlname_prefix`` if set."""
        return '{prefix}{separator}{view_name}'.format(
            prefix=self.urlname_prefix if self.urlname_prefix else '',
            separator=self.urlname_separator if self.urlname_prefix else '',
            view_name=viewset_view.name)

    def get_urls(self):
        """Return url patterns for every registered view that has a url."""
        patterns = []
        for viewset_view in self.views.values():
            # We will only create url patterns for views that have an actual
            # url attribute. This is for example true for all subclasses of
            # URLView.
            if hasattr(viewset_view, 'url'):
                view_instance = self.get_view_instance(viewset_view)
                patterns.append(
                    url(regex=viewset_view.url,
                        view=view_instance,
                        name=self.get_view_urlname(viewset_view)))
        return patterns
class ModelViewSet(ViewSet):
    """
    A ViewSet bound to a model: every contained view class that declares a
    ``model`` attribute automatically receives this model as a view kwarg.
    """

    def __init__(self, model, urlname_prefix=None):
        # BUGFIX: the original never called ViewSet.__init__, which left
        # ``urlname_prefix`` unset (AttributeError in get_view_urlname) and
        # shared the class-level ``views`` dict across instances. Chain up
        # properly; ``urlname_prefix`` is a new optional, backward
        # compatible parameter.
        super(ModelViewSet, self).__init__(urlname_prefix=urlname_prefix)
        self.model = model

    def get_view_default_kwargs(self, viewset_view):
        kwargs = super(ModelViewSet, self).get_view_default_kwargs(viewset_view)
        # Only pass the model to views that can accept it (mirrors the
        # ``viewset`` kwarg handling in ViewSet.get_view_kwargs).
        if hasattr(viewset_view.view, 'model'):
            kwargs['model'] = self.model
        return kwargs
|
gregmuellegger/django-viewset | django_viewset/compat.py | import django
# Is available in Python 2.7 and higher. SortedDict will be removed in Django
# 1.9 and higher.
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
|
gregmuellegger/django-viewset | django_viewset/mixins.py | <filename>django_viewset/mixins.py
class ViewSetView(object):
    '''
    A mixin for views to make them compatible with ``ViewSet``.
    '''
    # Declaring the attribute makes Django's ``as_view`` accept a
    # ``viewset`` kwarg; ``ViewSet.get_view_instance`` fills it with the
    # owning ViewSet instance when the view is instantiated.
    viewset = None
|
gregmuellegger/django-viewset | django_viewset/utils.py | from .mixins import ViewSetView
def viewset_view(view_class):
    '''
    Make an existing class-based view usable inside a ``ViewSet``.

    Returns a fresh subclass with ``ViewSetView`` mixed in as the first
    base class, so instantiating the view with the ``viewset`` kwarg works.
    '''
    class View(ViewSetView, view_class):
        pass

    return View
|
Hunter-DDM/cfer-document-level-RE | code/get_res.py | <reponame>Hunter-DDM/cfer-document-level-RE
import copy
import math
import json
import time
import re
import random
import gc
import sys
import os
import logging
import pickle
from tqdm.autonotebook import tqdm
import spacy
from spacy.tokens import Doc
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
from transformers import BertTokenizer, BertModel, AdamW, BertConfig, BertPreTrainedModel, \
get_linear_schedule_with_warmup
# configurations for data
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
from config import config
from data_loader import get_relation, load_data
from model import *
# Runtime overrides of the shared config object.
# model_path / ema_path are placeholders to be filled in before running.
config.model_path = '(model_path)'
config.ema_path = '(ema_path)'
config.res_save_path = 'result.json'
# Per-relation decision thresholds (one per relation class); values that
# were never tuned default to 0.8, matching config.threshold below.
config.thresholds = [0.027206406, 0.7347192, 0.8, 0.8500842, 0.2866432, 0.8824334, 0.8, 0.07099054, 0.8, 0.33777493,
                     0.8675586, 0.14178617, 0.8, 0.0017886619, 0.018898232, 0.8, 0.8, 0.29970348, 0.8, 0.0028602716,
                     0.95426935, 0.8, 0.8, 0.1495586, 0.6003812, 0.049580034, 0.16661401, 0.77043945, 0.97031116,
                     0.9935579, 0.8, 0.9169548, 0.042989288, 0.8, 0.1299838, 0.9070824, 0.8, 0.7215598, 0.99140394,
                     0.54074734, 0.9154665, 0.78640866, 0.99458814, 0.8, 0.8, 0.39556777, 0.33989364, 0.7894069, 0.078295,
                     0.8, 0.1906913, 0.8, 0.02231309, 0.8, 0.49018496, 0.12782995, 0.20293304, 0.17651184, 0.8, 0.45584375,
                     0.74141777, 0.5691519, 0.74046874, 0.8812864, 0.8, 0.8, 0.8, 0.99002403, 0.6128277, 0.662927, 0.8,
                     0.8376514, 0.99243224, 0.9942542, 0.8792411, 0.8, 0.99344105, 0.7274136, 0.97489274, 0.8, 0.018518208,
                     0.99547833, 0.879309, 0.01869425, 0.8, 0.9983028, 0.8, 0.9657363, 0.8, 0.31774586, 0.8, 0.4790035,
                     0.8, 0.8, 0.8, 0.8]
# Global fallback threshold.
config.threshold = 0.8
def data_process(data):
    """Pre-process raw DocRED-style documents in place.

    For every document this
      * replaces non-breaking-space / blank tokens with "-" so the BERT
        tokenizer never receives an empty word,
      * computes ``sent_idx`` (document-level offset of each sentence's
        first word) and ``word_num`` (total word count), and
      * attaches ``global_pos`` to every mention: the list of document-level
        word indices covered by the mention (end-exclusive).

    Parameters
    ----------
    data : list[dict]
        Documents with at least ``sents`` (list of token lists) and
        ``vertexSet`` (list of mention lists carrying ``pos`` and ``sent_id``).

    Returns
    -------
    list[dict]
        The same list, mutated in place.
    """
    for document in data:
        sents = document['sents']
        vertexSet = document['vertexSet']
        # Sanitise tokens the tokenizer would otherwise drop or mangle.
        for i, sent in enumerate(sents):
            for j, word in enumerate(sent):
                if '\xa0' in word or word == ' ':
                    sents[i][j] = "-"
        # Document-level offset of each sentence plus the total word count.
        sent_idx = []
        start = 0
        word_num = 0
        for sent in sents:
            sent_idx.append(start)
            start += len(sent)
            word_num += len(sent)
        for mentions in vertexSet:
            for mention in mentions:
                pos = mention['pos']
                offset = sent_idx[mention['sent_id']]
                # ``pos`` is a sentence-local [start, end) span; shift it to
                # document-level indices.  (The original also guarded a
                # len == 1 case that could never occur: the shifted span
                # always has exactly two endpoints.)
                mention['global_pos'] = list(range(pos[0] + offset, pos[1] + offset))
        document['sent_idx'] = sent_idx
        document['word_num'] = word_num
    return data
class MyTokenizer(object):
    """Drop-in replacement for spaCy's tokenizer that trusts pre-split words.

    The corpus is already tokenised, so the parsing pipeline must not
    re-tokenise; this wraps a word list directly into a spacy ``Doc``.
    """
    def __init__(self, vocab):
        # vocab: the spaCy Vocab shared with the pipeline owning this tokenizer.
        self.vocab = vocab
    def __call__(self, words):
        # words: list[str], one entry per pre-tokenised word.
        return Doc(self.vocab, words=words)
class DataLoader_bert(object):
    """
    Load data from json files, preprocess and prepare batches.

    Each item yields one fully featurised document for inference: wordpiece
    ids (split into at most two BERT windows), per-word wordpiece spans,
    attention masks, per-word entity-type ids, sentence distances per entity
    pair, per-word coreference ids, and a dependency-based adjacency matrix.
    """
    def __init__(self, data, evaluation=False):
        # The spaCy pipeline is only used for dependency parsing; its
        # tokenizer is replaced so the pre-tokenised words are used verbatim.
        self.nlp = spacy.load("en_core_web_sm")
        self.nlp.tokenizer = MyTokenizer(self.nlp.vocab)
        self.data = data
    def get_ids(self, sents):
        """Convert a document's words to BERT wordpiece ids.

        Returns
        -------
        all_sent_ids : list of id lists, each wrapped in [CLS]=101/[SEP]=102;
            a second window is opened when a window would exceed 510 content
            ids.  NOTE(review): the mask logic below only handles one or two
            windows — presumably no document ever needs three; confirm.
        global_spans : for every word, the indices of its wordpieces in the
            special-token-free concatenation of all windows.
        masks : np.array (window_num, max_len) attention mask; the second
            window is zero-padded to the first window's length.
        """
        all_sent_ids = []
        global_spans = []
        sent_ids = []
        global_pos = 0
        for i, sent in enumerate(sents):
            for j, word in enumerate(sent):
                id = config.tokenizer.encode(word, add_special_tokens=False)
                sent_ids += id
                cur_global_span = [i for i in range(global_pos, global_pos + len(id))]
                global_spans.append(cur_global_span)
                global_pos += len(id)
                if len(sent_ids) >= 510:
                    # Overflow: drop the current word's pieces from this
                    # window, flush it, and start a new window with them.
                    sent_ids = sent_ids[:len(sent_ids)-len(id)]
                    sent_ids = [101] + sent_ids + [102]
                    all_sent_ids.append(sent_ids)
                    sent_ids = []
                    sent_ids += id
        # Flush the final (possibly only) window.
        sent_ids = [101] + sent_ids + [102]
        all_sent_ids.append(sent_ids)
        if len(all_sent_ids) == 2:
            # Pad the (shorter) second window up to the first window's length.
            max_len = len(all_sent_ids[0])
            masks = np.zeros((2, max_len))
            masks[0, :] = 1.
            masks[1, :len(all_sent_ids[1])] = 1.
            all_sent_ids[1] = all_sent_ids[1] + [0] * (max_len - len(all_sent_ids[1]))  # padding
            assert len(all_sent_ids[0]) == len(all_sent_ids[1])
        else:
            max_len = len(all_sent_ids[0])
            masks = np.ones((1, max_len))
        return all_sent_ids, global_spans, masks
    def get_adj(self, sents, sent_idx, vertexSet, word_num):
        """
        sents: list of sentence
        sent_idx: the global index of the first word in sentences
        masks: Tensor of (sent_num, MAX_LEN)
        vertexSet: [{'name': 'Bergqvist', 'pos': [9, 10], 'sent_id': 5}, ...]
        spans: word span in ids
        word_num: total word number in the doc

        Builds a symmetric 0/1 word-level graph with edges for dependency
        arcs, consecutive sentence roots, self loops, adjacent words, and
        all mention-start pairs of each entity (coreference links).
        """
        adj = np.zeros((word_num, word_num))
        root_idx = []
        # Dependency arcs within each sentence, shifted to document level.
        for i, sent in enumerate(sents):
            doc = self.nlp(sent)
            for token in doc:
                cur_index = token.i + sent_idx[i]
                head_index = token.head.i + sent_idx[i]
                adj[cur_index][head_index] = 1
                adj[head_index][cur_index] = 1
                if token.dep_ == 'ROOT':
                    root_idx.append(cur_index)
        # Chain consecutive sentence roots so the document graph is connected.
        for i in range(len(root_idx) - 1):
            start = root_idx[i]
            end = root_idx[i + 1]
            adj[start][end] = 1
            adj[end][start] = 1
        # Self loops.
        for i in range(adj.shape[0]):
            adj[i][i] = 1
        # Adjacent-word edges preserve linear order.
        for i in range(adj.shape[0] - 1):
            adj[i][i + 1] = 1
            adj[i + 1][i] = 1
        # Fully connect the first words of all mentions of the same entity.
        for mentions in vertexSet:
            mention_index = []
            for mention in mentions:
                global_pos = mention['global_pos']
                mention_index.append(global_pos[0])
            for i in mention_index:
                for j in mention_index:
                    adj[i][j] = 1
                    adj[j][i] = 1
        return adj
    def get_types(self, vertexSet, global_spans):
        """Per-word entity-type ids (0 for words outside any mention)."""
        types = np.zeros((len(global_spans)), dtype=int)
        for mentions in vertexSet:
            for mention in mentions:
                type_id = config.type2id[mention['type']]
                global_pos = mention['global_pos']
                for i in global_pos:
                    types[i] = type_id
        return types
    def get_distances(self, vertexSet):
        """For every ordered entity pair, the sentence-id gaps between all
        head/tail mention combinations ([0] on the diagonal)."""
        distances = []
        for i, head_mentions in enumerate(vertexSet):
            for j, tail_mentions in enumerate(vertexSet):
                if i == j:
                    distances.append([0])
                    continue
                distance = []
                for head_mention in head_mentions:
                    for tail_mention in tail_mentions:
                        head_sent_id = head_mention['sent_id']
                        tail_sent_id = tail_mention['sent_id']
                        distance.append(abs(head_sent_id - tail_sent_id))
                distances.append(distance)
        return distances
    def get_commons(self, vertexSet, word_num):
        """Per-word coreference ids: 1-based entity index for mention words,
        0 everywhere else."""
        commons = [0 for i in range(word_num)]
        for i, mentions in enumerate(vertexSet):
            for mention in mentions:
                global_pos = mention['global_pos']
                for p in global_pos:
                    commons[p] = i + 1
        return commons
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        """
        Return
            document: dict
            adj: np.array of (word_num, word_num)
            target: Tensor of (mention_num*mention_num, R)
            sents_ids: Tensor of (sent_num, max_seq_len)
            spans: list of span list
            masks: Tensor of (sent_num, max_seq_len)
        """
        if not isinstance(idx, int):
            raise TypeError
        if idx < 0 or idx >= len(self.data):
            raise IndexError
        document = self.data[idx]
        vertexSet = document['vertexSet']
        sents = document['sents']
        word_num = document['word_num']
        sent_idx = document['sent_idx']
        sent_ids, global_spans, masks = self.get_ids(sents)
        distances = self.get_distances(vertexSet)
        commons = self.get_commons(vertexSet, word_num)
        types = self.get_types(vertexSet, global_spans)
        adjs = self.get_adj(sents, sent_idx, vertexSet, word_num)
        return document, types, sent_ids, global_spans, masks, distances, commons, adjs
def get_res(data_loader, data_paths, model):
    """Run the model over every document and collect predicted triples.

    Parameters
    ----------
    data_loader : DataLoader_bert
        Yields one preprocessed document per item.
    data_paths : list
        Precomputed entity-pair path indices, one entry per document.
    model : Model
        Trained relation-extraction model already on config.device.

    Returns
    -------
    list[dict]
        One record per predicted relation:
        {"title", "h_idx", "t_idx", "r", "evidence"}.
    """
    model.eval()
    res = []
    with torch.no_grad():
        tk0 = tqdm(data_loader, total=len(data_loader), desc="Evaluating")
        for bi, data in enumerate(tk0):
            document, types, sents_ids, global_spans, masks, distances, commons, adj = data
            paths_indexs = data_paths[bi]  # (entity_num*entity_num, 70)
            vertexSet = document['vertexSet']
            title = document['title']
            adj = torch.tensor(adj, dtype=torch.float).to(config.device)
            types = torch.tensor(types, dtype=torch.long).to(config.device)
            sents_ids = torch.tensor(sents_ids, dtype=torch.long).to(config.device)
            masks = torch.tensor(masks, dtype=torch.long).to(config.device)
            commons = torch.tensor(commons, dtype=torch.long).to(config.device)
            # Every ordered entity pair is scored; pair i maps to
            # (head, tail) = (i // n, i % n).
            indices = [i for i in range(len(paths_indexs))]
            head_entity_index = []
            tail_entity_index = []
            n = len(vertexSet)
            for i in indices:
                head_index = i // n
                tail_index = i % n
                head_entity_index.append(head_index)
                tail_entity_index.append(tail_index)
            model.zero_grad()  # no-op under no_grad; kept from the training loop
            pred = model(sents_ids, masks, vertexSet, types, adj, global_spans, paths_indexs, head_entity_index,
                         tail_entity_index, commons, distances)
            # FIX: F.sigmoid is deprecated (removed in recent PyTorch);
            # torch.sigmoid is the exact drop-in replacement.
            pred = torch.sigmoid(pred)
            # Last column is the "None" relation: when it fires above the
            # global threshold, suppress every relation for that pair.
            for i in range(pred.shape[0]):
                if pred[i, -1] > config.threshold:
                    pred[i, :] = 0.
            pred = pred[:, :-1]
            # An entity never relates to itself.
            for i in range(pred.shape[0]):
                if i // n == i % n:
                    pred[i].fill_(0.)
            pred = pred.cpu().numpy()
            # 96 real relation classes, each with its own tuned threshold.
            for i in range(96):
                threshold = config.thresholds[i]
                for j in range(pred.shape[0]):
                    score = pred[j][i]
                    if score >= threshold:
                        res.append({
                            "title": title,
                            "h_idx": j // n,
                            "t_idx": j % n,
                            "r": config.id2relation[i],
                            "evidence": []
                        })
    return res
def load_datas():
    """Load the train/dev/test splits plus the precomputed dependency paths
    and adjacency matrices, and preprocess the train/dev documents."""
    train_data = load_data(config.train_data_path)
    dev_data = load_data(config.dev_data_path)
    test_data = load_data(config.test_data_path)
    # The relation vocabulary must exist before any further processing.
    get_relation(train_data, dev_data)
    train_data = data_process(train_data)
    dev_data = data_process(dev_data)
    train_paths = list(np.load(config.train_paths_path, allow_pickle=True))
    dev_paths = list(np.load(config.dev_paths_path, allow_pickle=True))
    train_adjs = list(np.load(config.train_adjs_path, allow_pickle=True))
    dev_adjs = list(np.load(config.dev_adjs_path, allow_pickle=True))
    return train_data, dev_data, test_data, train_paths, dev_paths, train_adjs, dev_adjs
# ---- inference entry point (runs at import) ----
# Restore the trained model weights.
model = Model().to(config.device)
checkpoint = torch.load(config.model_path)
model.load_state_dict(checkpoint['model_state_dict'])
del checkpoint  # free the checkpoint dict before loading anything else
gc.collect()
# Restore the EMA shadow weights saved during training.
with open(config.ema_path, 'rb') as f:
    shadow = pickle.load(f, encoding='bytes')
ema = EMA(mu=0.999)
ema.shadow = shadow
# Load the raw splits and build the relation vocabulary from train+dev.
train_data = load_data(config.train_data_path)
dev_data = load_data(config.dev_data_path)
test_data = load_data(config.test_data_path)
get_relation(train_data, dev_data)
test_data = data_process(test_data)
test_paths = list(np.load(config.test_paths_path, allow_pickle=True))
test_data_loader = DataLoader_bert(test_data)
# Dev loader is only needed by the commented-out alternative below.
dev_data = data_process(dev_data)
dev_paths = list(np.load(config.dev_paths_path, allow_pickle=True))
dev_data_loader = DataLoader_bert(dev_data)
# Swap the EMA-averaged weights into the model and predict on the test split.
ema.assign(model)
res = get_res(test_data_loader, test_paths, model)
#res = get_res(dev_data_loader, dev_paths, model)
# numpy integer indices are not JSON-serialisable; cast to plain int.
result = []
for r in res:
    r['h_idx'] = int(r['h_idx'])
    r['t_idx'] = int(r['t_idx'])
    result.append(r)
# save
with open(config.res_save_path, 'w') as f:
    json.dump(result, f)
|
Hunter-DDM/cfer-document-level-RE | code/model.py | import copy
import math
import json
import time
import re
import random
import gc
import sys
import os
import logging
import pickle
from tqdm.autonotebook import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
from transformers import BertTokenizer, BertModel, AdamW, BertConfig, BertPreTrainedModel, get_linear_schedule_with_warmup
from config import config
class EMA():
    """Exponential moving average of model parameters.

    ``shadow`` holds the running average per parameter name; ``assign``
    temporarily swaps the averaged weights into the model (backing up the
    live weights on CPU) and ``resume`` swaps them back.
    """
    def __init__(self, mu):
        self.mu = mu          # maximum decay rate
        self.shadow = {}      # name -> EMA tensor
        self.original = {}    # name -> CPU backup of live weights while assigned
    def register(self, name, val):
        """Initialise the shadow of a parameter with its current value."""
        self.shadow[name] = val.clone()
    def __call__(self, model, num_updates):
        """Fold the model's current trainable parameters into the shadow.

        Warm-up: the decay grows from ~0.1 towards ``mu`` as updates
        accumulate, so early steps track the model closely.
        """
        decay = min(self.mu, (1.0 + num_updates) / (10.0 + num_updates))
        for name, param in model.named_parameters():
            if param.requires_grad:
                assert name in self.shadow
                new_average = (1.0 - decay) * param.data + decay * self.shadow[name]
                self.shadow[name] = new_average.clone()
    def assign(self, model):
        """Swap the EMA weights into the model, backing up live weights."""
        for name, param in model.named_parameters():
            if param.requires_grad:
                assert name in self.shadow
                self.original[name] = param.data.clone().cpu()
                param.data = self.shadow[name]
    def resume(self, model):
        """Restore the live weights backed up by ``assign``."""
        for name, param in model.named_parameters():
            if param.requires_grad:
                assert name in self.shadow
                # FIX: the backup lives on CPU; restore it to whatever device
                # the parameter currently occupies instead of hard-coding
                # .cuda(), so CPU-only runs no longer crash.
                param.data = self.original[name].to(param.data.device)
        self.original = {}
class GraphConvLayer(nn.Module):
    """A densely-connected GCN block (DCGCN) operated on dependency graphs.

    Runs ``layers`` sub-layers whose input is the concatenation of the
    original features and all previous sub-layer outputs.  The concatenated
    sub-layer outputs are added back to the input (residual) and passed
    through a learned gate (highway connection).

    Note: ``d`` must be divisible by ``layers`` for the concatenated output
    to match the input width.
    """
    def __init__(self, d, layers, dropout_gcn=config.dropout_gcn, self_loop=False):
        super(GraphConvLayer, self).__init__()
        self.d = d
        self.layers = layers
        self.head_dim = self.d // self.layers  # width of each sub-layer output
        self.gcn_drop = nn.Dropout(dropout_gcn)
        self.linear_output = nn.Linear(self.d, self.d)
        self.linear_gate = nn.Linear(self.d, self.d)
        # dcgcn block: sub-layer l consumes d + l * head_dim input features.
        self.weight_list = nn.ModuleList()
        for i in range(self.layers):
            self.weight_list.append(nn.Linear((self.d + self.head_dim * i), self.head_dim))
        self.self_loop = self_loop
    def forward(self, adj, gcn_inputs):
        '''
        adj: (n, n) 0/1 adjacency matrix over the document's words
        gcn_inputs: (n, d) node features
        returns: (n, d) updated node features
        '''
        # Degree normaliser (+1 accounts for the implicit self connection).
        denom = adj.sum(-1).unsqueeze(-1) + 1  # (n, 1)
        outputs = gcn_inputs
        cache_list = [outputs]
        output_list = []
        for l in range(self.layers):
            Ax = torch.matmul(adj, outputs)  # (n, n) * (n, d) -> (n, d)
            AxW = self.weight_list[l](Ax)
            # Simplified: the original carried a redundant ``else: AxW = AxW``.
            if self.self_loop:
                AxW = AxW + self.weight_list[l](outputs)  # self loop
            AxW = AxW / denom
            gAxW = F.relu(AxW)
            cache_list.append(gAxW)
            outputs = torch.cat(cache_list, dim=1)  # dense connectivity
            output_list.append(self.gcn_drop(gAxW))
        gcn_outputs = torch.cat(output_list, dim=1)  # (n, d) when d % layers == 0
        gcn_outputs = gcn_outputs + gcn_inputs  # residual
        # highway gate blending the transformed and the residual paths
        gate = torch.sigmoid(self.linear_gate(gcn_outputs))
        out = self.linear_output(gcn_outputs)
        out = gate * out + (1 - gate) * gcn_outputs
        return out
class Attention(nn.Module):
    """Scores a set of candidate paths for one entity pair.

    A single linear layer maps the concatenation of the pair's entity
    representation, a path representation and a distance embedding to a
    scalar; scores are softmax-normalised over the path axis.
    """
    def __init__(self, d, dis_emb_dim):
        super(Attention, self).__init__()
        self.d = d
        self.dis_emb_dim = dis_emb_dim
        self.linear = nn.Linear(4 * d + dis_emb_dim, 1)
    def forward(self, entity, path, dis_embs):
        """
        entity, path: (k, d*2)
        dis_embs: (k, dis_emb_dim)
        returns: (k, 1) softmax weights over the k paths
        """
        features = torch.cat((entity, path, dis_embs), dim=1)  # (k, d*4 + dis_emb_dim)
        raw_scores = self.linear(features)  # (k, 1)
        return F.softmax(raw_scores, dim=0)
class Model(nn.Module):
    """Document-level relation-extraction model.

    Pipeline: BERT wordpiece encoding -> mean-pooled word embeddings
    (+ entity-type and coreference embeddings) -> BiGRU -> two
    densely-connected GCN blocks over the dependency graph -> entity and
    path representations per candidate pair -> bilinear classifier over
    config.R relation channels.
    """
    def __init__(self, weight_matrix=None):
        # weight_matrix is accepted for interface compatibility but unused.
        super(Model, self).__init__()
        self.bert_size = config.bert_size
        self.hidden_size = config.hidden_size
        self.type_emb_dim = config.type_emb_dim
        self.common_emb_dim = config.common_emb_dim
        self.dis_emb_dim = config.dis_emb_dim
        self.max_path_len = config.max_path_len
        self.gcn_layer_first = config.gcn_layer_first
        self.gcn_layer_second = config.gcn_layer_second
        # encoder (BERT is fine-tuned, not frozen)
        self.bert = BertModel.from_pretrained(config.bert_path)
        for param in self.bert.parameters():
            param.requires_grad = True
        self.type_embedding = nn.Embedding(config.type_num, self.type_emb_dim)
        self.com_embedding = nn.Embedding(config.common_num, self.common_emb_dim)
        self.dropout_embedding = nn.Dropout(config.dropout_embedding)
        # GRU: bidirectional, halves hidden_size per direction
        self.gru = nn.GRU(self.bert_size + self.type_emb_dim + self.common_emb_dim, self.hidden_size // 2,
                          batch_first=True, bidirectional=True)
        for name, param in self.gru.named_parameters():
            if 'weight_ih' in name:
                nn.init.orthogonal_(param.data)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param.data)
            elif 'bias' in name:
                param.data.fill_(0)
        self.dropout_gru = nn.Dropout(config.dropout_gru)
        # dcgcn: two stacked densely-connected GCN blocks
        self.gcns = nn.ModuleList()
        self.gcns.append(GraphConvLayer(self.hidden_size, self.gcn_layer_first))
        self.gcns.append(GraphConvLayer(self.hidden_size, self.gcn_layer_second))
        # path + attention: BiGRU over dependency paths, attention over paths
        self.gru2 = nn.GRU(self.hidden_size, self.hidden_size, batch_first=True, bidirectional=True)
        for name, param in self.gru2.named_parameters():
            if 'weight_ih' in name:
                nn.init.orthogonal_(param.data)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param.data)
            elif 'bias' in name:
                param.data.fill_(0)
        self.dis_embedding = nn.Embedding(config.dis_num, self.dis_emb_dim)
        self.attention = Attention(self.hidden_size, self.dis_emb_dim)
        self.dropout_attention = nn.Dropout(config.dropout_attention)
        # classification
        self.bilinear = nn.Bilinear(self.hidden_size * 2, self.hidden_size * 2, config.R)
    def get_word_embs(self, words_embs, global_spans):
        """Mean-pool wordpiece embeddings into one vector per word.

        words_embs: (id_num, bert_size); global_spans gives each word's
        wordpiece index span (inclusive endpoints are pooled).
        """
        res_embs = torch.zeros((len(global_spans), self.bert_size)).to(config.device)
        for i, span in enumerate(global_spans):
            start = span[0]
            end = span[-1]
            res_embs[i, :] = torch.mean(words_embs[start:end + 1], 0, True)
        return res_embs
    def get_entity_embs(self, words_embs, vertexSet):
        """Entity embedding = mean over its mentions; each mention = mean
        over the word embeddings it covers."""
        entity_embs = torch.zeros((len(vertexSet), self.hidden_size)).to(config.device)
        for i, mentions in enumerate(vertexSet):
            cur_mention_embs = torch.zeros((len(mentions), self.hidden_size)).to(config.device)
            for j, mention in enumerate(mentions):
                global_pos = mention['global_pos']
                indices = torch.tensor(global_pos, dtype=torch.long).to(config.device)
                cur_mention_embs[j, :] = torch.mean(torch.index_select(words_embs, 0, indices), 0, True)
            entity_embs[i, :] = torch.mean(cur_mention_embs, 0, True)
        return entity_embs
    def get_ht_embs(self, words_embs, paths_indexs, head_entity_embs, tail_entity_embs, distances):
        """Encode each pair's dependency paths with the path BiGRU and
        aggregate them (attention-weighted when a pair has several paths).

        Returns (head_embs, tail_embs), each (pair_num, hidden_size).
        """
        head_embs = []
        tail_embs = []
        for i, paths_index in enumerate(paths_indexs):  # search all paths (m, *path_num, max_len)
            k = len(paths_index)
            # Zero-pad every path of this pair to max_path_len positions.
            path_pad_u = torch.zeros((len(paths_index), self.max_path_len, self.hidden_size)).to(config.device)
            lengths = []
            for j, path_index in enumerate(paths_index):
                indices = torch.tensor(path_index, dtype=torch.long).to(config.device)
                path_pad_u[j, :len(path_index), :] = torch.index_select(words_embs, 0, indices)
                lengths.append(len(path_index))
            # pack_padded_sequence requires paths sorted by decreasing length.
            lengths = torch.tensor(lengths).to(config.device)
            lens_sorted, lens_argsort = torch.sort(lengths, 0, True)
            lens_sorted = lens_sorted.to(config.device)
            lens_argsort = lens_argsort.to(config.device)
            path_pad_u = torch.index_select(path_pad_u, 0, lens_argsort)
            packed = pack_padded_sequence(path_pad_u, lens_sorted, batch_first=True)
            output, h = self.gru2(packed)
            output, _ = pad_packed_sequence(output, batch_first=True)
            h = h.permute(1, 0, 2)  # (k, 2, hidden_size): final states per direction
            head_path_embs = h[:, 1, :]  # (k, hidden_size)
            tail_path_embs = h[:, 0, :]
            # Attention over the k paths of this pair
            if k > 1:
                # distance embedding
                # NOTE(review): the paths were reordered by length above but
                # distances[i] is used in the original order, so each weight
                # may pair a path with another path's distance — confirm
                # whether this is intended.
                distance = torch.tensor(distances[i], dtype=torch.long).to(config.device)
                dis_embs = self.dis_embedding(distance)  # (k, dis_emb_dim)
                head_entity_emb = head_entity_embs[i].view(1, -1).repeat(k, 1)  # (1, hidden_size) -> (k, hidden_size)
                tail_entity_emb = tail_entity_embs[i].view(1, -1).repeat(k, 1)
                entity_cat = torch.cat((head_entity_emb, tail_entity_emb), 1)  # (k, hidden_size) -> (k, hidden_size*2)
                path_cat = torch.cat((head_path_embs, tail_path_embs), 1)
                path_weight = self.attention(entity_cat, path_cat, dis_embs)  # (k, 1)
                head_path_embs = path_weight * head_path_embs  # (k, hidden_size)
                tail_path_embs = path_weight * tail_path_embs
            head_emb = torch.sum(head_path_embs, 0, True)  # (1, hidden_size)
            tail_emb = torch.sum(tail_path_embs, 0, True)
            head_embs.append(head_emb)
            tail_embs.append(tail_emb)
        head_embs = torch.cat(head_embs).to(config.device)
        tail_embs = torch.cat(tail_embs).to(config.device)
        return head_embs, tail_embs
    def forward(self, sents_ids, masks, vertexSet, types, adj, global_spans, paths_indexs, head_entity_index,
                tail_entity_index, commons, distances):
        """Score every requested (head, tail) entity pair.

        Returns raw logits of shape (entity_pair_num, config.R); the last
        channel is the "None" relation.
        """
        hs = self.bert(input_ids=sents_ids, attention_mask=masks)[0]  # (1 or 2, id_num, hidden_size)
        if sents_ids.shape[0] == 1:
            words_embs = hs.view(-1, hs.shape[-1])[1:-1]  # (id_num-2, hidden_size): strip [CLS]/[SEP]
        else:
            # Two windows: strip specials from each, drop the second
            # window's padding, then concatenate.
            words_embs1 = hs[0, 1:-1]
            indices = (masks[1] == True).nonzero().view(-1).to(config.device)
            words_embs2 = torch.index_select(hs[1], 0, indices)[1:-1]
            words_embs = torch.cat((words_embs1, words_embs2))
        words_embs = self.get_word_embs(words_embs, global_spans)  # (id_num, bert_size) -> (word_num, bert_size)
        assert words_embs.shape[0] == adj.shape[0]
        types_embs = self.type_embedding(types)  # (word_num) -> (word_num, type_emb_dim)
        common_embs = self.com_embedding(commons)  # (word_num) -> (word_num, common_emb_dim)
        words_embs = torch.cat((words_embs, types_embs, common_embs), 1)  # -> (word_num, bert_size+type_emb_dim+common_emb_dim)
        words_embs = self.dropout_embedding(words_embs)
        # global encoding (single "batch" containing the whole document)
        words_embs = words_embs.view(1, words_embs.shape[0],
                                     words_embs.shape[1])  # -> (1, word_num, bert_size+type_emb_dim+common_emb_dim)
        words_embs = self.gru(words_embs)[0].squeeze()  # (word_num, hidden_size)
        words_embs = self.dropout_gru(words_embs)
        # GCN over the dependency/coreference graph
        for i in range(len(self.gcns)):
            words_embs = self.gcns[i](adj, words_embs)
        # etity embs
        etity_embs = self.get_entity_embs(words_embs, vertexSet)  # (entity_num, hidden_size)
        # entity pair embs gathered per requested pair
        head_entity_indices = torch.tensor(head_entity_index, dtype=torch.long).to(config.device)
        head_entity_embs = torch.index_select(etity_embs, 0, head_entity_indices)
        tail_entity_indices = torch.tensor(tail_entity_index, dtype=torch.long).to(config.device)
        tail_entity_embs = torch.index_select(etity_embs, 0, tail_entity_indices)
        # entity pairs' head & tail path-based embeddings
        head_embs, tail_embs = self.get_ht_embs(words_embs, paths_indexs, head_entity_embs, tail_entity_embs, distances)
        head = torch.cat((head_entity_embs, head_embs), 1)
        tail = torch.cat((tail_entity_embs, tail_embs), 1)
        head = self.dropout_attention(head)
        tail = self.dropout_attention(tail)
        pred = self.bilinear(head, tail)
        return pred  # (entity_pair_num, R)
|
Hunter-DDM/cfer-document-level-RE | code/data_loader.py | <gh_stars>1-10
import copy
import math
import json
import time
import re
import random
import gc
import sys
import os
import logging
import pickle
from tqdm.autonotebook import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
from transformers import BertTokenizer, BertModel, AdamW, BertConfig, BertPreTrainedModel, get_linear_schedule_with_warmup
from config import config
# load
def load_data(path):
    """Load a JSON dataset file and return the parsed object.

    The original implementation parsed only the first line of the file;
    reading the whole file is identical for the single-line dataset dumps
    and also handles pretty-printed (multi-line) JSON.

    Parameters
    ----------
    path : str
        Path to a UTF-8 JSON file (undecodable bytes are replaced).

    Returns
    -------
    The deserialised JSON value (a list of document dicts for this corpus).
    """
    with open(path, 'r', encoding='utf-8', errors='replace') as f:
        return json.loads(f.read())
def get_relation(train_data, dev_data):
    """Build the relation vocabulary from train+dev gold labels.

    Fills config.relation2id / config.id2relation with the sorted unique
    relation names mapped to consecutive ids; the artificial 'None'
    relation always receives the final id.
    """
    seen = set()
    for dataset in (train_data, dev_data):
        for doc in dataset:
            for label in doc['labels']:
                seen.add(label['r'])
    ordered = sorted(seen)
    for idx, rel in enumerate(ordered):
        config.relation2id[rel] = idx
        config.id2relation[idx] = rel
    none_id = len(ordered)
    config.relation2id['None'] = none_id
    config.id2relation[none_id] = 'None'
def data_process(data):
    """preprocessing

    In-place featurisation of raw documents: sanitises blank tokens,
    computes per-sentence word offsets (``sent_idx``) and the total word
    count (``word_num``), attaches document-level ``global_pos`` spans to
    every mention, and converts the gold labels into
    (head, relation_id, tail) ``triples`` via config.relation2id —
    so get_relation() must have run first.
    """
    for document in data:
        title = document['title']  # unused; kept as-is
        sents = document['sents']
        vertexSet = document['vertexSet']
        labels = document['labels']
        # Replace non-breaking-space / blank tokens the tokenizer mishandles.
        for i, sent in enumerate(sents):
            for j, word in enumerate(sent):
                if '\xa0' in word or word == ' ':
                    sents[i][j] = "-"
        # Document-level offset of each sentence plus the total word count.
        sent_idx = []
        start = 0
        word_num = 0
        for sent in sents:
            sent_idx.append(start)
            start += len(sent)
            word_num += len(sent)
        for mentions in vertexSet:
            for mention in mentions:
                pos = mention['pos']
                sent_id = mention['sent_id']
                global_pos = [pos[0] + sent_idx[sent_id], pos[1] + sent_idx[sent_id]]
                # NOTE(review): global_pos always has two elements here, so
                # the len == 1 branch below can never fire.
                if len(global_pos) == 1:
                    mention['global_pos'] = global_pos
                else:
                    mention['global_pos'] = [i for i in range(global_pos[0], global_pos[-1])]
        # Gold labels as (head_entity, relation_id, tail_entity) triples.
        triples = []
        for label in labels:
            h = label['h']
            t = label['t']
            r = label['r']
            r_id = config.relation2id[r]
            triples.append((h, r_id, t))
        document['sent_idx'] = sent_idx
        document['word_num'] = word_num
        document['triples'] = triples
    return data
""" **Data Loader** """
class DataLoader(object):
    """
    Load data from json files, preprocess and prepare batches.

    Training-time counterpart of DataLoader_bert: no dependency parsing
    (adjacency matrices are precomputed and loaded from .npy elsewhere),
    but additionally builds the multi-hot relation target per entity pair.
    """
    def __init__(self, data, evaluation=False):
        self.data = data
    def get_ids(self, sents):
        """Convert words to BERT wordpiece ids split into at most two
        [CLS]/[SEP]-wrapped windows; also returns per-word wordpiece spans
        and the attention masks (see DataLoader_bert.get_ids)."""
        all_sent_ids = []
        global_spans = []
        sent_ids = []
        global_pos = 0
        for i, sent in enumerate(sents):
            for j, word in enumerate(sent):
                id = config.tokenizer.encode(word, add_special_tokens=False)
                sent_ids += id
                cur_global_span = [i for i in range(global_pos, global_pos + len(id))]
                global_spans.append(cur_global_span)
                global_pos += len(id)
                if len(sent_ids) >= 510:
                    # Overflow: move the current word into a fresh window.
                    sent_ids = sent_ids[:len(sent_ids) - len(id)]
                    sent_ids = [101] + sent_ids + [102]
                    all_sent_ids.append(sent_ids)
                    sent_ids = []
                    sent_ids += id
        # Flush the final (possibly only) window.
        sent_ids = [101] + sent_ids + [102]
        all_sent_ids.append(sent_ids)
        if len(all_sent_ids) == 2:
            # Pad the second window up to the first window's length.
            max_len = len(all_sent_ids[0])
            masks = np.zeros((2, max_len))
            masks[0, :] = 1.
            masks[1, :len(all_sent_ids[1])] = 1.
            all_sent_ids[1] = all_sent_ids[1] + [0] * (max_len - len(all_sent_ids[1]))  # padding
            assert len(all_sent_ids[0]) == len(all_sent_ids[1])
        else:
            max_len = len(all_sent_ids[0])
            masks = np.ones((1, max_len))
        return all_sent_ids, global_spans, masks
    def get_types(self, vertexSet, global_spans):
        """Per-word entity-type ids (0 for words outside any mention)."""
        types = np.zeros((len(global_spans)), dtype=int)
        for mentions in vertexSet:
            for mention in mentions:
                type_id = config.type2id[mention['type']]
                global_pos = mention['global_pos']
                for i in global_pos:
                    types[i] = type_id
        return types
    def get_distances(self, vertexSet):
        """For every ordered entity pair, the sentence-id gaps between all
        head/tail mention combinations ([0] on the diagonal)."""
        distances = []
        for i, head_mentions in enumerate(vertexSet):
            for j, tail_mentions in enumerate(vertexSet):
                if i == j:
                    distances.append([0])
                    continue
                distance = []
                for head_mention in head_mentions:
                    for tail_mention in tail_mentions:
                        head_sent_id = head_mention['sent_id']
                        tail_sent_id = tail_mention['sent_id']
                        distance.append(abs(head_sent_id - tail_sent_id))
                distances.append(distance)
        return distances
    def get_commons(self, vertexSet, word_num):
        """Per-word coreference ids: 1-based entity index for mention words,
        0 everywhere else."""
        commons = [0 for _ in range(word_num)]
        for i, mentions in enumerate(vertexSet):
            for mention in mentions:
                global_pos = mention['global_pos']
                for p in global_pos:
                    commons[p] = i + 1
        return commons
    def get_target(self, triples, vertexSet):
        """Multi-hot target of shape (entity_num, entity_num, R).

        The last channel is the 'None' relation: it starts at 1 everywhere
        and is cleared for any pair that carries a real relation.
        """
        entity_num = len(vertexSet)
        target = np.zeros((entity_num, entity_num, config.R), dtype=int)
        target[:, :, -1] = 1
        for triple in triples:  # triple: (h, r, t)
            target[triple[0], triple[2], triple[1]] = 1
            target[triple[0], triple[2], -1] = 0
        return target
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        """
        Return
            document: dict
            adj: np.array of (word_num, word_num)
            target: Tensor of (mention_num*mention_num, R)
            sents_ids: Tensor of (sent_num, max_seq_len)
            spans: list of span list
            masks: Tensor of (sent_num, max_seq_len)
        """
        if not isinstance(idx, int):
            raise TypeError
        if idx < 0 or idx >= len(self.data):
            raise IndexError
        document = self.data[idx]
        vertexSet = document['vertexSet']
        sents = document['sents']
        word_num = document['word_num']
        triples = document['triples']
        sent_ids, global_spans, masks = self.get_ids(sents)
        distances = self.get_distances(vertexSet)
        commons = self.get_commons(vertexSet, word_num)
        types = self.get_types(vertexSet, global_spans)
        target = self.get_target(triples, vertexSet)
        return document, types, target, sent_ids, global_spans, masks, distances, commons
|
Hunter-DDM/cfer-document-level-RE | code/config.py | <reponame>Hunter-DDM/cfer-document-level-RE
import torch
from transformers import BertTokenizer
class config:
    """Global, mutable configuration shared by all modules (never instantiated)."""
    # dataset
    train_data_path = './data/train_annotated.json'
    dev_data_path = './data/dev.json'
    test_data_path = './data/test.json'
    # path: precomputed dependency paths between entity pairs (.npy)
    train_paths_path = './data/path/train_paths.npy'
    dev_paths_path = './data/path/dev_paths.npy'
    test_paths_path = './data/path/test_paths.npy'
    # adj matrix: precomputed word-level adjacency matrices (.npy)
    train_adjs_path = './data/adj/train_adjs.npy'
    dev_adjs_path = './data/adj/dev_adjs.npy'
    # save path prefixes; threshold/F1 suffixes are appended at save time
    save_path = './model/model_bert_512_para_ema2_test'
    ema_path = './model/model_bert_512_para_ema2_test'
    # log path
    log_path = './log/model_bert_512_para_ema2.log'
    # bert path (HuggingFace model name or local directory)
    bert_path = 'bert-base-uncased'
    len_head_entity_index = 0
    # hyper-parameters (to be tuned)
    device = torch.device('cuda:0')
    EPOCHS = 300
    tokenizer = BertTokenizer.from_pretrained(bert_path)  # loaded once at import
    lr_other = 5e-4  # learning rate for non-BERT parameters
    hidden_size = 512
    dropout_embedding = 0.2
    dropout_gru = 0.2
    dropout_attention = 0.4
    dropout_gcn = 0.6
    sample_rate = 3  # negatives sampled per positive pair during training
    max_path_len = 55  # maximum dependency-path length fed to the path GRU
    dis_num = 50  # size of the sentence-distance embedding table
    common_num = 50  # size of the coreference-id embedding table
    batch_size = 32  # gradient-accumulation steps per optimizer update
    thresholds = [0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]  # dev sweep grid
    lr_bert = 1e-5
    warmup_steps = 0.06  # fraction of total steps used for LR warm-up
    num_updates = 0  # running optimizer-update counter, mutated by the train loop
    gcn_layer_first = 4
    gcn_layer_second = 4
    # hyper-parameters (do not tune)
    bert_size = 768  # BERT-base hidden size
    common_emb_dim = 20
    dis_emb_dim = 20
    type_emb_dim = 100
    R = 97  # 96 relation classes + 1 trailing "None" channel
    type2id = {'ORG': 1, 'TIME': 2, 'MISC': 3, 'LOC': 4, 'PER': 5, 'NUM': 6, 'None': 7}
    # NOTE(review): type ids span 0..7 (0 = untyped word, 'None' = 7) while
    # type_num is 7, so nn.Embedding(type_num, ...) would reject id 7.
    # Presumably the 'None' mention type never occurs in the data — confirm.
    type_num = 7
    VOCAB_SIZE = 65507
    relation2id = {}  # filled by data_loader.get_relation at startup
    id2relation = {}  # inverse of relation2id
|
Hunter-DDM/cfer-document-level-RE | code/main.py | <filename>code/main.py
import argparse
import copy
import math
import json
import time
import re
import random
import gc
import sys
import os
import logging
import pickle
from tqdm.autonotebook import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
from transformers import BertTokenizer, BertModel, AdamW, BertConfig, BertPreTrainedModel, get_linear_schedule_with_warmup
from config import config
from data_loader import *
from model import *
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'  # keep all TensorFlow C++ log output
# Append run logs to the file configured in config.log_path.
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s',
                    level=logging.DEBUG,
                    filename=config.log_path,
                    filemode='a')
def seed_everything(seed):
    """Seed every RNG in use (python, hashing, numpy, torch CPU/CUDA) and
    force deterministic cuDNN kernels so runs are reproducible."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)  # affects hash randomisation of subprocesses
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Deterministic cuDNN trades some speed for reproducibility.
    torch.backends.cudnn.deterministic = True
seed_everything(seed=42)
def loss_fn(pred, target):
    """Multi-label classification loss.

    pred: (n*n, R) raw logits per entity pair and relation channel.
    target: (n*n, R) binary ground-truth indicators.

    Returns the mean element-wise binary cross-entropy with logits
    (identical to nn.BCEWithLogitsLoss(reduction='mean')).
    """
    return F.binary_cross_entropy_with_logits(pred, target, reduction='mean')
def train_fn(data_loader, data_adjs, data_paths, model, optimizer, scheduler, ema):
    """Train for one epoch, one document per step.

    Gradients are accumulated for config.batch_size documents before each
    optimizer/scheduler step, after which the EMA shadow weights are
    updated.  Entity pairs are subsampled per document: all positives plus
    up to config.sample_rate negatives per positive (capped at 173 pairs
    total; 20 random pairs when the document has no positive pair).

    Returns the average (batch-size-scaled) loss per document.
    """
    model.train()
    all_loss = 0.
    # Visit documents in a fresh random order each epoch.
    data_index = [i for i in range(len(data_loader))]
    random.shuffle(data_index)
    tk0 = tqdm(data_index, total=len(data_index), desc="Training")
    model.zero_grad()
    for bi, index in enumerate(tk0):
        document, types, target, sents_ids, global_spans, masks, all_distances, commons = data_loader[index]
        adj = data_adjs[index]
        paths_list = data_paths[index]  # (entity_num*entity_num, 70)
        vertexSet = document['vertexSet']
        n = len(vertexSet)
        adj = torch.tensor(adj, dtype=torch.float)
        types = torch.tensor(types, dtype=torch.long)
        target = torch.tensor(target, dtype=torch.float).view(-1, config.R)  # (entity_num*entity_num, 97)
        sents_ids = torch.tensor(sents_ids, dtype=torch.long)  # (1 or 2, id_num)
        masks = torch.tensor(masks, dtype=torch.long)
        commons = torch.tensor(commons, dtype=torch.long)
        # Number of positive (pair, relation) labels in this document.
        post_count = int(target[:, :-1].sum().item())
        idx = [i for i in range(target.shape[0])]
        indices = []
        mask_count = 0
        random.shuffle(idx)
        if post_count == 0:
            # No positives: train on 20 random, all-negative pairs.
            for i in idx[:20]:
                indices.append(i)
        else:
            # Keep every positive pair; sample negatives at
            # config.sample_rate per positive, capped at 173 pairs overall.
            for i in idx:
                if len(indices) >= 173:
                    break
                if target[i][-1].item() == 0:
                    indices.append(i)
                if target[i][-1].item() == 1 and mask_count < post_count * config.sample_rate:
                    indices.append(i)
                    mask_count += 1
        random.shuffle(indices)
        paths_indexs = [paths_list[i] for i in indices]
        distances = [all_distances[i] for i in indices]
        # Pair index i encodes (head, tail) = (i // n, i % n).
        head_entity_index = []
        tail_entity_index = []
        for i in indices:
            head_index = i // n
            tail_index = i % n
            head_entity_index.append(head_index)
            tail_entity_index.append(tail_index)
        sents_ids = sents_ids.to(config.device)
        types = types.to(config.device)
        adj = adj.to(config.device)
        masks = masks.to(config.device)
        commons = commons.to(config.device)
        indices = torch.tensor(indices, dtype=torch.long)
        target = torch.index_select(target, 0, indices).to(config.device)
        pred = model(sents_ids, masks, vertexSet, types, adj, global_spans, paths_indexs, head_entity_index,
                     tail_entity_index, commons, distances)
        loss = loss_fn(pred, target)
        tk0.set_postfix(loss=loss.item())
        # Scale for gradient accumulation over config.batch_size documents.
        loss = loss / config.batch_size
        loss.backward()
        if (bi > 0 and bi % config.batch_size == 0) or bi == len(data_index) - 1:
            optimizer.step()
            optimizer.zero_grad()
            scheduler.step()
            # EMA: update the shadow weights after every optimizer step.
            ema(model, config.num_updates)
            config.num_updates += 1
        all_loss += loss.item()
    return all_loss / len(data_loader)
def eval(data_loader, data_adjs, data_paths, model):
    """Evaluate on the dev split, sweeping config.thresholds.

    For every candidate global threshold this counts predicted and correct
    (head, relation, tail) triples over all documents, then reports the
    precision/recall/F1 of the best threshold.

    Note: the name shadows the ``eval`` builtin; kept for caller
    compatibility.

    Returns (best_P, best_R, best_F1, best_threshold).
    """
    model.eval()
    pred_counts = [0 for _ in range(len(config.thresholds))]  # predictions per threshold
    pred_rights = [0 for _ in range(len(config.thresholds))]  # correct predictions per threshold
    right = 0  # total gold triples seen
    with torch.no_grad():
        tk0 = tqdm(data_loader, total=len(data_loader), desc="Evaluating")
        for bi, data in enumerate(tk0):
            document, types, target, sents_ids, global_spans, masks, all_distances, commons = data_loader[bi]
            adj = data_adjs[bi]
            paths_indexs = data_paths[bi]
            vertexSet = document['vertexSet']
            triples = document['triples']
            adj = torch.tensor(adj, dtype=torch.float).to(config.device)
            types = torch.tensor(types, dtype=torch.long).to(config.device)
            sents_ids = torch.tensor(sents_ids, dtype=torch.long).to(config.device)
            masks = torch.tensor(masks, dtype=torch.long).to(config.device)
            commons = torch.tensor(commons, dtype=torch.long).to(config.device)
            # Score every ordered entity pair: pair i -> (i // n, i % n).
            indices = [i for i in range(len(paths_indexs))]
            head_entity_index = []
            tail_entity_index = []
            n = len(vertexSet)
            for i in indices:
                head_index = i // n
                tail_index = i % n
                head_entity_index.append(head_index)
                tail_entity_index.append(tail_index)
            model.zero_grad()
            pred = model(sents_ids, masks, vertexSet, types, adj, global_spans, paths_indexs, head_entity_index,
                         tail_entity_index, commons, all_distances)
            pred = pred.view(len(vertexSet), len(vertexSet), -1)  # (mention_num, mention_num, 97)
            pred = pred[:, :, :-1]  # drop the trailing "None" relation channel
            # NOTE(review): F.sigmoid is deprecated in modern PyTorch;
            # torch.sigmoid is the drop-in replacement.
            pred = F.sigmoid(pred)
            right += len(triples)
            # Tally predictions for every candidate threshold.
            for i, threshold in enumerate(config.thresholds):
                pred_ = (pred > threshold).nonzero().cpu().numpy()
                pred_counts[i] += pred_.shape[0]
                for j in range(pred_.shape[0]):
                    head = pred_[j][0]
                    tail = pred_[j][1]
                    relation = pred_[j][2]
                    if (head, relation, tail) in triples:
                        pred_rights[i] += 1
    # Pick the threshold maximising F1.
    best_P = 0.
    best_R = 0.
    best_F1 = 0.
    best_threshold = 0.
    for i, threshold in enumerate(config.thresholds):
        pred_count = pred_counts[i]
        pred_right = pred_rights[i]
        if pred_count == 0:
            P = 0
        else:
            P = pred_right / pred_count
        R = pred_right / right
        F1 = 0.
        if P + R > 0:
            F1 = 2 * P * R / (P + R)
        if best_F1 < F1:
            best_P = P
            best_R = R
            best_F1 = F1
            best_threshold = threshold
    return best_P, best_R, best_F1, best_threshold
def run(model, train_data_loader, dev_data_loader, train_adjs, dev_adjs, train_paths, dev_paths, optimizer, scheduler,
        ema):
    """Main training loop.

    Trains every epoch, periodically evaluates the EMA-averaged weights on
    the dev split, and checkpoints the model plus the EMA shadow whenever
    dev F1 improves.  Prints and logs the best F1/threshold at the end.
    """
    best_f1 = 0.
    best_threshold = 0.
    for epoch in range(config.EPOCHS):
        print('epoch:', epoch)
        loss = train_fn(
            train_data_loader,
            train_adjs,
            train_paths,
            model,
            optimizer,
            scheduler,
            ema
        )
        # Evaluate sparsely early on and densely late in training.
        # NOTE(review): epochs 100-200 (inclusive) and epoch 250 are never
        # evaluated by this schedule — confirm that is intended.
        if (epoch < 100 and epoch % 10 == 0) or (epoch > 200 and epoch < 250) or (epoch > 250 and epoch % 5 == 0):
            # Evaluate with the EMA-averaged weights, then restore the live ones.
            ema.assign(model)
            P, R, F1, threshold = eval(
                dev_data_loader,
                dev_adjs,
                dev_paths,
                model
            )
            # EMA resume
            ema.resume(model)
            print('Dev: P:{}, R:{}, F1:{}, threshold:{}'.format(str(P)[:6], str(R)[:6], str(F1)[:6], threshold))
            logging.info('Dev: P:{}, R:{}, F1:{}, threshold:{}'.format(str(P)[:6], str(R)[:6], str(F1)[:6], threshold))
            if F1 > best_f1:
                checkpoint = {
                    "model_state_dict": model.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                }
                model_path = config.save_path + "_{}_{}.pkl".format(threshold, str(F1)[:6])
                torch.save(checkpoint, model_path)
                best_f1 = F1
                best_threshold = threshold
                # save EMA shadow alongside the checkpoint.
                # FIX: the format string had one placeholder but two
                # arguments, so str.format silently dropped the F1 value;
                # mirror the "_{threshold}_{F1}" pattern used for the
                # model checkpoint above.
                with open(config.ema_path + str(config.num_updates) + '_{}_{}.json'.format(threshold, str(F1)[:6]),
                          'wb') as json_file:
                    pickle.dump(ema.shadow, json_file)
                print('ema saved!')
    print('best f1:', best_f1, 'best_threshold:', best_threshold)
    logging.info('best f1:{}, best_threshold:{}'.format(str(best_f1), best_threshold))
def load_datas():
    """Load and preprocess the corpora plus precomputed paths/adjacencies.

    Returns a 7-tuple: (train_data, dev_data, test_data,
    train_paths, dev_paths, train_adjs, dev_adjs).
    """
    raw_train = load_data(config.train_data_path)
    raw_dev = load_data(config.dev_data_path)
    test_data = load_data(config.test_data_path)
    # get_relation is run over train+dev before tokenising; presumably it
    # registers the relation labels -- defined elsewhere in the project.
    get_relation(raw_train, raw_dev)
    train_data = data_process(raw_train)
    dev_data = data_process(raw_dev)
    train_paths = list(np.load(config.train_paths_path, allow_pickle=True))
    dev_paths = list(np.load(config.dev_paths_path, allow_pickle=True))
    train_adjs = list(np.load(config.train_adjs_path, allow_pickle=True))
    dev_adjs = list(np.load(config.dev_adjs_path, allow_pickle=True))
    return train_data, dev_data, test_data, train_paths, dev_paths, train_adjs, dev_adjs
def main():
    """Entry point: build data loaders, model, optimizer, scheduler, EMA and train."""
    train_data, dev_data, test_data, train_paths, dev_paths, train_adjs, dev_adjs = load_datas()
    train_data_loader = DataLoader(train_data)
    dev_data_loader = DataLoader(dev_data)
    model = Model().to(config.device)
    # Four parameter groups: BERT vs. other weights, each split into decayed
    # and non-decayed (bias / LayerNorm parameters get weight_decay=0).
    bert_no_decay = [p for n, p in model.named_parameters() if 'bert' in n and ('bias' in n or 'LayerNorm' in n)]
    bert_decay = [p for n, p in model.named_parameters() if 'bert' in n and 'bias' not in n and 'LayerNorm' not in n]
    other_params_decay = [p for n, p in model.named_parameters() if 'bert' not in n and 'bias' not in n]
    other_params_no_decay = [p for n, p in model.named_parameters() if 'bert' not in n and 'bias' in n]
    optimizer = AdamW([{'params': other_params_decay, 'lr': config.lr_other, 'weight_decay': 0.0001},
                       {'params': other_params_no_decay, 'lr': config.lr_other, 'weight_decay': 0},
                       {'params': bert_decay, 'lr': config.lr_bert, 'weight_decay': 0.0001},
                       {'params': bert_no_decay, 'lr': config.lr_bert, 'weight_decay': 0}]
                      )
    # fixed: math.ceil(a // b) is a no-op (ceil of an already-floored int);
    # use true division so a partial final batch is counted as a step.
    num_training_steps = config.EPOCHS * math.ceil(len(train_data_loader) / config.batch_size)
    # warmup_steps appears to be a fraction of total steps -- confirm.
    num_warmup_steps = config.warmup_steps * num_training_steps
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps,
                                                num_training_steps=num_training_steps)
    ema = EMA(mu=0.9999)
    for name, param in model.named_parameters():
        if param.requires_grad:
            ema.register(name, param)
    run(model, train_data_loader, dev_data_loader, train_adjs, dev_adjs, train_paths, dev_paths, optimizer, scheduler,
        ema)
main()
|
arita37/pyvtreat | pkg/tests/test_range.py | import vtreat.util
import pandas
import numpy
def test_range():
    """has_range should accept both dense and sparse integer arrays."""
    # https://github.com/WinVector/pyvtreat/blob/master/Examples/Bugs/asarray_issue.md
    # https://github.com/WinVector/pyvtreat/issues/7
    numpy.random.seed(2019)
    dense = numpy.random.randint(2, size=10)
    sparse = pandas.arrays.SparseArray(dense, fill_value=0)
    for vec in (dense, sparse):
        assert vtreat.util.has_range(vec)
|
arita37/pyvtreat | pkg/tests/test_col_name_issues.py | <reponame>arita37/pyvtreat
import pytest
import pandas
import vtreat
def test_col_dups_1():
    """Duplicate input column names must be rejected with a ValueError."""
    frame = pandas.DataFrame({'x': [1], 'x2': [2], 'y': [3]})
    frame.columns = ['x', 'x', 'y']  # force a duplicated 'x' column
    treatment = vtreat.UnsupervisedTreatment(
        var_list=['x'],
        cols_to_copy=['y'],
    )
    with pytest.raises(ValueError):
        treatment.fit_transform(frame, frame["y"])
def test_xgboost_col_name_issue_1():
    """Produced column names must avoid the characters xgboost rejects.

    https://stackoverflow.com/questions/48645846/pythons-xgoost-valueerrorfeature-names-may-not-contain-or
    ValueError('feature_names may not contain [, ] or <')
    """
    frame = pandas.DataFrame({'x': ['[', ']', '<', '>']})
    treatment = vtreat.UnsupervisedTreatment(var_list=['x'])
    produced = treatment.fit_transform(frame, None)
    names = list(produced.columns)
    assert all(not any(ch in name for ch in "[]<>") for name in names)
    assert len(set(names)) == len(names)
def test_xgboost_col_name_issue_2():
    """Sanitizing must stay unique even when a level matches a replacement token.

    https://stackoverflow.com/questions/48645846/pythons-xgoost-valueerrorfeature-names-may-not-contain-or
    ValueError('feature_names may not contain [, ] or <')
    """
    frame = pandas.DataFrame({'x': ['[', ']', '<', '_lt_']})  # '_lt_' collides with '<'
    treatment = vtreat.UnsupervisedTreatment(var_list=['x'])
    produced = treatment.fit_transform(frame, None)
    names = list(produced.columns)
    assert all(not any(ch in name for ch in "[]<>") for name in names)
    assert len(set(names)) == len(names)
|
arita37/pyvtreat | pkg/tests/test_result_restriction.py | <reponame>arita37/pyvtreat
import pandas
import numpy
import numpy.random
import vtreat
import vtreat.util
def test_classification():
    """Exercise get/set_result_restriction on a BinomialOutcomeTreatment."""
    numpy.random.seed(46546)

    def make_data(nrows):
        # Synthetic data: y is a noisy function of x; xc is a categorical
        # derived from y (so it is informative); x2 is pure noise.
        d = pandas.DataFrame({"x": [0.1 * i for i in range(nrows)]})
        d["y"] = d["x"] + numpy.sin(d["x"]) + 0.1 * numpy.random.normal(size=d.shape[0])
        d["xc"] = ["level_" + str(5 * numpy.round(yi / 5, 1)) for yi in d["y"]]
        d["x2"] = numpy.random.normal(size=d.shape[0])
        d.loc[d["xc"] == "level_-1.0", "xc"] = numpy.nan  # introduce a nan level
        d["yc"] = d["y"] > 0.5
        return d

    d = make_data(500)
    # input variables: everything except the outcome columns
    vars = [c for c in d.columns if c not in set(['y', 'yc'])]
    d_test = make_data(100)
    transform = vtreat.BinomialOutcomeTreatment(
        outcome_name="yc",  # outcome variable
        outcome_target=True,  # outcome of interest
        cols_to_copy=["y"],  # columns to "carry along" but not treat as input variables
        params=vtreat.vtreat_parameters({
            'filter_to_recommended': False
        })
    )
    d_prepared = transform.fit_transform(d[vars], d["yc"])
    # show vars are under control
    # without filtering, no restriction is set and noise column x2 survives
    assert transform.get_result_restriction() is None
    assert 'x2' in set(d_prepared.columns)
    # an explicit restriction limits transform() output to those columns
    # (plus the outcome / copied columns)
    transform.set_result_restriction(['xc_logit_code', 'x2'])
    dt_prepared = transform.transform(d_test)
    assert set(dt_prepared.columns) == set(['y', 'yc', 'x2', 'xc_logit_code'])
    transform = vtreat.BinomialOutcomeTreatment(
        outcome_name="yc",  # outcome variable
        outcome_target=True,  # outcome of interest
        cols_to_copy=["y"],  # columns to "carry along" but not treat as input variables
        params=vtreat.vtreat_parameters({
            'filter_to_recommended': True
        })
    )
    d_prepared = transform.fit_transform(d[vars], d["yc"])
    # with filtering, a restriction is set automatically and x2 is excluded
    assert transform.get_result_restriction() is not None
    assert 'x2' not in transform.get_result_restriction()
    assert 'x2' not in set(d_prepared.columns)
    # a manual restriction can still re-admit a filtered column
    transform.set_result_restriction(['xc_logit_code', 'x2'])
    dt_prepared = transform.transform(d_test)
    assert set(dt_prepared.columns) == set(['y', 'yc', 'x2', 'xc_logit_code'])
|
arita37/pyvtreat | pkg/vtreat/util.py | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 11:40:41 2019
@author: johnmount
"""
import math
import statistics
import hashlib
import numpy
import pandas
import vtreat.stats_utils
def safe_to_numeric_array(x):
    """Coerce a vector-like value to a float numpy array.

    Goes through pandas.Series and adds 0.0 to force numeric conversion;
    work-around for https://github.com/WinVector/pyvtreat/issues/7
    """
    # noinspection PyTypeChecker
    as_series = pandas.Series(x) + 0.0
    return numpy.asarray(as_series, dtype=float)
def can_convert_v_to_numeric(x):
    """check if non-empty vector can convert to numeric"""
    try:
        numpy.asarray(x + 0.0, dtype=float)
    except TypeError:
        return False
    return True
def is_bad(x):
    """ for numeric vector x, return logical vector of positions that are null, NaN, infinite"""
    if not can_convert_v_to_numeric(x):
        # non-numeric vectors: only null-ness is checked
        return pandas.isnull(x)
    arr = safe_to_numeric_array(x)
    nan_or_inf = numpy.logical_or(numpy.isnan(arr), numpy.isinf(arr))
    return numpy.logical_or(pandas.isnull(arr), nan_or_inf)
def has_range(x):
    """Return True if x has at least two usable values that are not all equal."""
    arr = safe_to_numeric_array(x)
    good_values = arr[numpy.logical_not(is_bad(arr))]
    if len(good_values) < 2:
        return False
    return numpy.max(good_values) > numpy.min(good_values)
def summarize_column(x, *, fn=numpy.mean):
    """
    Summarize column to a non-missing scalar.

    :param x: a vector/Series or column of numbers
    :param fn: summarize function (such as numpy.mean), only passed non-bad positions
    :return: scalar float summary of the non-None positions of x (otherwise 0)
    """
    arr = safe_to_numeric_array(x)
    good = numpy.logical_not(is_bad(arr))
    if sum(good) < 1:
        return 0.0
    summary = 0.0 + fn(arr[good])
    # guard against summaries that are themselves null/NaN/inf
    if pandas.isnull(summary) or math.isnan(summary) or math.isinf(summary):
        return 0.0
    return summary
def characterize_numeric(x):
    """compute na count, min,max,mean of a numeric vector"""
    arr = safe_to_numeric_array(x)
    good = numpy.logical_not(is_bad(arr))
    n = len(arr)
    n_not_bad = sum(good)
    if n_not_bad <= 0:
        # no usable values: summary statistics are undefined
        return {
            "n": n,
            "n_not_bad": n_not_bad,
            "min": None,
            "mean": None,
            "max": None,
            "varies": False,
            "has_range": False,
        }
    vals = arr[good]
    lo = numpy.min(vals)
    hi = numpy.max(vals)
    return {
        "n": n,
        "n_not_bad": n_not_bad,
        "min": lo,
        "mean": numpy.mean(vals),
        "max": hi,
        # varies: values differ, or some (but not all) entries were bad
        "varies": (hi > lo) or ((n_not_bad > 0) and (n_not_bad < n)),
        "has_range": (hi > lo),
    }
def grouped_by_x_statistics(x, y):
    """compute some grouped by x vector summaries of numeric y vector (no missing values in y)"""
    n = len(x)
    if n <= 0:
        raise ValueError("no rows")
    if n != len(y):
        raise ValueError("len(y)!=len(x)")
    y = safe_to_numeric_array(y)
    eps = 1.0e-3  # smoothing added to variances to avoid division by zero
    sf = pandas.DataFrame({"x": x, "y": y})
    sf.reset_index(inplace=True, drop=True)
    # treat missing x as its own explicit level
    bad_posns = pandas.isnull(sf["x"])
    sf.loc[bad_posns, "x"] = "_NA_"
    global_mean = sf["y"].mean()
    # per-level mean broadcast back onto every row, then squared deviations
    sf["_group_mean"] = sf.groupby("x")["y"].transform("mean")
    sf["_var"] = (sf["y"] - sf["_group_mean"]) ** 2
    sf["_ni"] = 1  # row counter; summing it below yields the per-level sizes
    sf = sf.groupby("x").sum()
    sf.reset_index(inplace=True, drop=False)
    # convert the sums back into per-level means and variances (ni-1 denominator)
    sf["y"] = sf["y"] / sf["_ni"]
    sf["_group_mean"] = sf["_group_mean"] / sf["_ni"]
    sf["_var"] = sf["_var"] / (sf["_ni"] - 1) + eps
    # single-row levels have undefined variance; replace with the average variance
    avg_var = 0
    bad_vars = is_bad(sf["_var"])
    if sum(bad_vars) < len(sf["_var"]):
        avg_var = numpy.nanmean(sf["_var"])
    sf.loc[bad_vars, "_var"] = avg_var
    # between-level variance (eps floor when there is only one level)
    if sf.shape[0] > 1:
        sf["_vb"] = statistics.variance(sf["_group_mean"]) + eps
    else:
        sf["_vb"] = eps
    sf["_gm"] = global_mean
    # hierarchical model is in:
    # http://www.win-vector.com/blog/2017/09/partial-pooling-for-lower-variance-variable-encoding/
    # using naive empirical estimates of variances
    # adjusted from ni to ni-1 and +eps variance to make
    # rare levels look like new levels.
    sf["_hest"] = (
        (sf["_ni"] - 1) * sf["_group_mean"] / sf["_var"] + sf["_gm"] / sf["_vb"]
    ) / ((sf["_ni"] - 1) / sf["_var"] + 1 / sf["_vb"])
    return sf
def score_variables(cross_frame, variables, outcome,
                    *,
                    is_classification=False):
    """Score the linear relation of each variable to the outcome.

    :param cross_frame: pandas.DataFrame of treated variable columns
    :param variables: column names of cross_frame to score
    :param outcome: dependent variable vector (len must match cross_frame rows)
    :param is_classification: if True report a pseudo-R2 instead of squared correlation
    :return: pandas.DataFrame with one score row per variable (None if no variables)
    :raises ValueError: if outcome length does not match cross_frame
    """
    if len(variables) <= 0:
        return None
    n = cross_frame.shape[0]
    if n != len(outcome):
        # fixed: the message previously read "len(n)"
        raise ValueError("len(outcome) must equal cross_frame.shape[0]")
    outcome = safe_to_numeric_array(outcome)

    def f(v):
        # score one column; columns (or outcomes) with no range get a null row
        col = safe_to_numeric_array(cross_frame[v])
        if (n > 2) and \
                (numpy.max(col) > numpy.min(col)) and \
                (numpy.max(outcome) > numpy.min(outcome)):
            cor, sig = vtreat.stats_utils.our_corr_score(y_true=outcome, y_pred=col)
            r2 = cor ** 2
            if is_classification:
                r2, sig = vtreat.stats_utils.our_pseudo_R2(y_true=outcome, y_pred=col)
            return pandas.DataFrame(
                {
                    "variable": [v],
                    "has_range": [True],
                    "PearsonR": cor,
                    "R2": r2,
                    "significance": sig,
                }
            )
        return pandas.DataFrame(
            {
                "variable": [v],
                "has_range": [False],
                # numpy.nan: the numpy.NaN alias was removed in NumPy 2.0
                "PearsonR": [numpy.nan],
                "R2": [numpy.nan],
                "significance": [1.0],
            }
        )

    sf = [f(v) for v in variables]
    if len(sf) <= 0:
        return None
    sf = pandas.concat(sf, axis=0, sort=False)
    sf.reset_index(inplace=True, drop=True)
    return sf
def check_matching_numeric_frames(*, res, expect, tol=1.0e-4):
    """
    Check if two numeric pandas.DataFrame s are identical. assert if not
    :param res:
    :param expect:
    :param tol: numeric tolerance.
    :return: None
    """
    assert isinstance(expect, pandas.DataFrame)
    assert isinstance(res, pandas.DataFrame)
    assert res.shape == expect.shape
    for col_name in expect.columns:
        abs_diff = numpy.abs(expect[col_name] - res[col_name])
        assert numpy.max(abs_diff) <= tol
def unique_itmes_in_order(lst):
    """Return the distinct items of lst in first-seen order ([] for None).

    NOTE(review): "itmes" is a typo for "items", but the name is part of the
    public module API, so it is kept.
    """
    if lst is None:
        return []
    seen = set()
    ordered = []
    for item in lst:
        if item not in seen:
            seen.add(item)
            ordered.append(item)
    return ordered
# Translation table mapping characters that are unsafe in generated column
# names (e.g. rejected by xgboost) to readable ASCII tokens.  Built once.
_CLEAN_STRING_MAP = str.maketrans({
    '<': '_lt_',
    '>': '_gt_',
    '[': '_osq_',
    ']': '_csq_',
    '(': '_op_',
    ')': '_cp_',
    '.': '_',
})


def clean_string(strng):
    """Return strng with special characters replaced by safe ASCII tokens.

    Uses a single str.translate pass instead of chained str.replace calls;
    equivalent because no replacement token contains a mapped character.
    """
    return strng.translate(_CLEAN_STRING_MAP)
def build_level_codes(incoming_column_name, levels):
    """Build sanitized indicator-column names for the given categorical levels.

    Collisions after sanitizing are resolved by appending positional suffixes.
    """
    codes = [incoming_column_name + "_lev_" + clean_string(str(lev)) for lev in levels]
    if len(set(codes)) != len(codes):
        codes = [code + "_" + str(i) for i, code in enumerate(codes)]
    return codes
def hash_data_frame(d):
return hashlib.sha256(pandas.util.hash_pandas_object(d).values).hexdigest()
|
LonelVino/club-chinois-home | api_v2/views.py | <filename>api_v2/views.py<gh_stars>0
from django.http import JsonResponse
def names(request):
    """Return a static JSON payload listing example names."""
    return JsonResponse({'names': ['William', 'Rod', 'Grant']})
LonelVino/club-chinois-home | TestModel/views.py | <filename>TestModel/views.py
'''
/TestModel/views.py
-------------------------
Organize the views of Pages
'''
from django.http import JsonResponse
from django.http import HttpResponse
from TestModel.models import Test
from django.forms.models import model_to_dict
from rest_framework import serializers
def pages(request):
    """Return a static JSON payload listing page names."""
    return JsonResponse({'pages': ['User', 'ane', 'vol', 'pitch', '']})
# Database operations (simple ORM demonstrations)
def testdb(request):
    """Placeholder endpoint that just confirms the database route is reachable.

    NOTE(review): the original comment said "add data", but nothing is added.
    """
    return HttpResponse("<p>Connect successfully!</p>")
def testdb_get(request):
    """Read rows from the Test table and echo their names in an HTML response.

    Several ORM calls below are tutorial demonstrations whose results are
    unused; only ``Test.objects.all()`` actually feeds the response.
    """
    response = ""
    response1 = ""
    # objects.all() fetches every row -- the ORM analogue of SELECT * FROM.
    # NOTE(review): the local name ``list`` shadows the builtin.
    list = Test.objects.all()
    # filter() is the analogue of SQL WHERE; conditions restrict the result.
    response2 = Test.objects.filter(id=1)
    # get() fetches a single object.
    # NOTE(review): presumably raises if no row with id=1 exists -- confirm.
    response3 = Test.objects.get(id=1)
    # Slicing limits the rows returned -- analogue of OFFSET 0 LIMIT 1.
    Test.objects.order_by('name')[0:1]
    # Sorting.
    Test.objects.order_by("id")
    # The methods above can be chained.
    Test.objects.filter(name="runoob").order_by("id")
    # Concatenate every row's name into the output string.
    for var in list:
        response1 += var.name + " "
    response = response1
    return HttpResponse("<p> Get data successfully, data is: " + response + "</p>")
def testdb_post(request):
    """Insert one row (name='runoob') into the Test table."""
    test1 = Test(name='runoob')
    test1.save()
    # response body is Chinese for "data added successfully"
    return HttpResponse("<p>数据添加成功!</p>")
def testdb_update(request):
    """Rename the row with id=1 to 'Google' -- the ORM analogue of UPDATE."""
    # Fetch the id=1 row, change its name field, then save.
    test1 = Test.objects.get(id=1)
    test1.name = 'Google'
    test1.save()
    # Alternative single-statement form:
    # Test.objects.filter(id=1).update(name='Google')
    # Update every row:
    # Test.objects.all().update(name='Google')
    # response body is Chinese for "update succeeded"
    return HttpResponse("<p>修改成功</p>")
def testdb_delete(request):
    """Delete the first row of the Test table, if any."""
    test1 = Test.objects.all()
    if test1:
        # Delete the first row returned.
        test1[0].delete()
        # response body is Chinese for "delete succeeded"
        return HttpResponse("<p>删除成功</p>")
    else:
        return HttpResponse("<p>Fail to DELETE. The table is empty!</p>")
    # Alternative: delete by primary key:
    # Test.objects.filter(id=1).delete()
    # Delete all rows:
    # Test.objects.all().delete()
LonelVino/club-chinois-home | api_v2/apps.py | <gh_stars>0
from django.apps import AppConfig
class ApiV2Config(AppConfig):
    """Django app configuration for the ``api_v2`` application."""
    # use 64-bit auto-incrementing primary keys by default
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'api_v2'
ben21-meet/meet2019y1lab4 | Lab2.py | <filename>Lab2.py
from turtle import Turtle
import random
import turtle
class Ball(Turtle):
    """A circular turtle that steps by (dx, dy) and bounces off the screen edges.

    NOTE(review): the original ``__init__`` rebound ``self``, never called
    ``Turtle.__init__``, clobbered the ``color`` method with an attribute, and
    called ``Turtle.shape`` on the class; ``move`` lacked ``self`` and
    referenced undefined names, so instances crashed on use.  Rewritten to a
    working equivalent with the same constructor signature.
    """

    def __init__(self, r, color, dx, dy):
        # Initialise the underlying Turtle before configuring it.
        Turtle.__init__(self)
        self.r = r
        self.dx = dx
        self.dy = dy
        self.penup()
        self.shape("circle")
        # turtle's default circle stamp is 20px; scale so radius is ~r pixels
        self.shapesize(r / 10)
        self.color(color)

    def move(self, screen_width=None, screen_height=None):
        """Advance one step, reversing direction at the screen boundaries.

        screen_width/screen_height are half-extents of the canvas; when
        omitted they are read from the current turtle canvas.
        """
        if screen_width is None:
            screen_width = turtle.getcanvas().winfo_width() / 2
        if screen_height is None:
            screen_height = turtle.getcanvas().winfo_height() / 2
        new_x = self.xcor() + self.dx
        new_y = self.ycor() + self.dy
        # Bounce when the ball's edge would leave the visible area.
        if abs(new_x) + self.r > screen_width:
            self.dx = -self.dx
        if abs(new_y) + self.r > screen_height:
            self.dy = -self.dy
        self.goto(self.xcor() + self.dx, self.ycor() + self.dy)
        print("Ball is moving..")
# Half-extents of the turtle canvas, in pixels (coordinate origin is centred).
screen_width = turtle.getcanvas().winfo_width()/2
screen_height = turtle.getcanvas().winfo_height()/2
# A radius-100 yellow ball stepping 100px per move in both x and y.
ben23 = Ball(100,"Yellow",100,100)
# NOTE(review): starting at 989 makes the loop run only 11 times --
# presumably 0 was intended; confirm.
i = 989
while i < 1000:
    ben23.move()
    i += 1
# Keep the window open / run the Tk event loop.
turtle.mainloop()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.