id (int32) | repo (string) | path (string) | func_name (string) | original_string (string) | language (string) | code (string) | code_tokens (list of strings) | docstring (string) | docstring_tokens (list of strings) | sha (string) | url (string) |
|---|---|---|---|---|---|---|---|---|---|---|---|
26,100 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.set_commission | def set_commission(self, us_equities=None, us_futures=None):
"""Sets the commission models for the simulation.
Parameters
----------
us_equities : EquityCommissionModel
The commission model to use for trading US equities.
us_futures : FutureCommissionModel
The commission model to use for trading US futures.
See Also
--------
:class:`zipline.finance.commission.PerShare`
:class:`zipline.finance.commission.PerTrade`
:class:`zipline.finance.commission.PerDollar`
"""
if self.initialized:
raise SetCommissionPostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleCommissionModel(
asset_type='equities',
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.commission_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleCommissionModel(
asset_type='futures',
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.commission_models[Future] = us_futures | python | def set_commission(self, us_equities=None, us_futures=None):
"""Sets the commission models for the simulation.
Parameters
----------
us_equities : EquityCommissionModel
The commission model to use for trading US equities.
us_futures : FutureCommissionModel
The commission model to use for trading US futures.
See Also
--------
:class:`zipline.finance.commission.PerShare`
:class:`zipline.finance.commission.PerTrade`
:class:`zipline.finance.commission.PerDollar`
"""
if self.initialized:
raise SetCommissionPostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleCommissionModel(
asset_type='equities',
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.commission_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleCommissionModel(
asset_type='futures',
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.commission_models[Future] = us_futures | [
"def",
"set_commission",
"(",
"self",
",",
"us_equities",
"=",
"None",
",",
"us_futures",
"=",
"None",
")",
":",
"if",
"self",
".",
"initialized",
":",
"raise",
"SetCommissionPostInit",
"(",
")",
"if",
"us_equities",
"is",
"not",
"None",
":",
"if",
"Equity",
"not",
"in",
"us_equities",
".",
"allowed_asset_types",
":",
"raise",
"IncompatibleCommissionModel",
"(",
"asset_type",
"=",
"'equities'",
",",
"given_model",
"=",
"us_equities",
",",
"supported_asset_types",
"=",
"us_equities",
".",
"allowed_asset_types",
",",
")",
"self",
".",
"blotter",
".",
"commission_models",
"[",
"Equity",
"]",
"=",
"us_equities",
"if",
"us_futures",
"is",
"not",
"None",
":",
"if",
"Future",
"not",
"in",
"us_futures",
".",
"allowed_asset_types",
":",
"raise",
"IncompatibleCommissionModel",
"(",
"asset_type",
"=",
"'futures'",
",",
"given_model",
"=",
"us_futures",
",",
"supported_asset_types",
"=",
"us_futures",
".",
"allowed_asset_types",
",",
")",
"self",
".",
"blotter",
".",
"commission_models",
"[",
"Future",
"]",
"=",
"us_futures"
] | Sets the commission models for the simulation.
Parameters
----------
us_equities : EquityCommissionModel
The commission model to use for trading US equities.
us_futures : FutureCommissionModel
The commission model to use for trading US futures.
See Also
--------
:class:`zipline.finance.commission.PerShare`
:class:`zipline.finance.commission.PerTrade`
:class:`zipline.finance.commission.PerDollar` | [
"Sets",
"the",
"commission",
"models",
"for",
"the",
"simulation",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1528-L1563 |
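A minimal usage sketch for the `set_commission` entry above: the call belongs in an algorithm's `initialize`, before the simulation starts (calling it later raises `SetCommissionPostInit`). The `PerShare` cost and minimum below are illustrative values, and an ingested data bundle for `zipline run` is assumed.

```python
from zipline.api import set_commission
from zipline.finance import commission

def initialize(context):
    # Charge $0.001 per share with a $1.00 minimum per trade for US equities.
    set_commission(us_equities=commission.PerShare(cost=0.001, min_trade_cost=1.00))
```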
26,101 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.set_cancel_policy | def set_cancel_policy(self, cancel_policy):
"""Sets the order cancellation policy for the simulation.
Parameters
----------
cancel_policy : CancelPolicy
The cancellation policy to use.
See Also
--------
:class:`zipline.api.EODCancel`
:class:`zipline.api.NeverCancel`
"""
if not isinstance(cancel_policy, CancelPolicy):
raise UnsupportedCancelPolicy()
if self.initialized:
raise SetCancelPolicyPostInit()
self.blotter.cancel_policy = cancel_policy | python | def set_cancel_policy(self, cancel_policy):
"""Sets the order cancellation policy for the simulation.
Parameters
----------
cancel_policy : CancelPolicy
The cancellation policy to use.
See Also
--------
:class:`zipline.api.EODCancel`
:class:`zipline.api.NeverCancel`
"""
if not isinstance(cancel_policy, CancelPolicy):
raise UnsupportedCancelPolicy()
if self.initialized:
raise SetCancelPolicyPostInit()
self.blotter.cancel_policy = cancel_policy | [
"def",
"set_cancel_policy",
"(",
"self",
",",
"cancel_policy",
")",
":",
"if",
"not",
"isinstance",
"(",
"cancel_policy",
",",
"CancelPolicy",
")",
":",
"raise",
"UnsupportedCancelPolicy",
"(",
")",
"if",
"self",
".",
"initialized",
":",
"raise",
"SetCancelPolicyPostInit",
"(",
")",
"self",
".",
"blotter",
".",
"cancel_policy",
"=",
"cancel_policy"
] | Sets the order cancellation policy for the simulation.
Parameters
----------
cancel_policy : CancelPolicy
The cancellation policy to use.
See Also
--------
:class:`zipline.api.EODCancel`
:class:`zipline.api.NeverCancel` | [
"Sets",
"the",
"order",
"cancellation",
"policy",
"for",
"the",
"simulation",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1566-L1585 |
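A short sketch of the `set_cancel_policy` entry above, under the same algorithm-skeleton assumption: the argument must be a `CancelPolicy` instance and the call must happen during `initialize`.

```python
from zipline.api import set_cancel_policy, EODCancel

def initialize(context):
    # Any order still open at the end of each trading day is cancelled automatically.
    set_cancel_policy(EODCancel())
```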
26,102 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.order_percent | def order_percent(self,
asset,
percent,
limit_price=None,
stop_price=None,
style=None):
"""Place an order in the specified asset corresponding to the given
percent of the current portfolio value.
Parameters
----------
asset : Asset
The asset that this order is for.
percent : float
The percentage of the portfolio value to allocate to ``asset``.
This is specified as a decimal, for example: 0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_percent_amount(asset, percent)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style) | python | def order_percent(self,
asset,
percent,
limit_price=None,
stop_price=None,
style=None):
"""Place an order in the specified asset corresponding to the given
percent of the current portfolio value.
Parameters
----------
asset : Asset
The asset that this order is for.
percent : float
The percentage of the portfolio value to allocate to ``asset``.
This is specified as a decimal, for example: 0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_percent_amount(asset, percent)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style) | [
"def",
"order_percent",
"(",
"self",
",",
"asset",
",",
"percent",
",",
"limit_price",
"=",
"None",
",",
"stop_price",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_can_order_asset",
"(",
"asset",
")",
":",
"return",
"None",
"amount",
"=",
"self",
".",
"_calculate_order_percent_amount",
"(",
"asset",
",",
"percent",
")",
"return",
"self",
".",
"order",
"(",
"asset",
",",
"amount",
",",
"limit_price",
"=",
"limit_price",
",",
"stop_price",
"=",
"stop_price",
",",
"style",
"=",
"style",
")"
] | Place an order in the specified asset corresponding to the given
percent of the current portfolio value.
Parameters
----------
asset : Asset
The asset that this order is for.
percent : float
The percentage of the portfolio value to allocate to ``asset``.
This is specified as a decimal, for example: 0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_value` | [
"Place",
"an",
"order",
"in",
"the",
"specified",
"asset",
"corresponding",
"to",
"the",
"given",
"percent",
"of",
"the",
"current",
"portfolio",
"value",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1616-L1662 |
26,103 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.order_target | def order_target(self,
asset,
target,
limit_price=None,
stop_price=None,
style=None):
"""Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
Parameters
----------
asset : Asset
The asset that this order is for.
target : int
The desired number of shares of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target`` does not take into account any open orders. For
example:
.. code-block:: python
order_target(sid(0), 10)
order_target(sid(0), 10)
This code will result in 20 shares of ``sid(0)`` because the first
call to ``order_target`` will not have been filled when the second
``order_target`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target_percent`
:func:`zipline.api.order_target_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_target_amount(asset, target)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style) | python | def order_target(self,
asset,
target,
limit_price=None,
stop_price=None,
style=None):
"""Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
Parameters
----------
asset : Asset
The asset that this order is for.
target : int
The desired number of shares of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target`` does not take into account any open orders. For
example:
.. code-block:: python
order_target(sid(0), 10)
order_target(sid(0), 10)
This code will result in 20 shares of ``sid(0)`` because the first
call to ``order_target`` will not have been filled when the second
``order_target`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target_percent`
:func:`zipline.api.order_target_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_target_amount(asset, target)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style) | [
"def",
"order_target",
"(",
"self",
",",
"asset",
",",
"target",
",",
"limit_price",
"=",
"None",
",",
"stop_price",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_can_order_asset",
"(",
"asset",
")",
":",
"return",
"None",
"amount",
"=",
"self",
".",
"_calculate_order_target_amount",
"(",
"asset",
",",
"target",
")",
"return",
"self",
".",
"order",
"(",
"asset",
",",
"amount",
",",
"limit_price",
"=",
"limit_price",
",",
"stop_price",
"=",
"stop_price",
",",
"style",
"=",
"style",
")"
] | Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
Parameters
----------
asset : Asset
The asset that this order is for.
target : int
The desired number of shares of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target`` does not take into account any open orders. For
example:
.. code-block:: python
order_target(sid(0), 10)
order_target(sid(0), 10)
This code will result in 20 shares of ``sid(0)`` because the first
call to ``order_target`` will not have been filled when the second
``order_target`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target_percent`
:func:`zipline.api.order_target_value` | [
"Place",
"an",
"order",
"to",
"adjust",
"a",
"position",
"to",
"a",
"target",
"number",
"of",
"shares",
".",
"If",
"the",
"position",
"doesn",
"t",
"already",
"exist",
"this",
"is",
"equivalent",
"to",
"placing",
"a",
"new",
"order",
".",
"If",
"the",
"position",
"does",
"exist",
"this",
"is",
"equivalent",
"to",
"placing",
"an",
"order",
"for",
"the",
"difference",
"between",
"the",
"target",
"number",
"of",
"shares",
"and",
"the",
"current",
"number",
"of",
"shares",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1670-L1732 |
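The `order_target` docstring above warns that open orders are not taken into account. A hedged sketch of the usual guard, using a hypothetical ticker assumed to exist in the ingested bundle:

```python
from zipline.api import get_open_orders, order_target, symbol

def handle_data(context, data):
    asset = symbol('AAPL')  # hypothetical ticker for illustration
    # Avoid the double-fill pitfall noted in the docstring: only re-target
    # the position when no order for this asset is still open.
    if not get_open_orders(asset):
        order_target(asset, 10)
```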
26,104 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.order_target_value | def order_target_value(self,
asset,
target,
limit_price=None,
stop_price=None,
style=None):
"""Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
If the Asset being ordered is a Future, the 'target value' calculated
is actually the target exposure, as Futures have no 'value'.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired total value of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target_value`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_value(sid(0), 10)
order_target_value(sid(0), 10)
This code will result in 20 dollars of ``sid(0)`` because the first
call to ``order_target_value`` will not have been filled when the
second ``order_target_value`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_percent`
"""
if not self._can_order_asset(asset):
return None
target_amount = self._calculate_order_value_amount(asset, target)
amount = self._calculate_order_target_amount(asset, target_amount)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style) | python | def order_target_value(self,
asset,
target,
limit_price=None,
stop_price=None,
style=None):
"""Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
If the Asset being ordered is a Future, the 'target value' calculated
is actually the target exposure, as Futures have no 'value'.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired total value of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target_value`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_value(sid(0), 10)
order_target_value(sid(0), 10)
This code will result in 20 dollars of ``sid(0)`` because the first
call to ``order_target_value`` will not have been filled when the
second ``order_target_value`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_percent`
"""
if not self._can_order_asset(asset):
return None
target_amount = self._calculate_order_value_amount(asset, target)
amount = self._calculate_order_target_amount(asset, target_amount)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style) | [
"def",
"order_target_value",
"(",
"self",
",",
"asset",
",",
"target",
",",
"limit_price",
"=",
"None",
",",
"stop_price",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_can_order_asset",
"(",
"asset",
")",
":",
"return",
"None",
"target_amount",
"=",
"self",
".",
"_calculate_order_value_amount",
"(",
"asset",
",",
"target",
")",
"amount",
"=",
"self",
".",
"_calculate_order_target_amount",
"(",
"asset",
",",
"target_amount",
")",
"return",
"self",
".",
"order",
"(",
"asset",
",",
"amount",
",",
"limit_price",
"=",
"limit_price",
",",
"stop_price",
"=",
"stop_price",
",",
"style",
"=",
"style",
")"
] | Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
If the Asset being ordered is a Future, the 'target value' calculated
is actually the target exposure, as Futures have no 'value'.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired total value of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target_value`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_value(sid(0), 10)
order_target_value(sid(0), 10)
This code will result in 20 dollars of ``sid(0)`` because the first
call to ``order_target_value`` will not have been filled when the
second ``order_target_value`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_percent` | [
"Place",
"an",
"order",
"to",
"adjust",
"a",
"position",
"to",
"a",
"target",
"value",
".",
"If",
"the",
"position",
"doesn",
"t",
"already",
"exist",
"this",
"is",
"equivalent",
"to",
"placing",
"a",
"new",
"order",
".",
"If",
"the",
"position",
"does",
"exist",
"this",
"is",
"equivalent",
"to",
"placing",
"an",
"order",
"for",
"the",
"difference",
"between",
"the",
"target",
"value",
"and",
"the",
"current",
"value",
".",
"If",
"the",
"Asset",
"being",
"ordered",
"is",
"a",
"Future",
"the",
"target",
"value",
"calculated",
"is",
"actually",
"the",
"target",
"exposure",
"as",
"Futures",
"have",
"no",
"value",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1743-L1807 |
26,105 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.order_target_percent | def order_target_percent(self, asset, target,
limit_price=None, stop_price=None, style=None):
"""Place an order to adjust a position to a target percent of the
current portfolio value. If the position doesn't already exist, this is
equivalent to placing a new order. If the position does exist, this is
equivalent to placing an order for the difference between the target
percent and the current percent.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired percentage of the portfolio value to allocate to
``asset``. This is specified as a decimal, for example:
0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
        ``order_target_percent`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_percent(sid(0), 10)
order_target_percent(sid(0), 10)
This code will result in 20% of the portfolio being allocated to sid(0)
because the first call to ``order_target_percent`` will not have been
filled when the second ``order_target_percent`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_target_percent_amount(asset, target)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style) | python | def order_target_percent(self, asset, target,
limit_price=None, stop_price=None, style=None):
"""Place an order to adjust a position to a target percent of the
current portfolio value. If the position doesn't already exist, this is
equivalent to placing a new order. If the position does exist, this is
equivalent to placing an order for the difference between the target
percent and the current percent.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired percentage of the portfolio value to allocate to
``asset``. This is specified as a decimal, for example:
0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
        ``order_target_percent`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_percent(sid(0), 10)
order_target_percent(sid(0), 10)
This code will result in 20% of the portfolio being allocated to sid(0)
because the first call to ``order_target_percent`` will not have been
filled when the second ``order_target_percent`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_target_percent_amount(asset, target)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style) | [
"def",
"order_target_percent",
"(",
"self",
",",
"asset",
",",
"target",
",",
"limit_price",
"=",
"None",
",",
"stop_price",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_can_order_asset",
"(",
"asset",
")",
":",
"return",
"None",
"amount",
"=",
"self",
".",
"_calculate_order_target_percent_amount",
"(",
"asset",
",",
"target",
")",
"return",
"self",
".",
"order",
"(",
"asset",
",",
"amount",
",",
"limit_price",
"=",
"limit_price",
",",
"stop_price",
"=",
"stop_price",
",",
"style",
"=",
"style",
")"
] | Place an order to adjust a position to a target percent of the
current portfolio value. If the position doesn't already exist, this is
equivalent to placing a new order. If the position does exist, this is
equivalent to placing an order for the difference between the target
percent and the current percent.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired percentage of the portfolio value to allocate to
``asset``. This is specified as a decimal, for example:
0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
        ``order_target_percent`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_percent(sid(0), 10)
order_target_percent(sid(0), 10)
This code will result in 20% of the portfolio being allocated to sid(0)
because the first call to ``order_target_percent`` will not have been
filled when the second ``order_target_percent`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_value` | [
"Place",
"an",
"order",
"to",
"adjust",
"a",
"position",
"to",
"a",
"target",
"percent",
"of",
"the",
"current",
"portfolio",
"value",
".",
"If",
"the",
"position",
"doesn",
"t",
"already",
"exist",
"this",
"is",
"equivalent",
"to",
"placing",
"a",
"new",
"order",
".",
"If",
"the",
"position",
"does",
"exist",
"this",
"is",
"equivalent",
"to",
"placing",
"an",
"order",
"for",
"the",
"difference",
"between",
"the",
"target",
"percent",
"and",
"the",
"current",
"percent",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1811-L1870 |
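A sketch of a monthly rebalance built on the `order_target_percent` entry above; the tickers and weights are hypothetical, and a bundle containing them is assumed.

```python
from zipline.api import date_rules, order_target_percent, schedule_function, symbol

def initialize(context):
    # Hypothetical target weights; 0.6 means 60% of portfolio value.
    context.weights = {'AAPL': 0.6, 'MSFT': 0.4}
    schedule_function(rebalance, date_rules.month_start())

def rebalance(context, data):
    for ticker, weight in context.weights.items():
        order_target_percent(symbol(ticker), weight)
```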
26,106 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.batch_market_order | def batch_market_order(self, share_counts):
"""Place a batch market order for multiple assets.
Parameters
----------
share_counts : pd.Series[Asset -> int]
Map from asset to number of shares to order for that asset.
Returns
-------
order_ids : pd.Index[str]
Index of ids for newly-created orders.
"""
style = MarketOrder()
order_args = [
(asset, amount, style)
for (asset, amount) in iteritems(share_counts)
if amount
]
return self.blotter.batch_order(order_args) | python | def batch_market_order(self, share_counts):
"""Place a batch market order for multiple assets.
Parameters
----------
share_counts : pd.Series[Asset -> int]
Map from asset to number of shares to order for that asset.
Returns
-------
order_ids : pd.Index[str]
Index of ids for newly-created orders.
"""
style = MarketOrder()
order_args = [
(asset, amount, style)
for (asset, amount) in iteritems(share_counts)
if amount
]
return self.blotter.batch_order(order_args) | [
"def",
"batch_market_order",
"(",
"self",
",",
"share_counts",
")",
":",
"style",
"=",
"MarketOrder",
"(",
")",
"order_args",
"=",
"[",
"(",
"asset",
",",
"amount",
",",
"style",
")",
"for",
"(",
"asset",
",",
"amount",
")",
"in",
"iteritems",
"(",
"share_counts",
")",
"if",
"amount",
"]",
"return",
"self",
".",
"blotter",
".",
"batch_order",
"(",
"order_args",
")"
] | Place a batch market order for multiple assets.
Parameters
----------
share_counts : pd.Series[Asset -> int]
Map from asset to number of shares to order for that asset.
Returns
-------
order_ids : pd.Index[str]
Index of ids for newly-created orders. | [
"Place",
"a",
"batch",
"market",
"order",
"for",
"multiple",
"assets",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1879-L1898 |
26,107 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.get_open_orders | def get_open_orders(self, asset=None):
"""Retrieve all of the current open orders.
Parameters
----------
asset : Asset
If passed and not None, return only the open orders for the given
asset instead of all open orders.
Returns
-------
open_orders : dict[list[Order]] or list[Order]
If no asset is passed this will return a dict mapping Assets
to a list containing all the open orders for the asset.
If an asset is passed then this will return a list of the open
orders for this asset.
"""
if asset is None:
return {
key: [order.to_api_obj() for order in orders]
for key, orders in iteritems(self.blotter.open_orders)
if orders
}
if asset in self.blotter.open_orders:
orders = self.blotter.open_orders[asset]
return [order.to_api_obj() for order in orders]
return [] | python | def get_open_orders(self, asset=None):
"""Retrieve all of the current open orders.
Parameters
----------
asset : Asset
If passed and not None, return only the open orders for the given
asset instead of all open orders.
Returns
-------
open_orders : dict[list[Order]] or list[Order]
If no asset is passed this will return a dict mapping Assets
to a list containing all the open orders for the asset.
If an asset is passed then this will return a list of the open
orders for this asset.
"""
if asset is None:
return {
key: [order.to_api_obj() for order in orders]
for key, orders in iteritems(self.blotter.open_orders)
if orders
}
if asset in self.blotter.open_orders:
orders = self.blotter.open_orders[asset]
return [order.to_api_obj() for order in orders]
return [] | [
"def",
"get_open_orders",
"(",
"self",
",",
"asset",
"=",
"None",
")",
":",
"if",
"asset",
"is",
"None",
":",
"return",
"{",
"key",
":",
"[",
"order",
".",
"to_api_obj",
"(",
")",
"for",
"order",
"in",
"orders",
"]",
"for",
"key",
",",
"orders",
"in",
"iteritems",
"(",
"self",
".",
"blotter",
".",
"open_orders",
")",
"if",
"orders",
"}",
"if",
"asset",
"in",
"self",
".",
"blotter",
".",
"open_orders",
":",
"orders",
"=",
"self",
".",
"blotter",
".",
"open_orders",
"[",
"asset",
"]",
"return",
"[",
"order",
".",
"to_api_obj",
"(",
")",
"for",
"order",
"in",
"orders",
"]",
"return",
"[",
"]"
] | Retrieve all of the current open orders.
Parameters
----------
asset : Asset
If passed and not None, return only the open orders for the given
asset instead of all open orders.
Returns
-------
open_orders : dict[list[Order]] or list[Order]
If no asset is passed this will return a dict mapping Assets
to a list containing all the open orders for the asset.
If an asset is passed then this will return a list of the open
orders for this asset. | [
"Retrieve",
"all",
"of",
"the",
"current",
"open",
"orders",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1903-L1929 |
26,108 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.get_order | def get_order(self, order_id):
"""Lookup an order based on the order id returned from one of the
order functions.
Parameters
----------
order_id : str
The unique identifier for the order.
Returns
-------
order : Order
The order object.
"""
if order_id in self.blotter.orders:
return self.blotter.orders[order_id].to_api_obj() | python | def get_order(self, order_id):
"""Lookup an order based on the order id returned from one of the
order functions.
Parameters
----------
order_id : str
The unique identifier for the order.
Returns
-------
order : Order
The order object.
"""
if order_id in self.blotter.orders:
return self.blotter.orders[order_id].to_api_obj() | [
"def",
"get_order",
"(",
"self",
",",
"order_id",
")",
":",
"if",
"order_id",
"in",
"self",
".",
"blotter",
".",
"orders",
":",
"return",
"self",
".",
"blotter",
".",
"orders",
"[",
"order_id",
"]",
".",
"to_api_obj",
"(",
")"
] | Lookup an order based on the order id returned from one of the
order functions.
Parameters
----------
order_id : str
The unique identifier for the order.
Returns
-------
order : Order
The order object. | [
"Lookup",
"an",
"order",
"based",
"on",
"the",
"order",
"id",
"returned",
"from",
"one",
"of",
"the",
"order",
"functions",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1932-L1947 |
26,109 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.cancel_order | def cancel_order(self, order_param):
"""Cancel an open order.
Parameters
----------
order_param : str or Order
The order_id or order object to cancel.
"""
order_id = order_param
if isinstance(order_param, zipline.protocol.Order):
order_id = order_param.id
self.blotter.cancel(order_id) | python | def cancel_order(self, order_param):
"""Cancel an open order.
Parameters
----------
order_param : str or Order
The order_id or order object to cancel.
"""
order_id = order_param
if isinstance(order_param, zipline.protocol.Order):
order_id = order_param.id
self.blotter.cancel(order_id) | [
"def",
"cancel_order",
"(",
"self",
",",
"order_param",
")",
":",
"order_id",
"=",
"order_param",
"if",
"isinstance",
"(",
"order_param",
",",
"zipline",
".",
"protocol",
".",
"Order",
")",
":",
"order_id",
"=",
"order_param",
".",
"id",
"self",
".",
"blotter",
".",
"cancel",
"(",
"order_id",
")"
] | Cancel an open order.
Parameters
----------
order_param : str or Order
The order_id or order object to cancel. | [
"Cancel",
"an",
"open",
"order",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1950-L1962 |
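A sketch combining the order-management entries above (`get_order` and `cancel_order`): place a limit order, then look it up and cancel it by id. The ticker and limit price are illustrative.

```python
from zipline.api import cancel_order, get_order, order, symbol

def handle_data(context, data):
    # Place a limit order (price is illustrative) and keep its id.
    order_id = order(symbol('AAPL'), 10, limit_price=100.0)
    if order_id is not None:
        placed = get_order(order_id)
        # Cancel it if nothing has filled yet.
        if placed is not None and placed.filled == 0:
            cancel_order(order_id)
```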
26,110 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.register_account_control | def register_account_control(self, control):
"""
Register a new AccountControl to be checked on each bar.
"""
if self.initialized:
raise RegisterAccountControlPostInit()
self.account_controls.append(control) | python | def register_account_control(self, control):
"""
Register a new AccountControl to be checked on each bar.
"""
if self.initialized:
raise RegisterAccountControlPostInit()
self.account_controls.append(control) | [
"def",
"register_account_control",
"(",
"self",
",",
"control",
")",
":",
"if",
"self",
".",
"initialized",
":",
"raise",
"RegisterAccountControlPostInit",
"(",
")",
"self",
".",
"account_controls",
".",
"append",
"(",
"control",
")"
] | Register a new AccountControl to be checked on each bar. | [
"Register",
"a",
"new",
"AccountControl",
"to",
"be",
"checked",
"on",
"each",
"bar",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L2028-L2034 |
26,111 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.set_min_leverage | def set_min_leverage(self, min_leverage, grace_period):
"""Set a limit on the minimum leverage of the algorithm.
Parameters
----------
min_leverage : float
The minimum leverage for the algorithm.
grace_period : pd.Timedelta
The offset from the start date used to enforce a minimum leverage.
"""
deadline = self.sim_params.start_session + grace_period
control = MinLeverage(min_leverage, deadline)
self.register_account_control(control) | python | def set_min_leverage(self, min_leverage, grace_period):
"""Set a limit on the minimum leverage of the algorithm.
Parameters
----------
min_leverage : float
The minimum leverage for the algorithm.
grace_period : pd.Timedelta
The offset from the start date used to enforce a minimum leverage.
"""
deadline = self.sim_params.start_session + grace_period
control = MinLeverage(min_leverage, deadline)
self.register_account_control(control) | [
"def",
"set_min_leverage",
"(",
"self",
",",
"min_leverage",
",",
"grace_period",
")",
":",
"deadline",
"=",
"self",
".",
"sim_params",
".",
"start_session",
"+",
"grace_period",
"control",
"=",
"MinLeverage",
"(",
"min_leverage",
",",
"deadline",
")",
"self",
".",
"register_account_control",
"(",
"control",
")"
] | Set a limit on the minimum leverage of the algorithm.
Parameters
----------
min_leverage : float
The minimum leverage for the algorithm.
grace_period : pd.Timedelta
The offset from the start date used to enforce a minimum leverage. | [
"Set",
"a",
"limit",
"on",
"the",
"minimum",
"leverage",
"of",
"the",
"algorithm",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L2057-L2069 |
26,112 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.register_trading_control | def register_trading_control(self, control):
"""
Register a new TradingControl to be checked prior to order calls.
"""
if self.initialized:
raise RegisterTradingControlPostInit()
self.trading_controls.append(control) | python | def register_trading_control(self, control):
"""
Register a new TradingControl to be checked prior to order calls.
"""
if self.initialized:
raise RegisterTradingControlPostInit()
self.trading_controls.append(control) | [
"def",
"register_trading_control",
"(",
"self",
",",
"control",
")",
":",
"if",
"self",
".",
"initialized",
":",
"raise",
"RegisterTradingControlPostInit",
"(",
")",
"self",
".",
"trading_controls",
".",
"append",
"(",
"control",
")"
] | Register a new TradingControl to be checked prior to order calls. | [
"Register",
"a",
"new",
"TradingControl",
"to",
"be",
"checked",
"prior",
"to",
"order",
"calls",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L2075-L2081 |
26,113 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.set_max_order_count | def set_max_order_count(self, max_count, on_error='fail'):
"""Set a limit on the number of orders that can be placed in a single
day.
Parameters
----------
max_count : int
The maximum number of orders that can be placed on any single day.
"""
control = MaxOrderCount(on_error, max_count)
self.register_trading_control(control) | python | def set_max_order_count(self, max_count, on_error='fail'):
"""Set a limit on the number of orders that can be placed in a single
day.
Parameters
----------
max_count : int
The maximum number of orders that can be placed on any single day.
"""
control = MaxOrderCount(on_error, max_count)
self.register_trading_control(control) | [
"def",
"set_max_order_count",
"(",
"self",
",",
"max_count",
",",
"on_error",
"=",
"'fail'",
")",
":",
"control",
"=",
"MaxOrderCount",
"(",
"on_error",
",",
"max_count",
")",
"self",
".",
"register_trading_control",
"(",
"control",
")"
] | Set a limit on the number of orders that can be placed in a single
day.
Parameters
----------
max_count : int
The maximum number of orders that can be placed on any single day. | [
"Set",
"a",
"limit",
"on",
"the",
"number",
"of",
"orders",
"that",
"can",
"be",
"placed",
"in",
"a",
"single",
"day",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L2146-L2156 |
26,114 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.attach_pipeline | def attach_pipeline(self, pipeline, name, chunks=None, eager=True):
"""Register a pipeline to be computed at the start of each day.
Parameters
----------
pipeline : Pipeline
The pipeline to have computed.
name : str
The name of the pipeline.
chunks : int or iterator, optional
The number of days to compute pipeline results for. Increasing
this number will make it longer to get the first results but
may improve the total runtime of the simulation. If an iterator
is passed, we will run in chunks based on values of the iterator.
Default is True.
eager : bool, optional
Whether or not to compute this pipeline prior to
before_trading_start.
Returns
-------
pipeline : Pipeline
Returns the pipeline that was attached unchanged.
See Also
--------
:func:`zipline.api.pipeline_output`
"""
if chunks is None:
# Make the first chunk smaller to get more immediate results:
# (one week, then every half year)
chunks = chain([5], repeat(126))
elif isinstance(chunks, int):
chunks = repeat(chunks)
if name in self._pipelines:
raise DuplicatePipelineName(name=name)
self._pipelines[name] = AttachedPipeline(pipeline, iter(chunks), eager)
# Return the pipeline to allow expressions like
# p = attach_pipeline(Pipeline(), 'name')
return pipeline | python | def attach_pipeline(self, pipeline, name, chunks=None, eager=True):
"""Register a pipeline to be computed at the start of each day.
Parameters
----------
pipeline : Pipeline
The pipeline to have computed.
name : str
The name of the pipeline.
chunks : int or iterator, optional
The number of days to compute pipeline results for. Increasing
this number will make it longer to get the first results but
may improve the total runtime of the simulation. If an iterator
is passed, we will run in chunks based on values of the iterator.
Default is True.
eager : bool, optional
Whether or not to compute this pipeline prior to
before_trading_start.
Returns
-------
pipeline : Pipeline
Returns the pipeline that was attached unchanged.
See Also
--------
:func:`zipline.api.pipeline_output`
"""
if chunks is None:
# Make the first chunk smaller to get more immediate results:
# (one week, then every half year)
chunks = chain([5], repeat(126))
elif isinstance(chunks, int):
chunks = repeat(chunks)
if name in self._pipelines:
raise DuplicatePipelineName(name=name)
self._pipelines[name] = AttachedPipeline(pipeline, iter(chunks), eager)
# Return the pipeline to allow expressions like
# p = attach_pipeline(Pipeline(), 'name')
return pipeline | [
"def",
"attach_pipeline",
"(",
"self",
",",
"pipeline",
",",
"name",
",",
"chunks",
"=",
"None",
",",
"eager",
"=",
"True",
")",
":",
"if",
"chunks",
"is",
"None",
":",
"# Make the first chunk smaller to get more immediate results:",
"# (one week, then every half year)",
"chunks",
"=",
"chain",
"(",
"[",
"5",
"]",
",",
"repeat",
"(",
"126",
")",
")",
"elif",
"isinstance",
"(",
"chunks",
",",
"int",
")",
":",
"chunks",
"=",
"repeat",
"(",
"chunks",
")",
"if",
"name",
"in",
"self",
".",
"_pipelines",
":",
"raise",
"DuplicatePipelineName",
"(",
"name",
"=",
"name",
")",
"self",
".",
"_pipelines",
"[",
"name",
"]",
"=",
"AttachedPipeline",
"(",
"pipeline",
",",
"iter",
"(",
"chunks",
")",
",",
"eager",
")",
"# Return the pipeline to allow expressions like",
"# p = attach_pipeline(Pipeline(), 'name')",
"return",
"pipeline"
] | Register a pipeline to be computed at the start of each day.
Parameters
----------
pipeline : Pipeline
The pipeline to have computed.
name : str
The name of the pipeline.
chunks : int or iterator, optional
The number of days to compute pipeline results for. Increasing
this number will make it longer to get the first results but
may improve the total runtime of the simulation. If an iterator
is passed, we will run in chunks based on values of the iterator.
Default is True.
eager : bool, optional
Whether or not to compute this pipeline prior to
before_trading_start.
Returns
-------
pipeline : Pipeline
Returns the pipeline that was attached unchanged.
See Also
--------
:func:`zipline.api.pipeline_output` | [
"Register",
"a",
"pipeline",
"to",
"be",
"computed",
"at",
"the",
"start",
"of",
"each",
"day",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L2228-L2270 |
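A sketch of the attach/compute cycle described in the `attach_pipeline` row above, paired with `pipeline_output`; the pipeline column and name are illustrative.

```python
from zipline.api import attach_pipeline, pipeline_output
from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage

def make_pipeline():
    # One illustrative column: a 20-day simple moving average of close prices.
    sma_20 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=20)
    return Pipeline(columns={'sma_20': sma_20})

def initialize(context):
    attach_pipeline(make_pipeline(), 'example_pipeline')

def before_trading_start(context, data):
    # A DataFrame indexed by asset, one row per asset passing the screen today.
    context.pipeline_data = pipeline_output('example_pipeline')
```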
26,115 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm._pipeline_output | def _pipeline_output(self, pipeline, chunks, name):
"""
Internal implementation of `pipeline_output`.
"""
today = normalize_date(self.get_datetime())
try:
data = self._pipeline_cache.get(name, today)
except KeyError:
# Calculate the next block.
data, valid_until = self.run_pipeline(
pipeline, today, next(chunks),
)
self._pipeline_cache.set(name, data, valid_until)
# Now that we have a cached result, try to return the data for today.
try:
return data.loc[today]
except KeyError:
# This happens if no assets passed the pipeline screen on a given
# day.
return pd.DataFrame(index=[], columns=data.columns) | python | def _pipeline_output(self, pipeline, chunks, name):
"""
Internal implementation of `pipeline_output`.
"""
today = normalize_date(self.get_datetime())
try:
data = self._pipeline_cache.get(name, today)
except KeyError:
# Calculate the next block.
data, valid_until = self.run_pipeline(
pipeline, today, next(chunks),
)
self._pipeline_cache.set(name, data, valid_until)
# Now that we have a cached result, try to return the data for today.
try:
return data.loc[today]
except KeyError:
# This happens if no assets passed the pipeline screen on a given
# day.
return pd.DataFrame(index=[], columns=data.columns) | [
"def",
"_pipeline_output",
"(",
"self",
",",
"pipeline",
",",
"chunks",
",",
"name",
")",
":",
"today",
"=",
"normalize_date",
"(",
"self",
".",
"get_datetime",
"(",
")",
")",
"try",
":",
"data",
"=",
"self",
".",
"_pipeline_cache",
".",
"get",
"(",
"name",
",",
"today",
")",
"except",
"KeyError",
":",
"# Calculate the next block.",
"data",
",",
"valid_until",
"=",
"self",
".",
"run_pipeline",
"(",
"pipeline",
",",
"today",
",",
"next",
"(",
"chunks",
")",
",",
")",
"self",
".",
"_pipeline_cache",
".",
"set",
"(",
"name",
",",
"data",
",",
"valid_until",
")",
"# Now that we have a cached result, try to return the data for today.",
"try",
":",
"return",
"data",
".",
"loc",
"[",
"today",
"]",
"except",
"KeyError",
":",
"# This happens if no assets passed the pipeline screen on a given",
"# day.",
"return",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"[",
"]",
",",
"columns",
"=",
"data",
".",
"columns",
")"
] | Internal implementation of `pipeline_output`. | [
"Internal",
"implementation",
"of",
"pipeline_output",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L2308-L2328 |
26,116 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.run_pipeline | def run_pipeline(self, pipeline, start_session, chunksize):
"""
Compute `pipeline`, providing values for at least `start_date`.
Produces a DataFrame containing data for days between `start_date` and
`end_date`, where `end_date` is defined by:
`end_date = min(start_date + chunksize trading days,
simulation_end)`
Returns
-------
(data, valid_until) : tuple (pd.DataFrame, pd.Timestamp)
See Also
--------
PipelineEngine.run_pipeline
"""
sessions = self.trading_calendar.all_sessions
# Load data starting from the previous trading day...
start_date_loc = sessions.get_loc(start_session)
# ...continuing until either the day before the simulation end, or
# until chunksize days of data have been loaded.
sim_end_session = self.sim_params.end_session
end_loc = min(
start_date_loc + chunksize,
sessions.get_loc(sim_end_session)
)
end_session = sessions[end_loc]
return \
self.engine.run_pipeline(pipeline, start_session, end_session), \
end_session | python | def run_pipeline(self, pipeline, start_session, chunksize):
"""
Compute `pipeline`, providing values for at least `start_date`.
Produces a DataFrame containing data for days between `start_date` and
`end_date`, where `end_date` is defined by:
`end_date = min(start_date + chunksize trading days,
simulation_end)`
Returns
-------
(data, valid_until) : tuple (pd.DataFrame, pd.Timestamp)
See Also
--------
PipelineEngine.run_pipeline
"""
sessions = self.trading_calendar.all_sessions
# Load data starting from the previous trading day...
start_date_loc = sessions.get_loc(start_session)
# ...continuing until either the day before the simulation end, or
# until chunksize days of data have been loaded.
sim_end_session = self.sim_params.end_session
end_loc = min(
start_date_loc + chunksize,
sessions.get_loc(sim_end_session)
)
end_session = sessions[end_loc]
return \
self.engine.run_pipeline(pipeline, start_session, end_session), \
end_session | [
"def",
"run_pipeline",
"(",
"self",
",",
"pipeline",
",",
"start_session",
",",
"chunksize",
")",
":",
"sessions",
"=",
"self",
".",
"trading_calendar",
".",
"all_sessions",
"# Load data starting from the previous trading day...",
"start_date_loc",
"=",
"sessions",
".",
"get_loc",
"(",
"start_session",
")",
"# ...continuing until either the day before the simulation end, or",
"# until chunksize days of data have been loaded.",
"sim_end_session",
"=",
"self",
".",
"sim_params",
".",
"end_session",
"end_loc",
"=",
"min",
"(",
"start_date_loc",
"+",
"chunksize",
",",
"sessions",
".",
"get_loc",
"(",
"sim_end_session",
")",
")",
"end_session",
"=",
"sessions",
"[",
"end_loc",
"]",
"return",
"self",
".",
"engine",
".",
"run_pipeline",
"(",
"pipeline",
",",
"start_session",
",",
"end_session",
")",
",",
"end_session"
] | Compute `pipeline`, providing values for at least `start_date`.
Produces a DataFrame containing data for days between `start_date` and
`end_date`, where `end_date` is defined by:
`end_date = min(start_date + chunksize trading days,
simulation_end)`
Returns
-------
(data, valid_until) : tuple (pd.DataFrame, pd.Timestamp)
See Also
--------
PipelineEngine.run_pipeline | [
"Compute",
"pipeline",
"providing",
"values",
"for",
"at",
"least",
"start_date",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L2330-L2366 |
26,117 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.all_api_methods | def all_api_methods(cls):
"""
Return a list of all the TradingAlgorithm API methods.
"""
return [
fn for fn in itervalues(vars(cls))
if getattr(fn, 'is_api_method', False)
] | python | def all_api_methods(cls):
"""
Return a list of all the TradingAlgorithm API methods.
"""
return [
fn for fn in itervalues(vars(cls))
if getattr(fn, 'is_api_method', False)
] | [
"def",
"all_api_methods",
"(",
"cls",
")",
":",
"return",
"[",
"fn",
"for",
"fn",
"in",
"itervalues",
"(",
"vars",
"(",
"cls",
")",
")",
"if",
"getattr",
"(",
"fn",
",",
"'is_api_method'",
",",
"False",
")",
"]"
] | Return a list of all the TradingAlgorithm API methods. | [
"Return",
"a",
"list",
"of",
"all",
"the",
"TradingAlgorithm",
"API",
"methods",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L2394-L2401 |
26,118 | quantopian/zipline | zipline/utils/argcheck.py | _expect_extra | def _expect_extra(expected, present, exc_unexpected, exc_missing, exc_args):
"""
    Checks for the presence of an extra to the argument list. Raises exceptions
if this is unexpected or if it is missing and expected.
"""
if present:
if not expected:
raise exc_unexpected(*exc_args)
elif expected and expected is not Argument.ignore:
raise exc_missing(*exc_args) | python | def _expect_extra(expected, present, exc_unexpected, exc_missing, exc_args):
"""
    Checks for the presence of an extra to the argument list. Raises exceptions
if this is unexpected or if it is missing and expected.
"""
if present:
if not expected:
raise exc_unexpected(*exc_args)
elif expected and expected is not Argument.ignore:
raise exc_missing(*exc_args) | [
"def",
"_expect_extra",
"(",
"expected",
",",
"present",
",",
"exc_unexpected",
",",
"exc_missing",
",",
"exc_args",
")",
":",
"if",
"present",
":",
"if",
"not",
"expected",
":",
"raise",
"exc_unexpected",
"(",
"*",
"exc_args",
")",
"elif",
"expected",
"and",
"expected",
"is",
"not",
"Argument",
".",
"ignore",
":",
"raise",
"exc_missing",
"(",
"*",
"exc_args",
")"
    ] | Checks for the presence of an extra to the argument list. Raises exceptions
if this is unexpected or if it is missing and expected. | [
"Checks",
"for",
"the",
"presence",
"of",
"an",
"extra",
"to",
"the",
"argument",
"list",
".",
"Raises",
"expections",
"if",
"this",
"is",
"unexpected",
"or",
"if",
"it",
"is",
"missing",
"and",
"expected",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/argcheck.py#L131-L140 |
26,119 | quantopian/zipline | zipline/finance/asset_restrictions.py | StaticRestrictions.is_restricted | def is_restricted(self, assets, dt):
"""
An asset is restricted for all dts if it is in the static list.
"""
if isinstance(assets, Asset):
return assets in self._restricted_set
return pd.Series(
index=pd.Index(assets),
data=vectorized_is_element(assets, self._restricted_set)
) | python | def is_restricted(self, assets, dt):
"""
An asset is restricted for all dts if it is in the static list.
"""
if isinstance(assets, Asset):
return assets in self._restricted_set
return pd.Series(
index=pd.Index(assets),
data=vectorized_is_element(assets, self._restricted_set)
) | [
"def",
"is_restricted",
"(",
"self",
",",
"assets",
",",
"dt",
")",
":",
"if",
"isinstance",
"(",
"assets",
",",
"Asset",
")",
":",
"return",
"assets",
"in",
"self",
".",
"_restricted_set",
"return",
"pd",
".",
"Series",
"(",
"index",
"=",
"pd",
".",
"Index",
"(",
"assets",
")",
",",
"data",
"=",
"vectorized_is_element",
"(",
"assets",
",",
"self",
".",
"_restricted_set",
")",
")"
] | An asset is restricted for all dts if it is in the static list. | [
"An",
"asset",
"is",
"restricted",
"for",
"all",
"dts",
"if",
"it",
"is",
"in",
"the",
"static",
"list",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/asset_restrictions.py#L143-L152 |
26,120 | quantopian/zipline | zipline/finance/asset_restrictions.py | HistoricalRestrictions.is_restricted | def is_restricted(self, assets, dt):
"""
Returns whether or not an asset or iterable of assets is restricted
on a dt.
"""
if isinstance(assets, Asset):
return self._is_restricted_for_asset(assets, dt)
is_restricted = partial(self._is_restricted_for_asset, dt=dt)
return pd.Series(
index=pd.Index(assets),
data=vectorize(is_restricted, otypes=[bool])(assets)
) | python | def is_restricted(self, assets, dt):
"""
Returns whether or not an asset or iterable of assets is restricted
on a dt.
"""
if isinstance(assets, Asset):
return self._is_restricted_for_asset(assets, dt)
is_restricted = partial(self._is_restricted_for_asset, dt=dt)
return pd.Series(
index=pd.Index(assets),
data=vectorize(is_restricted, otypes=[bool])(assets)
) | [
"def",
"is_restricted",
"(",
"self",
",",
"assets",
",",
"dt",
")",
":",
"if",
"isinstance",
"(",
"assets",
",",
"Asset",
")",
":",
"return",
"self",
".",
"_is_restricted_for_asset",
"(",
"assets",
",",
"dt",
")",
"is_restricted",
"=",
"partial",
"(",
"self",
".",
"_is_restricted_for_asset",
",",
"dt",
"=",
"dt",
")",
"return",
"pd",
".",
"Series",
"(",
"index",
"=",
"pd",
".",
"Index",
"(",
"assets",
")",
",",
"data",
"=",
"vectorize",
"(",
"is_restricted",
",",
"otypes",
"=",
"[",
"bool",
"]",
")",
"(",
"assets",
")",
")"
] | Returns whether or not an asset or iterable of assets is restricted
on a dt. | [
"Returns",
"whether",
"or",
"not",
"an",
"asset",
"or",
"iterable",
"of",
"assets",
"is",
"restricted",
"on",
"a",
"dt",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/asset_restrictions.py#L177-L189 |
26,121 | quantopian/zipline | zipline/finance/ledger.py | PositionTracker.pay_dividends | def pay_dividends(self, next_trading_day):
"""
Returns a cash payment based on the dividends that should be paid out
according to the accumulated bookkeeping of earned, unpaid, and stock
dividends.
"""
net_cash_payment = 0.0
try:
payments = self._unpaid_dividends[next_trading_day]
# Mark these dividends as paid by dropping them from our unpaid
del self._unpaid_dividends[next_trading_day]
except KeyError:
payments = []
# representing the fact that we're required to reimburse the owner of
# the stock for any dividends paid while borrowing.
for payment in payments:
net_cash_payment += payment['amount']
# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
try:
stock_payments = self._unpaid_stock_dividends[next_trading_day]
except KeyError:
stock_payments = []
for stock_payment in stock_payments:
payment_asset = stock_payment['payment_asset']
share_count = stock_payment['share_count']
# note we create a Position for stock dividend if we don't
# already own the asset
if payment_asset in self.positions:
position = self.positions[payment_asset]
else:
position = self.positions[payment_asset] = Position(
payment_asset,
)
position.amount += share_count
return net_cash_payment | python | def pay_dividends(self, next_trading_day):
"""
Returns a cash payment based on the dividends that should be paid out
according to the accumulated bookkeeping of earned, unpaid, and stock
dividends.
"""
net_cash_payment = 0.0
try:
payments = self._unpaid_dividends[next_trading_day]
# Mark these dividends as paid by dropping them from our unpaid
del self._unpaid_dividends[next_trading_day]
except KeyError:
payments = []
# representing the fact that we're required to reimburse the owner of
# the stock for any dividends paid while borrowing.
for payment in payments:
net_cash_payment += payment['amount']
# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
try:
stock_payments = self._unpaid_stock_dividends[next_trading_day]
except KeyError:
stock_payments = []
for stock_payment in stock_payments:
payment_asset = stock_payment['payment_asset']
share_count = stock_payment['share_count']
# note we create a Position for stock dividend if we don't
# already own the asset
if payment_asset in self.positions:
position = self.positions[payment_asset]
else:
position = self.positions[payment_asset] = Position(
payment_asset,
)
position.amount += share_count
return net_cash_payment | [
"def",
"pay_dividends",
"(",
"self",
",",
"next_trading_day",
")",
":",
"net_cash_payment",
"=",
"0.0",
"try",
":",
"payments",
"=",
"self",
".",
"_unpaid_dividends",
"[",
"next_trading_day",
"]",
"# Mark these dividends as paid by dropping them from our unpaid",
"del",
"self",
".",
"_unpaid_dividends",
"[",
"next_trading_day",
"]",
"except",
"KeyError",
":",
"payments",
"=",
"[",
"]",
"# representing the fact that we're required to reimburse the owner of",
"# the stock for any dividends paid while borrowing.",
"for",
"payment",
"in",
"payments",
":",
"net_cash_payment",
"+=",
"payment",
"[",
"'amount'",
"]",
"# Add stock for any stock dividends paid. Again, the values here may",
"# be negative in the case of short positions.",
"try",
":",
"stock_payments",
"=",
"self",
".",
"_unpaid_stock_dividends",
"[",
"next_trading_day",
"]",
"except",
"KeyError",
":",
"stock_payments",
"=",
"[",
"]",
"for",
"stock_payment",
"in",
"stock_payments",
":",
"payment_asset",
"=",
"stock_payment",
"[",
"'payment_asset'",
"]",
"share_count",
"=",
"stock_payment",
"[",
"'share_count'",
"]",
"# note we create a Position for stock dividend if we don't",
"# already own the asset",
"if",
"payment_asset",
"in",
"self",
".",
"positions",
":",
"position",
"=",
"self",
".",
"positions",
"[",
"payment_asset",
"]",
"else",
":",
"position",
"=",
"self",
".",
"positions",
"[",
"payment_asset",
"]",
"=",
"Position",
"(",
"payment_asset",
",",
")",
"position",
".",
"amount",
"+=",
"share_count",
"return",
"net_cash_payment"
] | Returns a cash payment based on the dividends that should be paid out
according to the accumulated bookkeeping of earned, unpaid, and stock
dividends. | [
"Returns",
"a",
"cash",
"payment",
"based",
"on",
"the",
"dividends",
"that",
"should",
"be",
"paid",
"out",
"according",
"to",
"the",
"accumulated",
"bookkeeping",
"of",
"earned",
"unpaid",
"and",
"stock",
"dividends",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L181-L222 |
26,122 | quantopian/zipline | zipline/finance/ledger.py | PositionTracker.stats | def stats(self):
"""The current status of the positions.
Returns
-------
stats : PositionStats
The current position stats.
Notes
-----
This is cached; repeated access will not recompute the stats until
the stats may have changed.
"""
if self._dirty_stats:
calculate_position_tracker_stats(self.positions, self._stats)
self._dirty_stats = False
return self._stats | python | def stats(self):
"""The current status of the positions.
Returns
-------
stats : PositionStats
The current position stats.
Notes
-----
This is cached; repeated access will not recompute the stats until
the stats may have changed.
"""
if self._dirty_stats:
calculate_position_tracker_stats(self.positions, self._stats)
self._dirty_stats = False
return self._stats | [
"def",
"stats",
"(",
"self",
")",
":",
"if",
"self",
".",
"_dirty_stats",
":",
"calculate_position_tracker_stats",
"(",
"self",
".",
"positions",
",",
"self",
".",
"_stats",
")",
"self",
".",
"_dirty_stats",
"=",
"False",
"return",
"self",
".",
"_stats"
] | The current status of the positions.
Returns
-------
stats : PositionStats
The current position stats.
Notes
-----
This is cached; repeated access will not recompute the stats until
the stats may have changed. | [
"The",
"current",
"status",
"of",
"the",
"positions",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L288-L305 |
26,123 | quantopian/zipline | zipline/finance/ledger.py | Ledger.process_transaction | def process_transaction(self, transaction):
"""Add a transaction to ledger, updating the current state as needed.
Parameters
----------
transaction : zp.Transaction
The transaction to execute.
"""
asset = transaction.asset
if isinstance(asset, Future):
try:
old_price = self._payout_last_sale_prices[asset]
except KeyError:
self._payout_last_sale_prices[asset] = transaction.price
else:
position = self.position_tracker.positions[asset]
amount = position.amount
price = transaction.price
self._cash_flow(
self._calculate_payout(
asset.price_multiplier,
amount,
old_price,
price,
),
)
if amount + transaction.amount == 0:
del self._payout_last_sale_prices[asset]
else:
self._payout_last_sale_prices[asset] = price
else:
self._cash_flow(-(transaction.price * transaction.amount))
self.position_tracker.execute_transaction(transaction)
# we only ever want the dict form from now on
transaction_dict = transaction.to_dict()
try:
self._processed_transactions[transaction.dt].append(
transaction_dict,
)
except KeyError:
self._processed_transactions[transaction.dt] = [transaction_dict] | python | def process_transaction(self, transaction):
"""Add a transaction to ledger, updating the current state as needed.
Parameters
----------
transaction : zp.Transaction
The transaction to execute.
"""
asset = transaction.asset
if isinstance(asset, Future):
try:
old_price = self._payout_last_sale_prices[asset]
except KeyError:
self._payout_last_sale_prices[asset] = transaction.price
else:
position = self.position_tracker.positions[asset]
amount = position.amount
price = transaction.price
self._cash_flow(
self._calculate_payout(
asset.price_multiplier,
amount,
old_price,
price,
),
)
if amount + transaction.amount == 0:
del self._payout_last_sale_prices[asset]
else:
self._payout_last_sale_prices[asset] = price
else:
self._cash_flow(-(transaction.price * transaction.amount))
self.position_tracker.execute_transaction(transaction)
# we only ever want the dict form from now on
transaction_dict = transaction.to_dict()
try:
self._processed_transactions[transaction.dt].append(
transaction_dict,
)
except KeyError:
self._processed_transactions[transaction.dt] = [transaction_dict] | [
"def",
"process_transaction",
"(",
"self",
",",
"transaction",
")",
":",
"asset",
"=",
"transaction",
".",
"asset",
"if",
"isinstance",
"(",
"asset",
",",
"Future",
")",
":",
"try",
":",
"old_price",
"=",
"self",
".",
"_payout_last_sale_prices",
"[",
"asset",
"]",
"except",
"KeyError",
":",
"self",
".",
"_payout_last_sale_prices",
"[",
"asset",
"]",
"=",
"transaction",
".",
"price",
"else",
":",
"position",
"=",
"self",
".",
"position_tracker",
".",
"positions",
"[",
"asset",
"]",
"amount",
"=",
"position",
".",
"amount",
"price",
"=",
"transaction",
".",
"price",
"self",
".",
"_cash_flow",
"(",
"self",
".",
"_calculate_payout",
"(",
"asset",
".",
"price_multiplier",
",",
"amount",
",",
"old_price",
",",
"price",
",",
")",
",",
")",
"if",
"amount",
"+",
"transaction",
".",
"amount",
"==",
"0",
":",
"del",
"self",
".",
"_payout_last_sale_prices",
"[",
"asset",
"]",
"else",
":",
"self",
".",
"_payout_last_sale_prices",
"[",
"asset",
"]",
"=",
"price",
"else",
":",
"self",
".",
"_cash_flow",
"(",
"-",
"(",
"transaction",
".",
"price",
"*",
"transaction",
".",
"amount",
")",
")",
"self",
".",
"position_tracker",
".",
"execute_transaction",
"(",
"transaction",
")",
"# we only ever want the dict form from now on",
"transaction_dict",
"=",
"transaction",
".",
"to_dict",
"(",
")",
"try",
":",
"self",
".",
"_processed_transactions",
"[",
"transaction",
".",
"dt",
"]",
".",
"append",
"(",
"transaction_dict",
",",
")",
"except",
"KeyError",
":",
"self",
".",
"_processed_transactions",
"[",
"transaction",
".",
"dt",
"]",
"=",
"[",
"transaction_dict",
"]"
] | Add a transaction to ledger, updating the current state as needed.
Parameters
----------
transaction : zp.Transaction
The transaction to execute. | [
"Add",
"a",
"transaction",
"to",
"ledger",
"updating",
"the",
"current",
"state",
"as",
"needed",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L479-L523 |
26,124 | quantopian/zipline | zipline/finance/ledger.py | Ledger.process_order | def process_order(self, order):
"""Keep track of an order that was placed.
Parameters
----------
order : zp.Order
The order to record.
"""
try:
dt_orders = self._orders_by_modified[order.dt]
except KeyError:
self._orders_by_modified[order.dt] = OrderedDict([
(order.id, order),
])
self._orders_by_id[order.id] = order
else:
self._orders_by_id[order.id] = dt_orders[order.id] = order
# to preserve the order of the orders by modified date
move_to_end(dt_orders, order.id, last=True)
move_to_end(self._orders_by_id, order.id, last=True) | python | def process_order(self, order):
"""Keep track of an order that was placed.
Parameters
----------
order : zp.Order
The order to record.
"""
try:
dt_orders = self._orders_by_modified[order.dt]
except KeyError:
self._orders_by_modified[order.dt] = OrderedDict([
(order.id, order),
])
self._orders_by_id[order.id] = order
else:
self._orders_by_id[order.id] = dt_orders[order.id] = order
# to preserve the order of the orders by modified date
move_to_end(dt_orders, order.id, last=True)
move_to_end(self._orders_by_id, order.id, last=True) | [
"def",
"process_order",
"(",
"self",
",",
"order",
")",
":",
"try",
":",
"dt_orders",
"=",
"self",
".",
"_orders_by_modified",
"[",
"order",
".",
"dt",
"]",
"except",
"KeyError",
":",
"self",
".",
"_orders_by_modified",
"[",
"order",
".",
"dt",
"]",
"=",
"OrderedDict",
"(",
"[",
"(",
"order",
".",
"id",
",",
"order",
")",
",",
"]",
")",
"self",
".",
"_orders_by_id",
"[",
"order",
".",
"id",
"]",
"=",
"order",
"else",
":",
"self",
".",
"_orders_by_id",
"[",
"order",
".",
"id",
"]",
"=",
"dt_orders",
"[",
"order",
".",
"id",
"]",
"=",
"order",
"# to preserve the order of the orders by modified date",
"move_to_end",
"(",
"dt_orders",
",",
"order",
".",
"id",
",",
"last",
"=",
"True",
")",
"move_to_end",
"(",
"self",
".",
"_orders_by_id",
",",
"order",
".",
"id",
",",
"last",
"=",
"True",
")"
] | Keep track of an order that was placed.
Parameters
----------
order : zp.Order
The order to record. | [
"Keep",
"track",
"of",
"an",
"order",
"that",
"was",
"placed",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L537-L557 |
26,125 | quantopian/zipline | zipline/finance/ledger.py | Ledger.process_commission | def process_commission(self, commission):
"""Process the commission.
Parameters
----------
commission : zp.Event
The commission being paid.
"""
asset = commission['asset']
cost = commission['cost']
self.position_tracker.handle_commission(asset, cost)
self._cash_flow(-cost) | python | def process_commission(self, commission):
"""Process the commission.
Parameters
----------
commission : zp.Event
The commission being paid.
"""
asset = commission['asset']
cost = commission['cost']
self.position_tracker.handle_commission(asset, cost)
self._cash_flow(-cost) | [
"def",
"process_commission",
"(",
"self",
",",
"commission",
")",
":",
"asset",
"=",
"commission",
"[",
"'asset'",
"]",
"cost",
"=",
"commission",
"[",
"'cost'",
"]",
"self",
".",
"position_tracker",
".",
"handle_commission",
"(",
"asset",
",",
"cost",
")",
"self",
".",
"_cash_flow",
"(",
"-",
"cost",
")"
] | Process the commission.
Parameters
----------
commission : zp.Event
The commission being paid. | [
"Process",
"the",
"commission",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L559-L571 |
26,126 | quantopian/zipline | zipline/finance/ledger.py | Ledger.process_dividends | def process_dividends(self, next_session, asset_finder, adjustment_reader):
"""Process dividends for the next session.
This will earn us any dividends whose ex-date is the next session as
well as pay out any dividends whose pay-date is the next session.
"""
position_tracker = self.position_tracker
# Earn dividends whose ex_date is the next trading day. We need to
# check if we own any of these stocks so we know to pay them out when
# the pay date comes.
held_sids = set(position_tracker.positions)
if held_sids:
cash_dividends = adjustment_reader.get_dividends_with_ex_date(
held_sids,
next_session,
asset_finder
)
stock_dividends = (
adjustment_reader.get_stock_dividends_with_ex_date(
held_sids,
next_session,
asset_finder
)
)
# Earning a dividend just marks that we need to get paid out on
# the dividend's pay-date. This does not affect our cash yet.
position_tracker.earn_dividends(
cash_dividends,
stock_dividends,
)
# Pay out the dividends whose pay-date is the next session. This does
# affect our cash.
self._cash_flow(
position_tracker.pay_dividends(
next_session,
),
) | python | def process_dividends(self, next_session, asset_finder, adjustment_reader):
"""Process dividends for the next session.
This will earn us any dividends whose ex-date is the next session as
well as pay out any dividends whose pay-date is the next session.
"""
position_tracker = self.position_tracker
# Earn dividends whose ex_date is the next trading day. We need to
# check if we own any of these stocks so we know to pay them out when
# the pay date comes.
held_sids = set(position_tracker.positions)
if held_sids:
cash_dividends = adjustment_reader.get_dividends_with_ex_date(
held_sids,
next_session,
asset_finder
)
stock_dividends = (
adjustment_reader.get_stock_dividends_with_ex_date(
held_sids,
next_session,
asset_finder
)
)
# Earning a dividend just marks that we need to get paid out on
# the dividend's pay-date. This does not affect our cash yet.
position_tracker.earn_dividends(
cash_dividends,
stock_dividends,
)
# Pay out the dividends whose pay-date is the next session. This does
# affect our cash.
self._cash_flow(
position_tracker.pay_dividends(
next_session,
),
) | [
"def",
"process_dividends",
"(",
"self",
",",
"next_session",
",",
"asset_finder",
",",
"adjustment_reader",
")",
":",
"position_tracker",
"=",
"self",
".",
"position_tracker",
"# Earn dividends whose ex_date is the next trading day. We need to",
"# check if we own any of these stocks so we know to pay them out when",
"# the pay date comes.",
"held_sids",
"=",
"set",
"(",
"position_tracker",
".",
"positions",
")",
"if",
"held_sids",
":",
"cash_dividends",
"=",
"adjustment_reader",
".",
"get_dividends_with_ex_date",
"(",
"held_sids",
",",
"next_session",
",",
"asset_finder",
")",
"stock_dividends",
"=",
"(",
"adjustment_reader",
".",
"get_stock_dividends_with_ex_date",
"(",
"held_sids",
",",
"next_session",
",",
"asset_finder",
")",
")",
"# Earning a dividend just marks that we need to get paid out on",
"# the dividend's pay-date. This does not affect our cash yet.",
"position_tracker",
".",
"earn_dividends",
"(",
"cash_dividends",
",",
"stock_dividends",
",",
")",
"# Pay out the dividends whose pay-date is the next session. This does",
"# affect out cash.",
"self",
".",
"_cash_flow",
"(",
"position_tracker",
".",
"pay_dividends",
"(",
"next_session",
",",
")",
",",
")"
] | Process dividends for the next session.
This will earn us any dividends whose ex-date is the next session as
well as pay out any dividends whose pay-date is the next session. | [
"Process",
"dividends",
"for",
"the",
"next",
"session",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L582-L621 |
26,127 | quantopian/zipline | zipline/finance/ledger.py | Ledger.transactions | def transactions(self, dt=None):
"""Retrieve the dict-form of all of the transactions in a given bar or
for the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up transactions for. If not passed,
or None is explicitly passed, all of the transactions will be
returned.
Returns
-------
transactions : list[dict]
The transaction information.
"""
if dt is None:
# flatten the by-day transactions
return [
txn
for by_day in itervalues(self._processed_transactions)
for txn in by_day
]
return self._processed_transactions.get(dt, []) | python | def transactions(self, dt=None):
"""Retrieve the dict-form of all of the transactions in a given bar or
for the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up transactions for. If not passed,
or None is explicitly passed, all of the transactions will be
returned.
Returns
-------
transactions : list[dict]
The transaction information.
"""
if dt is None:
# flatten the by-day transactions
return [
txn
for by_day in itervalues(self._processed_transactions)
for txn in by_day
]
return self._processed_transactions.get(dt, []) | [
"def",
"transactions",
"(",
"self",
",",
"dt",
"=",
"None",
")",
":",
"if",
"dt",
"is",
"None",
":",
"# flatten the by-day transactions",
"return",
"[",
"txn",
"for",
"by_day",
"in",
"itervalues",
"(",
"self",
".",
"_processed_transactions",
")",
"for",
"txn",
"in",
"by_day",
"]",
"return",
"self",
".",
"_processed_transactions",
".",
"get",
"(",
"dt",
",",
"[",
"]",
")"
] | Retrieve the dict-form of all of the transactions in a given bar or
for the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up transactions for. If not passed,
or None is explicitly passed, all of the transactions will be
returned.
Returns
-------
transactions : list[dict]
The transaction information. | [
"Retrieve",
"the",
"dict",
"-",
"form",
"of",
"all",
"of",
"the",
"transactions",
"in",
"a",
"given",
"bar",
"or",
"for",
"the",
"whole",
"simulation",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L631-L655 |
26,128 | quantopian/zipline | zipline/finance/ledger.py | Ledger.orders | def orders(self, dt=None):
"""Retrieve the dict-form of all of the orders in a given bar or for
the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up orders for. If not passed, or
None is explicitly passed, all of the orders will be returned.
Returns
-------
orders : list[dict]
The order information.
"""
if dt is None:
# orders by id is already flattened
return [o.to_dict() for o in itervalues(self._orders_by_id)]
return [
o.to_dict()
for o in itervalues(self._orders_by_modified.get(dt, {}))
] | python | def orders(self, dt=None):
"""Retrieve the dict-form of all of the orders in a given bar or for
the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up orders for. If not passed, or
None is explicitly passed, all of the orders will be returned.
Returns
-------
orders : list[dict]
The order information.
"""
if dt is None:
# orders by id is already flattened
return [o.to_dict() for o in itervalues(self._orders_by_id)]
return [
o.to_dict()
for o in itervalues(self._orders_by_modified.get(dt, {}))
] | [
"def",
"orders",
"(",
"self",
",",
"dt",
"=",
"None",
")",
":",
"if",
"dt",
"is",
"None",
":",
"# orders by id is already flattened",
"return",
"[",
"o",
".",
"to_dict",
"(",
")",
"for",
"o",
"in",
"itervalues",
"(",
"self",
".",
"_orders_by_id",
")",
"]",
"return",
"[",
"o",
".",
"to_dict",
"(",
")",
"for",
"o",
"in",
"itervalues",
"(",
"self",
".",
"_orders_by_modified",
".",
"get",
"(",
"dt",
",",
"{",
"}",
")",
")",
"]"
] | Retrieve the dict-form of all of the orders in a given bar or for
the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up orders for. If not passed, or
None is explicitly passed, all of the orders will be returned.
Returns
-------
orders : list[dict]
The order information. | [
"Retrieve",
"the",
"dict",
"-",
"form",
"of",
"all",
"of",
"the",
"orders",
"in",
"a",
"given",
"bar",
"or",
"for",
"the",
"whole",
"simulation",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L657-L679 |
26,129 | quantopian/zipline | zipline/finance/ledger.py | Ledger.update_portfolio | def update_portfolio(self):
"""Force a computation of the current portfolio state.
"""
if not self._dirty_portfolio:
return
portfolio = self._portfolio
pt = self.position_tracker
portfolio.positions = pt.get_positions()
position_stats = pt.stats
portfolio.positions_value = position_value = (
position_stats.net_value
)
portfolio.positions_exposure = position_stats.net_exposure
self._cash_flow(self._get_payout_total(pt.positions))
start_value = portfolio.portfolio_value
# update the new starting value
portfolio.portfolio_value = end_value = portfolio.cash + position_value
pnl = end_value - start_value
if start_value != 0:
returns = pnl / start_value
else:
returns = 0.0
portfolio.pnl += pnl
portfolio.returns = (
(1 + portfolio.returns) *
(1 + returns) -
1
)
# the portfolio has been fully synced
self._dirty_portfolio = False | python | def update_portfolio(self):
"""Force a computation of the current portfolio state.
"""
if not self._dirty_portfolio:
return
portfolio = self._portfolio
pt = self.position_tracker
portfolio.positions = pt.get_positions()
position_stats = pt.stats
portfolio.positions_value = position_value = (
position_stats.net_value
)
portfolio.positions_exposure = position_stats.net_exposure
self._cash_flow(self._get_payout_total(pt.positions))
start_value = portfolio.portfolio_value
# update the new starting value
portfolio.portfolio_value = end_value = portfolio.cash + position_value
pnl = end_value - start_value
if start_value != 0:
returns = pnl / start_value
else:
returns = 0.0
portfolio.pnl += pnl
portfolio.returns = (
(1 + portfolio.returns) *
(1 + returns) -
1
)
# the portfolio has been fully synced
self._dirty_portfolio = False | [
"def",
"update_portfolio",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_dirty_portfolio",
":",
"return",
"portfolio",
"=",
"self",
".",
"_portfolio",
"pt",
"=",
"self",
".",
"position_tracker",
"portfolio",
".",
"positions",
"=",
"pt",
".",
"get_positions",
"(",
")",
"position_stats",
"=",
"pt",
".",
"stats",
"portfolio",
".",
"positions_value",
"=",
"position_value",
"=",
"(",
"position_stats",
".",
"net_value",
")",
"portfolio",
".",
"positions_exposure",
"=",
"position_stats",
".",
"net_exposure",
"self",
".",
"_cash_flow",
"(",
"self",
".",
"_get_payout_total",
"(",
"pt",
".",
"positions",
")",
")",
"start_value",
"=",
"portfolio",
".",
"portfolio_value",
"# update the new starting value",
"portfolio",
".",
"portfolio_value",
"=",
"end_value",
"=",
"portfolio",
".",
"cash",
"+",
"position_value",
"pnl",
"=",
"end_value",
"-",
"start_value",
"if",
"start_value",
"!=",
"0",
":",
"returns",
"=",
"pnl",
"/",
"start_value",
"else",
":",
"returns",
"=",
"0.0",
"portfolio",
".",
"pnl",
"+=",
"pnl",
"portfolio",
".",
"returns",
"=",
"(",
"(",
"1",
"+",
"portfolio",
".",
"returns",
")",
"*",
"(",
"1",
"+",
"returns",
")",
"-",
"1",
")",
"# the portfolio has been fully synced",
"self",
".",
"_dirty_portfolio",
"=",
"False"
] | Force a computation of the current portfolio state. | [
"Force",
"a",
"computation",
"of",
"the",
"current",
"portfolio",
"state",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L703-L740 |
26,130 | quantopian/zipline | zipline/finance/ledger.py | Ledger.override_account_fields | def override_account_fields(self,
settled_cash=not_overridden,
accrued_interest=not_overridden,
buying_power=not_overridden,
equity_with_loan=not_overridden,
total_positions_value=not_overridden,
total_positions_exposure=not_overridden,
regt_equity=not_overridden,
regt_margin=not_overridden,
initial_margin_requirement=not_overridden,
maintenance_margin_requirement=not_overridden,
available_funds=not_overridden,
excess_liquidity=not_overridden,
cushion=not_overridden,
day_trades_remaining=not_overridden,
leverage=not_overridden,
net_leverage=not_overridden,
net_liquidation=not_overridden):
"""Override fields on ``self.account``.
"""
# mark that the portfolio is dirty to override the fields again
self._dirty_account = True
self._account_overrides = kwargs = {
k: v for k, v in locals().items() if v is not not_overridden
}
del kwargs['self'] | python | def override_account_fields(self,
settled_cash=not_overridden,
accrued_interest=not_overridden,
buying_power=not_overridden,
equity_with_loan=not_overridden,
total_positions_value=not_overridden,
total_positions_exposure=not_overridden,
regt_equity=not_overridden,
regt_margin=not_overridden,
initial_margin_requirement=not_overridden,
maintenance_margin_requirement=not_overridden,
available_funds=not_overridden,
excess_liquidity=not_overridden,
cushion=not_overridden,
day_trades_remaining=not_overridden,
leverage=not_overridden,
net_leverage=not_overridden,
net_liquidation=not_overridden):
"""Override fields on ``self.account``.
"""
# mark that the portfolio is dirty to override the fields again
self._dirty_account = True
self._account_overrides = kwargs = {
k: v for k, v in locals().items() if v is not not_overridden
}
del kwargs['self'] | [
"def",
"override_account_fields",
"(",
"self",
",",
"settled_cash",
"=",
"not_overridden",
",",
"accrued_interest",
"=",
"not_overridden",
",",
"buying_power",
"=",
"not_overridden",
",",
"equity_with_loan",
"=",
"not_overridden",
",",
"total_positions_value",
"=",
"not_overridden",
",",
"total_positions_exposure",
"=",
"not_overridden",
",",
"regt_equity",
"=",
"not_overridden",
",",
"regt_margin",
"=",
"not_overridden",
",",
"initial_margin_requirement",
"=",
"not_overridden",
",",
"maintenance_margin_requirement",
"=",
"not_overridden",
",",
"available_funds",
"=",
"not_overridden",
",",
"excess_liquidity",
"=",
"not_overridden",
",",
"cushion",
"=",
"not_overridden",
",",
"day_trades_remaining",
"=",
"not_overridden",
",",
"leverage",
"=",
"not_overridden",
",",
"net_leverage",
"=",
"not_overridden",
",",
"net_liquidation",
"=",
"not_overridden",
")",
":",
"# mark that the portfolio is dirty to override the fields again",
"self",
".",
"_dirty_account",
"=",
"True",
"self",
".",
"_account_overrides",
"=",
"kwargs",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"locals",
"(",
")",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"not_overridden",
"}",
"del",
"kwargs",
"[",
"'self'",
"]"
] | Override fields on ``self.account``. | [
"Override",
"fields",
"on",
"self",
".",
"account",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L766-L791 |
26,131 | quantopian/zipline | zipline/pipeline/loaders/blaze/core.py | new_dataset | def new_dataset(expr, missing_values, domain):
"""
Creates or returns a dataset from a blaze expression.
Parameters
----------
expr : Expr
The blaze expression representing the values.
missing_values : frozenset of (name, value) pairs
Pairs associating each column name with the missing_value for that column.
This needs to be a frozenset rather than a dict or tuple of tuples
because we want a collection that's unordered but still hashable.
domain : zipline.pipeline.domain.Domain
Domain of the dataset to be created.
Returns
-------
ds : type
A new dataset type.
Notes
-----
This function is memoized. Repeated calls with the same inputs will return
the same type.
"""
missing_values = dict(missing_values)
class_dict = {'ndim': 2 if SID_FIELD_NAME in expr.fields else 1}
for name, type_ in expr.dshape.measure.fields:
# Don't generate a column for sid or timestamp, since they're
# implicitly the labels of the arrays that will be passed to pipeline
# Terms.
if name in (SID_FIELD_NAME, TS_FIELD_NAME):
continue
type_ = datashape_type_to_numpy(type_)
if can_represent_dtype(type_):
col = Column(
type_,
missing_values.get(name, NotSpecified),
)
else:
col = NonPipelineField(name, type_)
class_dict[name] = col
if 'domain' in class_dict:
raise ValueError("Got a column named 'domain' in new_dataset(). "
"'domain' is reserved.")
class_dict['domain'] = domain
name = expr._name
if name is None:
name = next(_new_names)
# unicode is a name error in py3 but the branch is only hit
# when we are in python 2.
if PY2 and isinstance(name, unicode): # pragma: no cover # noqa
name = name.encode('utf-8')
return type(name, (DataSet,), class_dict) | python | def new_dataset(expr, missing_values, domain):
"""
Creates or returns a dataset from a blaze expression.
Parameters
----------
expr : Expr
The blaze expression representing the values.
missing_values : frozenset of (name, value) pairs
Pairs associating each column name with the missing_value for that column.
This needs to be a frozenset rather than a dict or tuple of tuples
because we want a collection that's unordered but still hashable.
domain : zipline.pipeline.domain.Domain
Domain of the dataset to be created.
Returns
-------
ds : type
A new dataset type.
Notes
-----
This function is memoized. Repeated calls with the same inputs will return
the same type.
"""
missing_values = dict(missing_values)
class_dict = {'ndim': 2 if SID_FIELD_NAME in expr.fields else 1}
for name, type_ in expr.dshape.measure.fields:
# Don't generate a column for sid or timestamp, since they're
# implicitly the labels of the arrays that will be passed to pipeline
# Terms.
if name in (SID_FIELD_NAME, TS_FIELD_NAME):
continue
type_ = datashape_type_to_numpy(type_)
if can_represent_dtype(type_):
col = Column(
type_,
missing_values.get(name, NotSpecified),
)
else:
col = NonPipelineField(name, type_)
class_dict[name] = col
if 'domain' in class_dict:
raise ValueError("Got a column named 'domain' in new_dataset(). "
"'domain' is reserved.")
class_dict['domain'] = domain
name = expr._name
if name is None:
name = next(_new_names)
# unicode is a name error in py3 but the branch is only hit
# when we are in python 2.
if PY2 and isinstance(name, unicode): # pragma: no cover # noqa
name = name.encode('utf-8')
return type(name, (DataSet,), class_dict) | [
"def",
"new_dataset",
"(",
"expr",
",",
"missing_values",
",",
"domain",
")",
":",
"missing_values",
"=",
"dict",
"(",
"missing_values",
")",
"class_dict",
"=",
"{",
"'ndim'",
":",
"2",
"if",
"SID_FIELD_NAME",
"in",
"expr",
".",
"fields",
"else",
"1",
"}",
"for",
"name",
",",
"type_",
"in",
"expr",
".",
"dshape",
".",
"measure",
".",
"fields",
":",
"# Don't generate a column for sid or timestamp, since they're",
"# implicitly the labels if the arrays that will be passed to pipeline",
"# Terms.",
"if",
"name",
"in",
"(",
"SID_FIELD_NAME",
",",
"TS_FIELD_NAME",
")",
":",
"continue",
"type_",
"=",
"datashape_type_to_numpy",
"(",
"type_",
")",
"if",
"can_represent_dtype",
"(",
"type_",
")",
":",
"col",
"=",
"Column",
"(",
"type_",
",",
"missing_values",
".",
"get",
"(",
"name",
",",
"NotSpecified",
")",
",",
")",
"else",
":",
"col",
"=",
"NonPipelineField",
"(",
"name",
",",
"type_",
")",
"class_dict",
"[",
"name",
"]",
"=",
"col",
"if",
"'domain'",
"in",
"class_dict",
":",
"raise",
"ValueError",
"(",
"\"Got a column named 'domain' in new_dataset(). \"",
"\"'domain' is reserved.\"",
")",
"class_dict",
"[",
"'domain'",
"]",
"=",
"domain",
"name",
"=",
"expr",
".",
"_name",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"next",
"(",
"_new_names",
")",
"# unicode is a name error in py3 but the branch is only hit",
"# when we are in python 2.",
"if",
"PY2",
"and",
"isinstance",
"(",
"name",
",",
"unicode",
")",
":",
"# pragma: no cover # noqa",
"name",
"=",
"name",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"type",
"(",
"name",
",",
"(",
"DataSet",
",",
")",
",",
"class_dict",
")"
] | Creates or returns a dataset from a blaze expression.
Parameters
----------
expr : Expr
The blaze expression representing the values.
missing_values : frozenset of (name, value) pairs
Pairs associating each column name with the missing_value for that column.
This needs to be a frozenset rather than a dict or tuple of tuples
because we want a collection that's unordered but still hashable.
domain : zipline.pipeline.domain.Domain
Domain of the dataset to be created.
Returns
-------
ds : type
A new dataset type.
Notes
-----
This function is memoized. repeated calls with the same inputs will return
the same type. | [
"Creates",
"or",
"returns",
"a",
"dataset",
"from",
"a",
"blaze",
"expression",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L276-L334 |
26,132 | quantopian/zipline | zipline/pipeline/loaders/blaze/core.py | _check_resources | def _check_resources(name, expr, resources):
"""Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression.
"""
if expr is None:
return
bound = expr._resources()
if not bound and resources is None:
raise ValueError('no resources provided to compute %s' % name)
if bound and resources:
raise ValueError(
'explicit and implicit resources provided to compute %s' % name,
) | python | def _check_resources(name, expr, resources):
"""Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression.
"""
if expr is None:
return
bound = expr._resources()
if not bound and resources is None:
raise ValueError('no resources provided to compute %s' % name)
if bound and resources:
raise ValueError(
'explicit and implicit resources provided to compute %s' % name,
) | [
"def",
"_check_resources",
"(",
"name",
",",
"expr",
",",
"resources",
")",
":",
"if",
"expr",
"is",
"None",
":",
"return",
"bound",
"=",
"expr",
".",
"_resources",
"(",
")",
"if",
"not",
"bound",
"and",
"resources",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'no resources provided to compute %s'",
"%",
"name",
")",
"if",
"bound",
"and",
"resources",
":",
"raise",
"ValueError",
"(",
"'explicit and implicit resources provided to compute %s'",
"%",
"name",
",",
")"
] | Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression. | [
"Validate",
"that",
"the",
"expression",
"and",
"resources",
"passed",
"match",
"up",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L337-L362 |
26,133 | quantopian/zipline | zipline/pipeline/loaders/blaze/core.py | _check_datetime_field | def _check_datetime_field(name, measure):
"""Check that a field is a datetime inside some measure.
Parameters
----------
name : str
The name of the field to check.
measure : Record
The record to check the field of.
Raises
------
TypeError
If the field is not a datetime inside ``measure``.
"""
if not isinstance(measure[name], (Date, DateTime)):
raise TypeError(
"'{name}' field must be a '{dt}', not: '{dshape}'".format(
name=name,
dt=DateTime(),
dshape=measure[name],
),
) | python | def _check_datetime_field(name, measure):
"""Check that a field is a datetime inside some measure.
Parameters
----------
name : str
The name of the field to check.
measure : Record
The record to check the field of.
Raises
------
TypeError
If the field is not a datetime inside ``measure``.
"""
if not isinstance(measure[name], (Date, DateTime)):
raise TypeError(
"'{name}' field must be a '{dt}', not: '{dshape}'".format(
name=name,
dt=DateTime(),
dshape=measure[name],
),
) | [
"def",
"_check_datetime_field",
"(",
"name",
",",
"measure",
")",
":",
"if",
"not",
"isinstance",
"(",
"measure",
"[",
"name",
"]",
",",
"(",
"Date",
",",
"DateTime",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"'{name}' field must be a '{dt}', not: '{dshape}'\"",
".",
"format",
"(",
"name",
"=",
"name",
",",
"dt",
"=",
"DateTime",
"(",
")",
",",
"dshape",
"=",
"measure",
"[",
"name",
"]",
",",
")",
",",
")"
] | Check that a field is a datetime inside some measure.
Parameters
----------
name : str
The name of the field to check.
measure : Record
The record to check the field of.
Raises
------
TypeError
If the field is not a datetime inside ``measure``. | [
"Check",
"that",
"a",
"field",
"is",
"a",
"datetime",
"inside",
"some",
"measure",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L365-L387 |
26,134 | quantopian/zipline | zipline/pipeline/loaders/blaze/core.py | _get_metadata | def _get_metadata(field, expr, metadata_expr, no_metadata_rule):
"""Find the correct metadata expression for the expression.
Parameters
----------
field : {'deltas', 'checkpoints'}
The kind of metadata expr to lookup.
expr : Expr
The baseline expression.
metadata_expr : Expr, 'auto', or None
The metadata argument. If this is 'auto', then the metadata table will
be searched for by walking up the expression tree. If this cannot be
reflected, then an action will be taken based on the
``no_metadata_rule``.
no_metadata_rule : {'warn', 'raise', 'ignore'}
How to handle the case where the metadata_expr='auto' but no expr
could be found.
Returns
-------
metadata : Expr or None
The deltas or metadata table to use.
"""
if isinstance(metadata_expr, bz.Expr) or metadata_expr is None:
return metadata_expr
try:
return expr._child['_'.join(((expr._name or ''), field))]
except (ValueError, AttributeError):
if no_metadata_rule == 'raise':
raise ValueError(
"no %s table could be reflected for %s" % (field, expr)
)
elif no_metadata_rule == 'warn':
warnings.warn(NoMetaDataWarning(expr, field), stacklevel=4)
return None | python | def _get_metadata(field, expr, metadata_expr, no_metadata_rule):
"""Find the correct metadata expression for the expression.
Parameters
----------
field : {'deltas', 'checkpoints'}
The kind of metadata expr to lookup.
expr : Expr
The baseline expression.
metadata_expr : Expr, 'auto', or None
The metadata argument. If this is 'auto', then the metadata table will
be searched for by walking up the expression tree. If this cannot be
reflected, then an action will be taken based on the
``no_metadata_rule``.
no_metadata_rule : {'warn', 'raise', 'ignore'}
How to handle the case where the metadata_expr='auto' but no expr
could be found.
Returns
-------
metadata : Expr or None
The deltas or metadata table to use.
"""
if isinstance(metadata_expr, bz.Expr) or metadata_expr is None:
return metadata_expr
try:
return expr._child['_'.join(((expr._name or ''), field))]
except (ValueError, AttributeError):
if no_metadata_rule == 'raise':
raise ValueError(
"no %s table could be reflected for %s" % (field, expr)
)
elif no_metadata_rule == 'warn':
warnings.warn(NoMetaDataWarning(expr, field), stacklevel=4)
return None | [
"def",
"_get_metadata",
"(",
"field",
",",
"expr",
",",
"metadata_expr",
",",
"no_metadata_rule",
")",
":",
"if",
"isinstance",
"(",
"metadata_expr",
",",
"bz",
".",
"Expr",
")",
"or",
"metadata_expr",
"is",
"None",
":",
"return",
"metadata_expr",
"try",
":",
"return",
"expr",
".",
"_child",
"[",
"'_'",
".",
"join",
"(",
"(",
"(",
"expr",
".",
"_name",
"or",
"''",
")",
",",
"field",
")",
")",
"]",
"except",
"(",
"ValueError",
",",
"AttributeError",
")",
":",
"if",
"no_metadata_rule",
"==",
"'raise'",
":",
"raise",
"ValueError",
"(",
"\"no %s table could be reflected for %s\"",
"%",
"(",
"field",
",",
"expr",
")",
")",
"elif",
"no_metadata_rule",
"==",
"'warn'",
":",
"warnings",
".",
"warn",
"(",
"NoMetaDataWarning",
"(",
"expr",
",",
"field",
")",
",",
"stacklevel",
"=",
"4",
")",
"return",
"None"
] | Find the correct metadata expression for the expression.
Parameters
----------
field : {'deltas', 'checkpoints'}
The kind of metadata expr to lookup.
expr : Expr
The baseline expression.
metadata_expr : Expr, 'auto', or None
The metadata argument. If this is 'auto', then the metadata table will
be searched for by walking up the expression tree. If this cannot be
reflected, then an action will be taken based on the
``no_metadata_rule``.
no_metadata_rule : {'warn', 'raise', 'ignore'}
How to handle the case where the metadata_expr='auto' but no expr
could be found.
Returns
-------
metadata : Expr or None
The deltas or metadata table to use. | [
"Find",
"the",
"correct",
"metadata",
"expression",
"for",
"the",
"expression",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L415-L450 |
26,135 | quantopian/zipline | zipline/pipeline/loaders/blaze/core.py | _ensure_timestamp_field | def _ensure_timestamp_field(dataset_expr, deltas, checkpoints):
"""Verify that the baseline and deltas expressions have a timestamp field.
If there is not a ``TS_FIELD_NAME`` on either of the expressions, it will
be copied from the ``AD_FIELD_NAME``. If one is provided, then we will
verify that it is the correct dshape.
Parameters
----------
dataset_expr : Expr
The baseline expression.
deltas : Expr or None
The deltas expression if any was provided.
checkpoints : Expr or None
The checkpoints expression if any was provided.
Returns
-------
dataset_expr, deltas, checkpoints : Expr
The new baseline, deltas, and checkpoints expressions to use.
"""
measure = dataset_expr.dshape.measure
if TS_FIELD_NAME not in measure.names:
dataset_expr = bz.transform(
dataset_expr,
**{TS_FIELD_NAME: dataset_expr[AD_FIELD_NAME]}
)
deltas = _ad_as_ts(deltas)
checkpoints = _ad_as_ts(checkpoints)
else:
_check_datetime_field(TS_FIELD_NAME, measure)
return dataset_expr, deltas, checkpoints | python | def _ensure_timestamp_field(dataset_expr, deltas, checkpoints):
"""Verify that the baseline and deltas expressions have a timestamp field.
If there is not a ``TS_FIELD_NAME`` on either of the expressions, it will
be copied from the ``AD_FIELD_NAME``. If one is provided, then we will
verify that it is the correct dshape.
Parameters
----------
dataset_expr : Expr
The baseline expression.
deltas : Expr or None
The deltas expression if any was provided.
checkpoints : Expr or None
The checkpoints expression if any was provided.
Returns
-------
dataset_expr, deltas, checkpoints : Expr
The new baseline, deltas, and checkpoints expressions to use.
"""
measure = dataset_expr.dshape.measure
if TS_FIELD_NAME not in measure.names:
dataset_expr = bz.transform(
dataset_expr,
**{TS_FIELD_NAME: dataset_expr[AD_FIELD_NAME]}
)
deltas = _ad_as_ts(deltas)
checkpoints = _ad_as_ts(checkpoints)
else:
_check_datetime_field(TS_FIELD_NAME, measure)
return dataset_expr, deltas, checkpoints | [
"def",
"_ensure_timestamp_field",
"(",
"dataset_expr",
",",
"deltas",
",",
"checkpoints",
")",
":",
"measure",
"=",
"dataset_expr",
".",
"dshape",
".",
"measure",
"if",
"TS_FIELD_NAME",
"not",
"in",
"measure",
".",
"names",
":",
"dataset_expr",
"=",
"bz",
".",
"transform",
"(",
"dataset_expr",
",",
"*",
"*",
"{",
"TS_FIELD_NAME",
":",
"dataset_expr",
"[",
"AD_FIELD_NAME",
"]",
"}",
")",
"deltas",
"=",
"_ad_as_ts",
"(",
"deltas",
")",
"checkpoints",
"=",
"_ad_as_ts",
"(",
"checkpoints",
")",
"else",
":",
"_check_datetime_field",
"(",
"TS_FIELD_NAME",
",",
"measure",
")",
"return",
"dataset_expr",
",",
"deltas",
",",
"checkpoints"
] | Verify that the baseline and deltas expressions have a timestamp field.
If there is not a ``TS_FIELD_NAME`` on either of the expressions, it will
be copied from the ``AD_FIELD_NAME``. If one is provided, then we will
verify that it is the correct dshape.
Parameters
----------
dataset_expr : Expr
The baseline expression.
deltas : Expr or None
The deltas expression if any was provided.
checkpoints : Expr or None
The checkpoints expression if any was provided.
Returns
-------
dataset_expr, deltas, checkpoints : Expr
The new baseline, deltas, and checkpoints expressions to use.
"Verify",
"that",
"the",
"baseline",
"and",
"deltas",
"expressions",
"have",
"a",
"timestamp",
"field",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L473-L505 |
26,136 | quantopian/zipline | zipline/pipeline/loaders/blaze/core.py | bind_expression_to_resources | def bind_expression_to_resources(expr, resources):
"""
Bind a Blaze expression to resources.
Parameters
----------
expr : bz.Expr
The expression to which we want to bind resources.
resources : dict[bz.Symbol -> any]
Mapping from the loadable terms of ``expr`` to actual data resources.
Returns
-------
bound_expr : bz.Expr
``expr`` with bound resources.
"""
# bind the resources into the expression
if resources is None:
resources = {}
# _subs stands for substitute. It's not actually private, blaze just
# prefixes symbol-manipulation methods with underscores to prevent
# collisions with data column names.
return expr._subs({
k: bz.data(v, dshape=k.dshape) for k, v in iteritems(resources)
}) | python | def bind_expression_to_resources(expr, resources):
"""
Bind a Blaze expression to resources.
Parameters
----------
expr : bz.Expr
The expression to which we want to bind resources.
resources : dict[bz.Symbol -> any]
Mapping from the loadable terms of ``expr`` to actual data resources.
Returns
-------
bound_expr : bz.Expr
``expr`` with bound resources.
"""
# bind the resources into the expression
if resources is None:
resources = {}
# _subs stands for substitute. It's not actually private, blaze just
# prefixes symbol-manipulation methods with underscores to prevent
# collisions with data column names.
return expr._subs({
k: bz.data(v, dshape=k.dshape) for k, v in iteritems(resources)
}) | [
"def",
"bind_expression_to_resources",
"(",
"expr",
",",
"resources",
")",
":",
"# bind the resources into the expression",
"if",
"resources",
"is",
"None",
":",
"resources",
"=",
"{",
"}",
"# _subs stands for substitute. It's not actually private, blaze just",
"# prefixes symbol-manipulation methods with underscores to prevent",
"# collisions with data column names.",
"return",
"expr",
".",
"_subs",
"(",
"{",
"k",
":",
"bz",
".",
"data",
"(",
"v",
",",
"dshape",
"=",
"k",
".",
"dshape",
")",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"resources",
")",
"}",
")"
] | Bind a Blaze expression to resources.
Parameters
----------
expr : bz.Expr
The expression to which we want to bind resources.
resources : dict[bz.Symbol -> any]
Mapping from the loadable terms of ``expr`` to actual data resources.
Returns
-------
bound_expr : bz.Expr
``expr`` with bound resources. | [
"Bind",
"a",
"Blaze",
"expression",
"to",
"resources",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L1038-L1063 |
26,137 | quantopian/zipline | zipline/pipeline/loaders/blaze/core.py | get_materialized_checkpoints | def get_materialized_checkpoints(checkpoints, colnames, lower_dt, odo_kwargs):
"""
Computes a lower bound and a DataFrame of checkpoints.
Parameters
----------
checkpoints : Expr
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
colnames : iterable of str
The names of the columns for which checkpoints should be computed.
lower_dt : pd.Timestamp
The lower date being queried for that serves as an upper bound for
checkpoints.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
"""
if checkpoints is not None:
ts = checkpoints[TS_FIELD_NAME]
checkpoints_ts = odo(
ts[ts < lower_dt].max(),
pd.Timestamp,
**odo_kwargs
)
if pd.isnull(checkpoints_ts):
# We don't have a checkpoint for before our start date so just
# don't constrain the lower date.
materialized_checkpoints = pd.DataFrame(columns=colnames)
lower = None
else:
materialized_checkpoints = odo(
checkpoints[ts == checkpoints_ts][colnames],
pd.DataFrame,
**odo_kwargs
)
lower = checkpoints_ts
else:
materialized_checkpoints = pd.DataFrame(columns=colnames)
lower = None # we don't have a good lower date constraint
return lower, materialized_checkpoints | python | def get_materialized_checkpoints(checkpoints, colnames, lower_dt, odo_kwargs):
"""
Computes a lower bound and a DataFrame of checkpoints.
Parameters
----------
checkpoints : Expr
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
colnames : iterable of str
The names of the columns for which checkpoints should be computed.
lower_dt : pd.Timestamp
The lower date being queried for that serves as an upper bound for
checkpoints.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
"""
if checkpoints is not None:
ts = checkpoints[TS_FIELD_NAME]
checkpoints_ts = odo(
ts[ts < lower_dt].max(),
pd.Timestamp,
**odo_kwargs
)
if pd.isnull(checkpoints_ts):
# We don't have a checkpoint for before our start date so just
# don't constrain the lower date.
materialized_checkpoints = pd.DataFrame(columns=colnames)
lower = None
else:
materialized_checkpoints = odo(
checkpoints[ts == checkpoints_ts][colnames],
pd.DataFrame,
**odo_kwargs
)
lower = checkpoints_ts
else:
materialized_checkpoints = pd.DataFrame(columns=colnames)
lower = None # we don't have a good lower date constraint
return lower, materialized_checkpoints | [
"def",
"get_materialized_checkpoints",
"(",
"checkpoints",
",",
"colnames",
",",
"lower_dt",
",",
"odo_kwargs",
")",
":",
"if",
"checkpoints",
"is",
"not",
"None",
":",
"ts",
"=",
"checkpoints",
"[",
"TS_FIELD_NAME",
"]",
"checkpoints_ts",
"=",
"odo",
"(",
"ts",
"[",
"ts",
"<",
"lower_dt",
"]",
".",
"max",
"(",
")",
",",
"pd",
".",
"Timestamp",
",",
"*",
"*",
"odo_kwargs",
")",
"if",
"pd",
".",
"isnull",
"(",
"checkpoints_ts",
")",
":",
"# We don't have a checkpoint for before our start date so just",
"# don't constrain the lower date.",
"materialized_checkpoints",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"colnames",
")",
"lower",
"=",
"None",
"else",
":",
"materialized_checkpoints",
"=",
"odo",
"(",
"checkpoints",
"[",
"ts",
"==",
"checkpoints_ts",
"]",
"[",
"colnames",
"]",
",",
"pd",
".",
"DataFrame",
",",
"*",
"*",
"odo_kwargs",
")",
"lower",
"=",
"checkpoints_ts",
"else",
":",
"materialized_checkpoints",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"colnames",
")",
"lower",
"=",
"None",
"# we don't have a good lower date constraint",
"return",
"lower",
",",
"materialized_checkpoints"
] | Computes a lower bound and a DataFrame of checkpoints.
Parameters
----------
checkpoints : Expr
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
colnames : iterable of str
The names of the columns for which checkpoints should be computed.
lower_dt : pd.Timestamp
The lower date being queried for that serves as an upper bound for
checkpoints.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``. | [
"Computes",
"a",
"lower",
"bound",
"and",
"a",
"DataFrame",
"checkpoints",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L1066-L1105 |
26,138 | quantopian/zipline | zipline/pipeline/loaders/blaze/core.py | ffill_query_in_range | def ffill_query_in_range(expr,
lower,
upper,
checkpoints=None,
odo_kwargs=None,
ts_field=TS_FIELD_NAME):
"""Query a blaze expression in a given time range properly forward filling
from values that fall before the lower date.
Parameters
----------
expr : Expr
Bound blaze expression.
lower : datetime
The lower date to query for.
upper : datetime
The upper date to query for.
checkpoints : Expr, optional
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
ts_field : str, optional
The name of the timestamp field in the given blaze expression.
Returns
-------
raw : pd.DataFrame
A strict dataframe for the data in the given date range. This may
start before the requested start date if a value is needed to ffill.
"""
odo_kwargs = odo_kwargs or {}
computed_lower, materialized_checkpoints = get_materialized_checkpoints(
checkpoints,
expr.fields,
lower,
odo_kwargs,
)
pred = expr[ts_field] <= upper
if computed_lower is not None:
# only constrain the lower date if we computed a new lower date
pred &= expr[ts_field] >= computed_lower
raw = pd.concat(
(
materialized_checkpoints,
odo(
expr[pred],
pd.DataFrame,
**odo_kwargs
),
),
ignore_index=True,
)
raw.loc[:, ts_field] = raw.loc[:, ts_field].astype('datetime64[ns]')
return raw | python | def ffill_query_in_range(expr,
lower,
upper,
checkpoints=None,
odo_kwargs=None,
ts_field=TS_FIELD_NAME):
"""Query a blaze expression in a given time range properly forward filling
from values that fall before the lower date.
Parameters
----------
expr : Expr
Bound blaze expression.
lower : datetime
The lower date to query for.
upper : datetime
The upper date to query for.
checkpoints : Expr, optional
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
ts_field : str, optional
The name of the timestamp field in the given blaze expression.
Returns
-------
raw : pd.DataFrame
A strict dataframe for the data in the given date range. This may
start before the requested start date if a value is needed to ffill.
"""
odo_kwargs = odo_kwargs or {}
computed_lower, materialized_checkpoints = get_materialized_checkpoints(
checkpoints,
expr.fields,
lower,
odo_kwargs,
)
pred = expr[ts_field] <= upper
if computed_lower is not None:
# only constrain the lower date if we computed a new lower date
pred &= expr[ts_field] >= computed_lower
raw = pd.concat(
(
materialized_checkpoints,
odo(
expr[pred],
pd.DataFrame,
**odo_kwargs
),
),
ignore_index=True,
)
raw.loc[:, ts_field] = raw.loc[:, ts_field].astype('datetime64[ns]')
return raw | [
"def",
"ffill_query_in_range",
"(",
"expr",
",",
"lower",
",",
"upper",
",",
"checkpoints",
"=",
"None",
",",
"odo_kwargs",
"=",
"None",
",",
"ts_field",
"=",
"TS_FIELD_NAME",
")",
":",
"odo_kwargs",
"=",
"odo_kwargs",
"or",
"{",
"}",
"computed_lower",
",",
"materialized_checkpoints",
"=",
"get_materialized_checkpoints",
"(",
"checkpoints",
",",
"expr",
".",
"fields",
",",
"lower",
",",
"odo_kwargs",
",",
")",
"pred",
"=",
"expr",
"[",
"ts_field",
"]",
"<=",
"upper",
"if",
"computed_lower",
"is",
"not",
"None",
":",
"# only constrain the lower date if we computed a new lower date",
"pred",
"&=",
"expr",
"[",
"ts_field",
"]",
">=",
"computed_lower",
"raw",
"=",
"pd",
".",
"concat",
"(",
"(",
"materialized_checkpoints",
",",
"odo",
"(",
"expr",
"[",
"pred",
"]",
",",
"pd",
".",
"DataFrame",
",",
"*",
"*",
"odo_kwargs",
")",
",",
")",
",",
"ignore_index",
"=",
"True",
",",
")",
"raw",
".",
"loc",
"[",
":",
",",
"ts_field",
"]",
"=",
"raw",
".",
"loc",
"[",
":",
",",
"ts_field",
"]",
".",
"astype",
"(",
"'datetime64[ns]'",
")",
"return",
"raw"
] | Query a blaze expression in a given time range properly forward filling
from values that fall before the lower date.
Parameters
----------
expr : Expr
Bound blaze expression.
lower : datetime
The lower date to query for.
upper : datetime
The upper date to query for.
checkpoints : Expr, optional
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
ts_field : str, optional
The name of the timestamp field in the given blaze expression.
Returns
-------
raw : pd.DataFrame
A strict dataframe for the data in the given date range. This may
start before the requested start date if a value is needed to ffill. | [
"Query",
"a",
"blaze",
"expression",
"in",
"a",
"given",
"time",
"range",
"properly",
"forward",
"filling",
"from",
"values",
"that",
"fall",
"before",
"the",
"lower",
"date",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L1108-L1165 |
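The row above documents `ffill_query_in_range`; a hedged usage sketch follows. The sqlite URI, table name, and date window are invented for illustration, and `events` is assumed to be a blaze expression carrying a `timestamp` column (the default `TS_FIELD_NAME`).

```python
# Hypothetical usage sketch of ffill_query_in_range; the resource URI and the
# date window are made up.
import blaze as bz
import pandas as pd

from zipline.pipeline.loaders.blaze.core import ffill_query_in_range

events = bz.data('sqlite:///events.db::events')  # hypothetical events table

raw = ffill_query_in_range(
    events,
    lower=pd.Timestamp('2016-01-04'),
    upper=pd.Timestamp('2016-01-08'),
    # no checkpoints expression here, so the lower bound is not tightened
)
# `raw` may include rows stamped before 2016-01-04 that are needed so the
# requested window can be forward filled.
```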
26,139 | quantopian/zipline | zipline/pipeline/loaders/blaze/core.py | BlazeLoader.register_dataset | def register_dataset(self,
dataset,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None):
"""Explicitly map a datset to a collection of blaze expressions.
Parameters
----------
dataset : DataSet
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
"""
expr_data = ExprData(
expr,
deltas,
checkpoints,
odo_kwargs,
)
for column in dataset.columns:
self._table_expressions[column] = expr_data | python | def register_dataset(self,
dataset,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None):
"""Explicitly map a datset to a collection of blaze expressions.
Parameters
----------
dataset : DataSet
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
"""
expr_data = ExprData(
expr,
deltas,
checkpoints,
odo_kwargs,
)
for column in dataset.columns:
self._table_expressions[column] = expr_data | [
"def",
"register_dataset",
"(",
"self",
",",
"dataset",
",",
"expr",
",",
"deltas",
"=",
"None",
",",
"checkpoints",
"=",
"None",
",",
"odo_kwargs",
"=",
"None",
")",
":",
"expr_data",
"=",
"ExprData",
"(",
"expr",
",",
"deltas",
",",
"checkpoints",
",",
"odo_kwargs",
",",
")",
"for",
"column",
"in",
"dataset",
".",
"columns",
":",
"self",
".",
"_table_expressions",
"[",
"column",
"]",
"=",
"expr_data"
] | Explicitly map a dataset to a collection of blaze expressions.
Parameters
----------
dataset : DataSet
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze` | [
"Explicitly",
"map",
"a",
"datset",
"to",
"a",
"collection",
"of",
"blaze",
"expressions",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L847-L879 |
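As a hedged sketch of `register_dataset`, the snippet below wires a small hand-built `DataSet` to a blaze expression wrapped around an in-memory frame. The dataset class, column layout, and sample rows are assumptions made for illustration; in practice `from_blaze` (referenced in the See Also above) usually builds this mapping for you.

```python
# Minimal sketch, assuming blaze can wrap the in-memory frame directly; the
# DataSet subclass and the sample rows are invented.
import blaze as bz
import pandas as pd

from zipline.pipeline.data import Column, DataSet
from zipline.pipeline.loaders.blaze import BlazeLoader


class MyDataSet(DataSet):
    value = Column(dtype=float)


baseline = bz.data(pd.DataFrame({
    'sid': [1, 1],
    'value': [10.0, 11.0],
    'asof_date': pd.to_datetime(['2016-01-04', '2016-01-05']),
    'timestamp': pd.to_datetime(['2016-01-04', '2016-01-05']),
}))

loader = BlazeLoader()
# every column of MyDataSet now maps to the same ExprData record
loader.register_dataset(MyDataSet, baseline)
```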
26,140 | quantopian/zipline | zipline/pipeline/loaders/blaze/core.py | BlazeLoader.register_column | def register_column(self,
column,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None):
"""Explicitly map a single bound column to a collection of blaze
expressions. The expressions need to have ``timestamp`` and ``as_of``
columns.
Parameters
----------
column : BoundColumn
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
"""
self._table_expressions[column] = ExprData(
expr,
deltas,
checkpoints,
odo_kwargs,
) | python | def register_column(self,
column,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None):
"""Explicitly map a single bound column to a collection of blaze
expressions. The expressions need to have ``timestamp`` and ``as_of``
columns.
Parameters
----------
column : BoundColumn
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
"""
self._table_expressions[column] = ExprData(
expr,
deltas,
checkpoints,
odo_kwargs,
) | [
"def",
"register_column",
"(",
"self",
",",
"column",
",",
"expr",
",",
"deltas",
"=",
"None",
",",
"checkpoints",
"=",
"None",
",",
"odo_kwargs",
"=",
"None",
")",
":",
"self",
".",
"_table_expressions",
"[",
"column",
"]",
"=",
"ExprData",
"(",
"expr",
",",
"deltas",
",",
"checkpoints",
",",
"odo_kwargs",
",",
")"
] | Explicitly map a single bound column to a collection of blaze
expressions. The expressions need to have ``timestamp`` and ``as_of``
columns.
Parameters
----------
column : BoundColumn
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze` | [
"Explicitly",
"map",
"a",
"single",
"bound",
"column",
"to",
"a",
"collection",
"of",
"blaze",
"expressions",
".",
"The",
"expressions",
"need",
"to",
"have",
"timestamp",
"and",
"as_of",
"columns",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L881-L913 |
26,141 | quantopian/zipline | zipline/assets/assets.py | merge_ownership_periods | def merge_ownership_periods(mappings):
"""
Given a dict of mappings where the values are lists of
OwnershipPeriod objects, returns a dict with the same structure with
new OwnershipPeriod objects adjusted so that the periods have no
gaps.
Orders the periods chronologically, and pushes forward the end date
of each period to match the start date of the following period. The
end date of the last period is pushed forward to the max Timestamp.
"""
return valmap(
lambda v: tuple(
OwnershipPeriod(
a.start,
b.start,
a.sid,
a.value,
) for a, b in sliding_window(
2,
concatv(
sorted(v),
# concat with a fake ownership object to make the last
# end date be max timestamp
[OwnershipPeriod(
pd.Timestamp.max.tz_localize('utc'),
None,
None,
None,
)],
),
)
),
mappings,
) | python | def merge_ownership_periods(mappings):
"""
Given a dict of mappings where the values are lists of
OwnershipPeriod objects, returns a dict with the same structure with
new OwnershipPeriod objects adjusted so that the periods have no
gaps.
Orders the periods chronologically, and pushes forward the end date
of each period to match the start date of the following period. The
end date of the last period is pushed forward to the max Timestamp.
"""
return valmap(
lambda v: tuple(
OwnershipPeriod(
a.start,
b.start,
a.sid,
a.value,
) for a, b in sliding_window(
2,
concatv(
sorted(v),
# concat with a fake ownership object to make the last
# end date be max timestamp
[OwnershipPeriod(
pd.Timestamp.max.tz_localize('utc'),
None,
None,
None,
)],
),
)
),
mappings,
) | [
"def",
"merge_ownership_periods",
"(",
"mappings",
")",
":",
"return",
"valmap",
"(",
"lambda",
"v",
":",
"tuple",
"(",
"OwnershipPeriod",
"(",
"a",
".",
"start",
",",
"b",
".",
"start",
",",
"a",
".",
"sid",
",",
"a",
".",
"value",
",",
")",
"for",
"a",
",",
"b",
"in",
"sliding_window",
"(",
"2",
",",
"concatv",
"(",
"sorted",
"(",
"v",
")",
",",
"# concat with a fake ownership object to make the last",
"# end date be max timestamp",
"[",
"OwnershipPeriod",
"(",
"pd",
".",
"Timestamp",
".",
"max",
".",
"tz_localize",
"(",
"'utc'",
")",
",",
"None",
",",
"None",
",",
"None",
",",
")",
"]",
",",
")",
",",
")",
")",
",",
"mappings",
",",
")"
] | Given a dict of mappings where the values are lists of
OwnershipPeriod objects, returns a dict with the same structure with
new OwnershipPeriod objects adjusted so that the periods have no
gaps.
Orders the periods chronologically, and pushes forward the end date
of each period to match the start date of the following period. The
end date of the last period is pushed forward to the max Timestamp. | [
"Given",
"a",
"dict",
"of",
"mappings",
"where",
"the",
"values",
"are",
"lists",
"of",
"OwnershipPeriod",
"objects",
"returns",
"a",
"dict",
"with",
"the",
"same",
"structure",
"with",
"new",
"OwnershipPeriod",
"objects",
"adjusted",
"so",
"that",
"the",
"periods",
"have",
"no",
"gaps",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L104-L138 |
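The gap-closing behaviour described in the row above can be seen with a tiny hand-built mapping. The `OwnershipPeriod` stand-in below mirrors zipline's `(start, end, sid, value)` field order, and the symbols, sids, and dates are invented.

```python
# Standalone sketch: two disjoint ownership periods become gapless after
# merging, and the last period is extended to pd.Timestamp.max.
from collections import namedtuple

import pandas as pd

from zipline.assets.assets import merge_ownership_periods

OwnershipPeriod = namedtuple('OwnershipPeriod', ['start', 'end', 'sid', 'value'])

mappings = {
    ('AAPL', ''): [
        OwnershipPeriod(pd.Timestamp('2010-01-04', tz='utc'),
                        pd.Timestamp('2012-06-01', tz='utc'), 1, 'AAPL'),
        OwnershipPeriod(pd.Timestamp('2014-01-02', tz='utc'),
                        pd.Timestamp('2015-01-02', tz='utc'), 2, 'AAPL'),
    ],
}

merged = merge_ownership_periods(mappings)[('AAPL', '')]
assert merged[0].end == merged[1].start          # the gap is closed
assert merged[1].end == pd.Timestamp.max.tz_localize('utc')
```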
26,142 | quantopian/zipline | zipline/assets/assets.py | build_ownership_map | def build_ownership_map(table, key_from_row, value_from_row):
"""
Builds a dict mapping to lists of OwnershipPeriods, from a db table.
"""
return _build_ownership_map_from_rows(
sa.select(table.c).execute().fetchall(),
key_from_row,
value_from_row,
) | python | def build_ownership_map(table, key_from_row, value_from_row):
"""
Builds a dict mapping to lists of OwnershipPeriods, from a db table.
"""
return _build_ownership_map_from_rows(
sa.select(table.c).execute().fetchall(),
key_from_row,
value_from_row,
) | [
"def",
"build_ownership_map",
"(",
"table",
",",
"key_from_row",
",",
"value_from_row",
")",
":",
"return",
"_build_ownership_map_from_rows",
"(",
"sa",
".",
"select",
"(",
"table",
".",
"c",
")",
".",
"execute",
"(",
")",
".",
"fetchall",
"(",
")",
",",
"key_from_row",
",",
"value_from_row",
",",
")"
] | Builds a dict mapping to lists of OwnershipPeriods, from a db table. | [
"Builds",
"a",
"dict",
"mapping",
"to",
"lists",
"of",
"OwnershipPeriods",
"from",
"a",
"db",
"table",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L159-L167 |
26,143 | quantopian/zipline | zipline/assets/assets.py | build_grouped_ownership_map | def build_grouped_ownership_map(table,
key_from_row,
value_from_row,
group_key):
"""
Builds a dict mapping group keys to maps of keys to lists of
OwnershipPeriods, from a db table.
"""
grouped_rows = groupby(
group_key,
sa.select(table.c).execute().fetchall(),
)
return {
key: _build_ownership_map_from_rows(
rows,
key_from_row,
value_from_row,
)
for key, rows in grouped_rows.items()
} | python | def build_grouped_ownership_map(table,
key_from_row,
value_from_row,
group_key):
"""
Builds a dict mapping group keys to maps of keys to lists of
OwnershipPeriods, from a db table.
"""
grouped_rows = groupby(
group_key,
sa.select(table.c).execute().fetchall(),
)
return {
key: _build_ownership_map_from_rows(
rows,
key_from_row,
value_from_row,
)
for key, rows in grouped_rows.items()
} | [
"def",
"build_grouped_ownership_map",
"(",
"table",
",",
"key_from_row",
",",
"value_from_row",
",",
"group_key",
")",
":",
"grouped_rows",
"=",
"groupby",
"(",
"group_key",
",",
"sa",
".",
"select",
"(",
"table",
".",
"c",
")",
".",
"execute",
"(",
")",
".",
"fetchall",
"(",
")",
",",
")",
"return",
"{",
"key",
":",
"_build_ownership_map_from_rows",
"(",
"rows",
",",
"key_from_row",
",",
"value_from_row",
",",
")",
"for",
"key",
",",
"rows",
"in",
"grouped_rows",
".",
"items",
"(",
")",
"}"
] | Builds a dict mapping group keys to maps of keys to lists of
OwnershipPeriods, from a db table. | [
"Builds",
"a",
"dict",
"mapping",
"group",
"keys",
"to",
"maps",
"of",
"keys",
"to",
"to",
"lists",
"of",
"OwnershipPeriods",
"from",
"a",
"db",
"table",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L170-L189 |
26,144 | quantopian/zipline | zipline/assets/assets.py | _filter_kwargs | def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None} | python | def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None} | [
"def",
"_filter_kwargs",
"(",
"names",
",",
"dict_",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"dict_",
".",
"items",
"(",
")",
"if",
"k",
"in",
"names",
"and",
"v",
"is",
"not",
"None",
"}"
] | Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None. | [
"Filter",
"out",
"kwargs",
"from",
"a",
"dictionary",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L193-L209 |
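A minimal illustration of the selection rule in `_filter_kwargs`: only keys listed in `names` survive, and `None` values are dropped. The row contents are made up.

```python
# Same comprehension as _filter_kwargs, applied to an invented row dict.
names = {'sid', 'symbol', 'exchange'}
row = {'sid': 1, 'symbol': 'AAPL', 'exchange': None, 'unused_column': 42}

kwargs = {k: v for k, v in row.items() if k in names and v is not None}
assert kwargs == {'sid': 1, 'symbol': 'AAPL'}
```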
26,145 | quantopian/zipline | zipline/assets/assets.py | _convert_asset_timestamp_fields | def _convert_asset_timestamp_fields(dict_):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
"""
for key in _asset_timestamp_fields & viewkeys(dict_):
value = pd.Timestamp(dict_[key], tz='UTC')
dict_[key] = None if isnull(value) else value
return dict_ | python | def _convert_asset_timestamp_fields(dict_):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
"""
for key in _asset_timestamp_fields & viewkeys(dict_):
value = pd.Timestamp(dict_[key], tz='UTC')
dict_[key] = None if isnull(value) else value
return dict_ | [
"def",
"_convert_asset_timestamp_fields",
"(",
"dict_",
")",
":",
"for",
"key",
"in",
"_asset_timestamp_fields",
"&",
"viewkeys",
"(",
"dict_",
")",
":",
"value",
"=",
"pd",
".",
"Timestamp",
"(",
"dict_",
"[",
"key",
"]",
",",
"tz",
"=",
"'UTC'",
")",
"dict_",
"[",
"key",
"]",
"=",
"None",
"if",
"isnull",
"(",
"value",
")",
"else",
"value",
"return",
"dict_"
] | Takes in a dict of Asset init args and converts dates to pd.Timestamps | [
"Takes",
"in",
"a",
"dict",
"of",
"Asset",
"init",
"args",
"and",
"converts",
"dates",
"to",
"pd",
".",
"Timestamps"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L216-L223 |
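The snippet below sketches the same conversion on a toy dict. The set of timestamp field names and the sample values are assumptions for illustration; the real `_asset_timestamp_fields` constant lives in the module.

```python
# Sketch of the date coercion: string dates become tz-aware UTC Timestamps
# before an Asset is constructed from the dict.
import pandas as pd

asset_timestamp_fields = {'start_date', 'end_date', 'auto_close_date'}  # illustrative
dict_ = {'sid': 1, 'start_date': '2014-01-02', 'end_date': '2016-12-30'}

for key in asset_timestamp_fields & dict_.keys():
    dict_[key] = pd.Timestamp(dict_[key], tz='UTC')

assert dict_['start_date'] == pd.Timestamp('2014-01-02', tz='UTC')
```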
26,146 | quantopian/zipline | zipline/assets/assets.py | was_active | def was_active(reference_date_value, asset):
"""
Whether or not `asset` was active at the time corresponding to
`reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
asset : Asset
The asset object to check.
Returns
-------
was_active : bool
Whether or not the `asset` existed at the specified time.
"""
return (
asset.start_date.value
<= reference_date_value
<= asset.end_date.value
) | python | def was_active(reference_date_value, asset):
"""
Whether or not `asset` was active at the time corresponding to
`reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
asset : Asset
The asset object to check.
Returns
-------
was_active : bool
Whether or not the `asset` existed at the specified time.
"""
return (
asset.start_date.value
<= reference_date_value
<= asset.end_date.value
) | [
"def",
"was_active",
"(",
"reference_date_value",
",",
"asset",
")",
":",
"return",
"(",
"asset",
".",
"start_date",
".",
"value",
"<=",
"reference_date_value",
"<=",
"asset",
".",
"end_date",
".",
"value",
")"
] | Whether or not `asset` was active at the time corresponding to
`reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
asset : Asset
The asset object to check.
Returns
-------
was_active : bool
Whether or not the `asset` existed at the specified time. | [
"Whether",
"or",
"not",
"asset",
"was",
"active",
"at",
"the",
"time",
"corresponding",
"to",
"reference_date_value",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L1568-L1591 |
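Because `was_active` only touches `start_date.value` and `end_date.value`, a namedtuple stand-in is enough to exercise it. The dates below are invented.

```python
# Nanosecond comparison used by was_active, driven through the real function
# with a stand-in asset object.
from collections import namedtuple

import pandas as pd

from zipline.assets.assets import was_active

FakeAsset = namedtuple('FakeAsset', ['start_date', 'end_date'])
asset = FakeAsset(pd.Timestamp('2014-01-02', tz='UTC'),
                  pd.Timestamp('2016-12-30', tz='UTC'))

reference = pd.Timestamp('2015-06-01', tz='UTC').value  # ns since the epoch
assert was_active(reference, asset)
assert not was_active(pd.Timestamp('2017-01-03', tz='UTC').value, asset)
```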
26,147 | quantopian/zipline | zipline/assets/assets.py | AssetFinder.lookup_asset_types | def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found | python | def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found | [
"def",
"lookup_asset_types",
"(",
"self",
",",
"sids",
")",
":",
"found",
"=",
"{",
"}",
"missing",
"=",
"set",
"(",
")",
"for",
"sid",
"in",
"sids",
":",
"try",
":",
"found",
"[",
"sid",
"]",
"=",
"self",
".",
"_asset_type_cache",
"[",
"sid",
"]",
"except",
"KeyError",
":",
"missing",
".",
"add",
"(",
"sid",
")",
"if",
"not",
"missing",
":",
"return",
"found",
"router_cols",
"=",
"self",
".",
"asset_router",
".",
"c",
"for",
"assets",
"in",
"group_into_chunks",
"(",
"missing",
")",
":",
"query",
"=",
"sa",
".",
"select",
"(",
"(",
"router_cols",
".",
"sid",
",",
"router_cols",
".",
"asset_type",
")",
")",
".",
"where",
"(",
"self",
".",
"asset_router",
".",
"c",
".",
"sid",
".",
"in_",
"(",
"map",
"(",
"int",
",",
"assets",
")",
")",
")",
"for",
"sid",
",",
"type_",
"in",
"query",
".",
"execute",
"(",
")",
".",
"fetchall",
"(",
")",
":",
"missing",
".",
"remove",
"(",
"sid",
")",
"found",
"[",
"sid",
"]",
"=",
"self",
".",
"_asset_type_cache",
"[",
"sid",
"]",
"=",
"type_",
"for",
"sid",
"in",
"missing",
":",
"found",
"[",
"sid",
"]",
"=",
"self",
".",
"_asset_type_cache",
"[",
"sid",
"]",
"=",
"None",
"return",
"found"
] | Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids. | [
"Retrieve",
"asset",
"types",
"for",
"a",
"list",
"of",
"sids",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L405-L443 |
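A hedged usage sketch: `asset_finder` is assumed to be an `AssetFinder` backed by an assets database that contains sids 1 and 2, and the returned type strings are illustrative.

```python
# Unknown sids come back mapped to None rather than raising.
types = asset_finder.lookup_asset_types([1, 2, 10 ** 9])
# e.g. {1: 'equity', 2: 'future', 1000000000: None}
```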
26,148 | quantopian/zipline | zipline/assets/assets.py | AssetFinder._select_most_recent_symbols_chunk | def _select_most_recent_symbols_chunk(self, sid_group):
"""Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids.
"""
cols = self.equity_symbol_mappings.c
# These are the columns we actually want.
data_cols = (cols.sid,) + tuple(cols[name] for name in symbol_columns)
# Also select the max of end_date so that all non-grouped fields take
# on the value associated with the max end_date. The SQLite docs say
# this:
#
# When the min() or max() aggregate functions are used in an aggregate
# query, all bare columns in the result set take values from the input
# row which also contains the minimum or maximum. Only the built-in
# min() and max() functions work this way.
#
# See https://www.sqlite.org/lang_select.html#resultset, for more info.
to_select = data_cols + (sa.func.max(cols.end_date),)
return sa.select(
to_select,
).where(
cols.sid.in_(map(int, sid_group))
).group_by(
cols.sid,
) | python | def _select_most_recent_symbols_chunk(self, sid_group):
"""Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids.
"""
cols = self.equity_symbol_mappings.c
# These are the columns we actually want.
data_cols = (cols.sid,) + tuple(cols[name] for name in symbol_columns)
# Also select the max of end_date so that all non-grouped fields take
# on the value associated with the max end_date. The SQLite docs say
# this:
#
# When the min() or max() aggregate functions are used in an aggregate
# query, all bare columns in the result set take values from the input
# row which also contains the minimum or maximum. Only the built-in
# min() and max() functions work this way.
#
# See https://www.sqlite.org/lang_select.html#resultset, for more info.
to_select = data_cols + (sa.func.max(cols.end_date),)
return sa.select(
to_select,
).where(
cols.sid.in_(map(int, sid_group))
).group_by(
cols.sid,
) | [
"def",
"_select_most_recent_symbols_chunk",
"(",
"self",
",",
"sid_group",
")",
":",
"cols",
"=",
"self",
".",
"equity_symbol_mappings",
".",
"c",
"# These are the columns we actually want.",
"data_cols",
"=",
"(",
"cols",
".",
"sid",
",",
")",
"+",
"tuple",
"(",
"cols",
"[",
"name",
"]",
"for",
"name",
"in",
"symbol_columns",
")",
"# Also select the max of end_date so that all non-grouped fields take",
"# on the value associated with the max end_date. The SQLite docs say",
"# this:",
"#",
"# When the min() or max() aggregate functions are used in an aggregate",
"# query, all bare columns in the result set take values from the input",
"# row which also contains the minimum or maximum. Only the built-in",
"# min() and max() functions work this way.",
"#",
"# See https://www.sqlite.org/lang_select.html#resultset, for more info.",
"to_select",
"=",
"data_cols",
"+",
"(",
"sa",
".",
"func",
".",
"max",
"(",
"cols",
".",
"end_date",
")",
",",
")",
"return",
"sa",
".",
"select",
"(",
"to_select",
",",
")",
".",
"where",
"(",
"cols",
".",
"sid",
".",
"in_",
"(",
"map",
"(",
"int",
",",
"sid_group",
")",
")",
")",
".",
"group_by",
"(",
"cols",
".",
"sid",
",",
")"
] | Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids. | [
"Retrieve",
"the",
"most",
"recent",
"symbol",
"for",
"a",
"set",
"of",
"sids",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L600-L647 |
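The SQLite behaviour that the comment in the row above relies on can be demonstrated in isolation with the standard library.

```python
# With max() in an aggregate query, SQLite fills bare columns from the row
# that holds the maximum, which is how the latest symbol per sid is selected.
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE mappings (sid INTEGER, symbol TEXT, end_date INTEGER)')
con.executemany(
    'INSERT INTO mappings VALUES (?, ?, ?)',
    [(1, 'OLD', 10), (1, 'NEW', 20), (2, 'XYZ', 5)],
)
rows = con.execute(
    'SELECT sid, symbol, MAX(end_date) FROM mappings GROUP BY sid ORDER BY sid'
).fetchall()
assert rows == [(1, 'NEW', 20), (2, 'XYZ', 5)]  # symbol taken from the max row
```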
26,149 | quantopian/zipline | zipline/assets/assets.py | AssetFinder._retrieve_assets | def _retrieve_assets(self, sids, asset_tbl, asset_type):
"""
Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
----------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets.
"""
# Fastpath for empty request.
if not sids:
return {}
cache = self._asset_cache
hits = {}
querying_equities = issubclass(asset_type, Equity)
filter_kwargs = (
_filter_equity_kwargs
if querying_equities else
_filter_future_kwargs
)
rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
for row in rows:
sid = row['sid']
asset = asset_type(**filter_kwargs(row))
hits[sid] = cache[sid] = asset
# If we get here, it means something in our code thought that a
# particular sid was an equity/future and called this function with a
# concrete type, but we couldn't actually resolve the asset. This is
# an error in our code, not a user-input error.
misses = tuple(set(sids) - viewkeys(hits))
if misses:
if querying_equities:
raise EquitiesNotFound(sids=misses)
else:
raise FutureContractsNotFound(sids=misses)
return hits | python | def _retrieve_assets(self, sids, asset_tbl, asset_type):
"""
Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
---------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets.
"""
# Fastpath for empty request.
if not sids:
return {}
cache = self._asset_cache
hits = {}
querying_equities = issubclass(asset_type, Equity)
filter_kwargs = (
_filter_equity_kwargs
if querying_equities else
_filter_future_kwargs
)
rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
for row in rows:
sid = row['sid']
asset = asset_type(**filter_kwargs(row))
hits[sid] = cache[sid] = asset
# If we get here, it means something in our code thought that a
# particular sid was an equity/future and called this function with a
# concrete type, but we couldn't actually resolve the asset. This is
# an error in our code, not a user-input error.
misses = tuple(set(sids) - viewkeys(hits))
if misses:
if querying_equities:
raise EquitiesNotFound(sids=misses)
else:
raise FutureContractsNotFound(sids=misses)
return hits | [
"def",
"_retrieve_assets",
"(",
"self",
",",
"sids",
",",
"asset_tbl",
",",
"asset_type",
")",
":",
"# Fastpath for empty request.",
"if",
"not",
"sids",
":",
"return",
"{",
"}",
"cache",
"=",
"self",
".",
"_asset_cache",
"hits",
"=",
"{",
"}",
"querying_equities",
"=",
"issubclass",
"(",
"asset_type",
",",
"Equity",
")",
"filter_kwargs",
"=",
"(",
"_filter_equity_kwargs",
"if",
"querying_equities",
"else",
"_filter_future_kwargs",
")",
"rows",
"=",
"self",
".",
"_retrieve_asset_dicts",
"(",
"sids",
",",
"asset_tbl",
",",
"querying_equities",
")",
"for",
"row",
"in",
"rows",
":",
"sid",
"=",
"row",
"[",
"'sid'",
"]",
"asset",
"=",
"asset_type",
"(",
"*",
"*",
"filter_kwargs",
"(",
"row",
")",
")",
"hits",
"[",
"sid",
"]",
"=",
"cache",
"[",
"sid",
"]",
"=",
"asset",
"# If we get here, it means something in our code thought that a",
"# particular sid was an equity/future and called this function with a",
"# concrete type, but we couldn't actually resolve the asset. This is",
"# an error in our code, not a user-input error.",
"misses",
"=",
"tuple",
"(",
"set",
"(",
"sids",
")",
"-",
"viewkeys",
"(",
"hits",
")",
")",
"if",
"misses",
":",
"if",
"querying_equities",
":",
"raise",
"EquitiesNotFound",
"(",
"sids",
"=",
"misses",
")",
"else",
":",
"raise",
"FutureContractsNotFound",
"(",
"sids",
"=",
"misses",
")",
"return",
"hits"
] | Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
----------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets. | [
"Internal",
"function",
"for",
"loading",
"assets",
"from",
"a",
"table",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L689-L740 |
26,150 | quantopian/zipline | zipline/assets/assets.py | AssetFinder._lookup_symbol_strict | def _lookup_symbol_strict(self,
ownership_map,
multi_country,
symbol,
as_of_date):
"""
Resolve a symbol to an asset object without fuzzy matching.
Parameters
----------
ownership_map : dict[(str, str), list[OwnershipPeriod]]
The mapping from split symbols to ownership periods.
multi_country : bool
Does this mapping span multiple countries?
symbol : str
The symbol to look up.
as_of_date : datetime or None
If multiple assets have held this sid, which day should the
resolution be checked against? If this value is None and multiple
sids have held the ticker, then a MultipleSymbolsFound error will
be raised.
Returns
-------
asset : Asset
The asset that held the given symbol.
Raises
------
SymbolNotFound
Raised when the symbol or symbol as_of_date pair do not map to
any assets.
MultipleSymbolsFound
Raised when multiple assets held the symbol. This happens if
multiple assets held the symbol at disjoint times and
``as_of_date`` is None, or if multiple assets held the symbol at
the same time and ``multi_country`` is True.
Notes
-----
The resolution algorithm is as follows:
- Split the symbol into the company and share class component.
- Do a dictionary lookup of the
``(company_symbol, share_class_symbol)`` in the provided ownership
map.
- If there is no entry in the dictionary, we don't know about this
symbol so raise a ``SymbolNotFound`` error.
- If ``as_of_date`` is None:
- If there is more than one owner, raise
``MultipleSymbolsFound``
- Otherwise, because the list mapped to a symbol cannot be empty,
return the single asset.
- Iterate through all of the owners:
- If the ``as_of_date`` is between the start and end of the ownership
period:
- If multi_country is False, return the found asset.
- Otherwise, put the asset in a list.
- At the end of the loop, if there are no candidate assets, raise a
``SymbolNotFound``.
- If there is exactly one candidate, return it.
- Otherwise, raise ``MultipleSymbolsFound`` because the ticker is not
unique across countries.
"""
# split the symbol into the components, if there are no
# company/share class parts then share_class_symbol will be empty
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = ownership_map[company_symbol, share_class_symbol]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held this symbol
raise SymbolNotFound(symbol=symbol)
if not as_of_date:
# exactly one equity has ever held this symbol, we may resolve
# without the date
if len(owners) == 1:
return self.retrieve_asset(owners[0].sid)
options = {self.retrieve_asset(owner.sid) for owner in owners}
if multi_country:
country_codes = map(attrgetter('country_code'), options)
if len(set(country_codes)) > 1:
raise SameSymbolUsedAcrossCountries(
symbol=symbol,
options=dict(zip(country_codes, options))
)
# more than one equity has held this ticker, this
# is ambiguous without the date
raise MultipleSymbolsFound(symbol=symbol, options=options)
options = []
country_codes = []
for start, end, sid, _ in owners:
if start <= as_of_date < end:
# find the equity that owned it on the given asof date
asset = self.retrieve_asset(sid)
# if this asset owned the symbol on this asof date and we are
# only searching one country, return that asset
if not multi_country:
return asset
else:
options.append(asset)
country_codes.append(asset.country_code)
if not options:
# no equity held the ticker on the given asof date
raise SymbolNotFound(symbol=symbol)
# if there is one valid option given the asof date, return that option
if len(options) == 1:
return options[0]
# if there's more than one option given the asof date, a country code
# must be passed to resolve the symbol to an asset
raise SameSymbolUsedAcrossCountries(
symbol=symbol,
options=dict(zip(country_codes, options))
) | python | def _lookup_symbol_strict(self,
ownership_map,
multi_country,
symbol,
as_of_date):
"""
Resolve a symbol to an asset object without fuzzy matching.
Parameters
----------
ownership_map : dict[(str, str), list[OwnershipPeriod]]
The mapping from split symbols to ownership periods.
multi_country : bool
Does this mapping span multiple countries?
symbol : str
The symbol to look up.
as_of_date : datetime or None
If multiple assets have held this sid, which day should the
resolution be checked against? If this value is None and multiple
sids have held the ticker, then a MultipleSymbolsFound error will
be raised.
Returns
-------
asset : Asset
The asset that held the given symbol.
Raises
------
SymbolNotFound
Raised when the symbol or symbol as_of_date pair do not map to
any assets.
MultipleSymbolsFound
Raised when multiple assets held the symbol. This happens if
multiple assets held the symbol at disjoint times and
``as_of_date`` is None, or if multiple assets held the symbol at
the same time and ``multi_country`` is True.
Notes
-----
The resolution algorithm is as follows:
- Split the symbol into the company and share class component.
- Do a dictionary lookup of the
``(company_symbol, share_class_symbol)`` in the provided ownership
map.
- If there is no entry in the dictionary, we don't know about this
symbol so raise a ``SymbolNotFound`` error.
- If ``as_of_date`` is None:
- If there is more than one owner, raise
``MultipleSymbolsFound``
- Otherwise, because the list mapped to a symbol cannot be empty,
return the single asset.
- Iterate through all of the owners:
- If the ``as_of_date`` is between the start and end of the ownership
period:
- If multi_country is False, return the found asset.
- Otherwise, put the asset in a list.
- At the end of the loop, if there are no candidate assets, raise a
``SymbolNotFound``.
- If there is exactly one candidate, return it.
- Otherwise, raise ``MultipleSymbolsFound`` because the ticker is not
unique across countries.
"""
# split the symbol into the components, if there are no
# company/share class parts then share_class_symbol will be empty
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = ownership_map[company_symbol, share_class_symbol]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held this symbol
raise SymbolNotFound(symbol=symbol)
if not as_of_date:
# exactly one equity has ever held this symbol, we may resolve
# without the date
if len(owners) == 1:
return self.retrieve_asset(owners[0].sid)
options = {self.retrieve_asset(owner.sid) for owner in owners}
if multi_country:
country_codes = map(attrgetter('country_code'), options)
if len(set(country_codes)) > 1:
raise SameSymbolUsedAcrossCountries(
symbol=symbol,
options=dict(zip(country_codes, options))
)
# more than one equity has held this ticker, this
# is ambiguous without the date
raise MultipleSymbolsFound(symbol=symbol, options=options)
options = []
country_codes = []
for start, end, sid, _ in owners:
if start <= as_of_date < end:
# find the equity that owned it on the given asof date
asset = self.retrieve_asset(sid)
# if this asset owned the symbol on this asof date and we are
# only searching one country, return that asset
if not multi_country:
return asset
else:
options.append(asset)
country_codes.append(asset.country_code)
if not options:
# no equity held the ticker on the given asof date
raise SymbolNotFound(symbol=symbol)
# if there is one valid option given the asof date, return that option
if len(options) == 1:
return options[0]
# if there's more than one option given the asof date, a country code
# must be passed to resolve the symbol to an asset
raise SameSymbolUsedAcrossCountries(
symbol=symbol,
options=dict(zip(country_codes, options))
) | [
"def",
"_lookup_symbol_strict",
"(",
"self",
",",
"ownership_map",
",",
"multi_country",
",",
"symbol",
",",
"as_of_date",
")",
":",
"# split the symbol into the components, if there are no",
"# company/share class parts then share_class_symbol will be empty",
"company_symbol",
",",
"share_class_symbol",
"=",
"split_delimited_symbol",
"(",
"symbol",
")",
"try",
":",
"owners",
"=",
"ownership_map",
"[",
"company_symbol",
",",
"share_class_symbol",
"]",
"assert",
"owners",
",",
"'empty owners list for %r'",
"%",
"symbol",
"except",
"KeyError",
":",
"# no equity has ever held this symbol",
"raise",
"SymbolNotFound",
"(",
"symbol",
"=",
"symbol",
")",
"if",
"not",
"as_of_date",
":",
"# exactly one equity has ever held this symbol, we may resolve",
"# without the date",
"if",
"len",
"(",
"owners",
")",
"==",
"1",
":",
"return",
"self",
".",
"retrieve_asset",
"(",
"owners",
"[",
"0",
"]",
".",
"sid",
")",
"options",
"=",
"{",
"self",
".",
"retrieve_asset",
"(",
"owner",
".",
"sid",
")",
"for",
"owner",
"in",
"owners",
"}",
"if",
"multi_country",
":",
"country_codes",
"=",
"map",
"(",
"attrgetter",
"(",
"'country_code'",
")",
",",
"options",
")",
"if",
"len",
"(",
"set",
"(",
"country_codes",
")",
")",
">",
"1",
":",
"raise",
"SameSymbolUsedAcrossCountries",
"(",
"symbol",
"=",
"symbol",
",",
"options",
"=",
"dict",
"(",
"zip",
"(",
"country_codes",
",",
"options",
")",
")",
")",
"# more than one equity has held this ticker, this",
"# is ambiguous without the date",
"raise",
"MultipleSymbolsFound",
"(",
"symbol",
"=",
"symbol",
",",
"options",
"=",
"options",
")",
"options",
"=",
"[",
"]",
"country_codes",
"=",
"[",
"]",
"for",
"start",
",",
"end",
",",
"sid",
",",
"_",
"in",
"owners",
":",
"if",
"start",
"<=",
"as_of_date",
"<",
"end",
":",
"# find the equity that owned it on the given asof date",
"asset",
"=",
"self",
".",
"retrieve_asset",
"(",
"sid",
")",
"# if this asset owned the symbol on this asof date and we are",
"# only searching one country, return that asset",
"if",
"not",
"multi_country",
":",
"return",
"asset",
"else",
":",
"options",
".",
"append",
"(",
"asset",
")",
"country_codes",
".",
"append",
"(",
"asset",
".",
"country_code",
")",
"if",
"not",
"options",
":",
"# no equity held the ticker on the given asof date",
"raise",
"SymbolNotFound",
"(",
"symbol",
"=",
"symbol",
")",
"# if there is one valid option given the asof date, return that option",
"if",
"len",
"(",
"options",
")",
"==",
"1",
":",
"return",
"options",
"[",
"0",
"]",
"# if there's more than one option given the asof date, a country code",
"# must be passed to resolve the symbol to an asset",
"raise",
"SameSymbolUsedAcrossCountries",
"(",
"symbol",
"=",
"symbol",
",",
"options",
"=",
"dict",
"(",
"zip",
"(",
"country_codes",
",",
"options",
")",
")",
")"
] | Resolve a symbol to an asset object without fuzzy matching.
Parameters
----------
ownership_map : dict[(str, str), list[OwnershipPeriod]]
The mapping from split symbols to ownership periods.
multi_country : bool
Does this mapping span multiple countries?
symbol : str
The symbol to look up.
as_of_date : datetime or None
If multiple assets have held this sid, which day should the
resolution be checked against? If this value is None and multiple
sids have held the ticker, then a MultipleSymbolsFound error will
be raised.
Returns
-------
asset : Asset
The asset that held the given symbol.
Raises
------
SymbolNotFound
Raised when the symbol or symbol as_of_date pair do not map to
any assets.
MultipleSymbolsFound
Raised when multiple assets held the symbol. This happens if
multiple assets held the symbol at disjoint times and
``as_of_date`` is None, or if multiple assets held the symbol at
the same time and ``multi_country`` is True.
Notes
-----
The resolution algorithm is as follows:
- Split the symbol into the company and share class component.
- Do a dictionary lookup of the
``(company_symbol, share_class_symbol)`` in the provided ownership
map.
- If there is no entry in the dictionary, we don't know about this
symbol so raise a ``SymbolNotFound`` error.
- If ``as_of_date`` is None:
- If there is more than one owner, raise
``MultipleSymbolsFound``
- Otherwise, because the list mapped to a symbol cannot be empty,
return the single asset.
- Iterate through all of the owners:
- If the ``as_of_date`` is between the start and end of the ownership
period:
- If multi_country is False, return the found asset.
- Otherwise, put the asset in a list.
- At the end of the loop, if there are no candidate assets, raise a
``SymbolNotFound``.
- If there is exactly one candidate, return it.
- Otherwise, raise ``MultipleSymbolsFound`` because the ticker is not
unique across countries. | [
"Resolve",
"a",
"symbol",
"to",
"an",
"asset",
"object",
"without",
"fuzzy",
"matching",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L742-L865 |
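A toy walk-through of the resolution steps listed above. The ownership map is hand-built here with a namedtuple stand-in; zipline derives the real map from its symbol-mapping table, keyed by `(company_symbol, share_class_symbol)`.

```python
# Strict resolution sketch: exactly one owner holds the split symbol on the
# as-of date, so that sid is the answer.
from collections import namedtuple

import pandas as pd

OwnershipPeriod = namedtuple('OwnershipPeriod', ['start', 'end', 'sid', 'value'])

ownership_map = {
    ('BRK', 'A'): [
        OwnershipPeriod(pd.Timestamp('1996-05-09', tz='utc'),
                        pd.Timestamp.max.tz_localize('utc'), 42, 'BRK_A'),
    ],
}

owners = ownership_map['BRK', 'A']
as_of = pd.Timestamp('2016-01-04', tz='utc')
candidates = [p.sid for p in owners if p.start <= as_of < p.end]
assert candidates == [42]  # one valid owner on the as-of date
```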
26,151 | quantopian/zipline | zipline/assets/assets.py | AssetFinder.lookup_symbol | def lookup_symbol(self,
symbol,
as_of_date,
fuzzy=False,
country_code=None):
"""Lookup an equity by symbol.
Parameters
----------
symbol : str
The ticker symbol to resolve.
as_of_date : datetime or None
Look up the last owner of this symbol as of this datetime.
If ``as_of_date`` is None, then this can only resolve the equity
if exactly one equity has ever owned the ticker.
fuzzy : bool, optional
Should fuzzy symbol matching be used? Fuzzy symbol matching
attempts to resolve differences in representations for
shareclasses. For example, some people may represent the ``A``
shareclass of ``BRK`` as ``BRK.A``, where others could write
``BRK_A``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equity : Equity
The equity that held ``symbol`` on the given ``as_of_date``, or the
only equity to hold ``symbol`` if ``as_of_date`` is None.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when no ``as_of_date`` is given and more than one equity
has held ``symbol``. This is also raised when ``fuzzy=True`` and
there are multiple candidates for the given ``symbol`` on the
``as_of_date``. Also raised when no ``country_code`` is given and
the symbol is ambiguous across multiple countries.
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None for "
"as of date %s." % as_of_date)
if fuzzy:
f = self._lookup_symbol_fuzzy
mapping = self._choose_fuzzy_symbol_ownership_map(country_code)
else:
f = self._lookup_symbol_strict
mapping = self._choose_symbol_ownership_map(country_code)
if mapping is None:
raise SymbolNotFound(symbol=symbol)
return f(
mapping,
country_code is None,
symbol,
as_of_date,
) | python | def lookup_symbol(self,
symbol,
as_of_date,
fuzzy=False,
country_code=None):
"""Lookup an equity by symbol.
Parameters
----------
symbol : str
The ticker symbol to resolve.
as_of_date : datetime or None
Look up the last owner of this symbol as of this datetime.
If ``as_of_date`` is None, then this can only resolve the equity
if exactly one equity has ever owned the ticker.
fuzzy : bool, optional
Should fuzzy symbol matching be used? Fuzzy symbol matching
attempts to resolve differences in representations for
shareclasses. For example, some people may represent the ``A``
shareclass of ``BRK`` as ``BRK.A``, where others could write
``BRK_A``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equity : Equity
The equity that held ``symbol`` on the given ``as_of_date``, or the
only equity to hold ``symbol`` if ``as_of_date`` is None.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when no ``as_of_date`` is given and more than one equity
has held ``symbol``. This is also raised when ``fuzzy=True`` and
there are multiple candidates for the given ``symbol`` on the
``as_of_date``. Also raised when no ``country_code`` is given and
the symbol is ambiguous across multiple countries.
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None for "
"as of date %s." % as_of_date)
if fuzzy:
f = self._lookup_symbol_fuzzy
mapping = self._choose_fuzzy_symbol_ownership_map(country_code)
else:
f = self._lookup_symbol_strict
mapping = self._choose_symbol_ownership_map(country_code)
if mapping is None:
raise SymbolNotFound(symbol=symbol)
return f(
mapping,
country_code is None,
symbol,
as_of_date,
) | [
"def",
"lookup_symbol",
"(",
"self",
",",
"symbol",
",",
"as_of_date",
",",
"fuzzy",
"=",
"False",
",",
"country_code",
"=",
"None",
")",
":",
"if",
"symbol",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"Cannot lookup asset for symbol of None for \"",
"\"as of date %s.\"",
"%",
"as_of_date",
")",
"if",
"fuzzy",
":",
"f",
"=",
"self",
".",
"_lookup_symbol_fuzzy",
"mapping",
"=",
"self",
".",
"_choose_fuzzy_symbol_ownership_map",
"(",
"country_code",
")",
"else",
":",
"f",
"=",
"self",
".",
"_lookup_symbol_strict",
"mapping",
"=",
"self",
".",
"_choose_symbol_ownership_map",
"(",
"country_code",
")",
"if",
"mapping",
"is",
"None",
":",
"raise",
"SymbolNotFound",
"(",
"symbol",
"=",
"symbol",
")",
"return",
"f",
"(",
"mapping",
",",
"country_code",
"is",
"None",
",",
"symbol",
",",
"as_of_date",
",",
")"
] | Lookup an equity by symbol.
Parameters
----------
symbol : str
The ticker symbol to resolve.
as_of_date : datetime or None
Look up the last owner of this symbol as of this datetime.
If ``as_of_date`` is None, then this can only resolve the equity
if exactly one equity has ever owned the ticker.
fuzzy : bool, optional
Should fuzzy symbol matching be used? Fuzzy symbol matching
attempts to resolve differences in representations for
shareclasses. For example, some people may represent the ``A``
shareclass of ``BRK`` as ``BRK.A``, where others could write
``BRK_A``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equity : Equity
The equity that held ``symbol`` on the given ``as_of_date``, or the
only equity to hold ``symbol`` if ``as_of_date`` is None.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when no ``as_of_date`` is given and more than one equity
has held ``symbol``. This is also raised when ``fuzzy=True`` and
there are multiple candidates for the given ``symbol`` on the
``as_of_date``. Also raised when no ``country_code`` is given and
the symbol is ambiguous across multiple countries. | [
"Lookup",
"an",
"equity",
"by",
"symbol",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L955-L1016 |
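A hedged usage sketch of the public entry point: `asset_finder` is assumed to be a populated `AssetFinder`, and the tickers, date, and country code are illustrative.

```python
# Plain lookup, then a fuzzy lookup limited to one country so that a ticker
# stored as 'BRK_A' still resolves and cross-country ambiguity is avoided.
import pandas as pd

aapl = asset_finder.lookup_symbol(
    'AAPL',
    as_of_date=pd.Timestamp('2016-01-04', tz='utc'),
)
brk_a = asset_finder.lookup_symbol(
    'BRK.A',
    as_of_date=pd.Timestamp('2016-01-04', tz='utc'),
    fuzzy=True,
    country_code='US',
)
```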
26,152 | quantopian/zipline | zipline/assets/assets.py | AssetFinder.lookup_symbols | def lookup_symbols(self,
symbols,
as_of_date,
fuzzy=False,
country_code=None):
"""
Lookup a list of equities by symbol.
Equivalent to::
[finder.lookup_symbol(s, as_of, fuzzy) for s in symbols]
but potentially faster because repeated lookups are memoized.
Parameters
----------
symbols : sequence[str]
Sequence of ticker symbols to resolve.
as_of_date : pd.Timestamp
Forwarded to ``lookup_symbol``.
fuzzy : bool, optional
Forwarded to ``lookup_symbol``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equities : list[Equity]
"""
if not symbols:
return []
multi_country = country_code is None
if fuzzy:
f = self._lookup_symbol_fuzzy
mapping = self._choose_fuzzy_symbol_ownership_map(country_code)
else:
f = self._lookup_symbol_strict
mapping = self._choose_symbol_ownership_map(country_code)
if mapping is None:
raise SymbolNotFound(symbol=symbols[0])
memo = {}
out = []
append_output = out.append
for sym in symbols:
if sym in memo:
append_output(memo[sym])
else:
equity = memo[sym] = f(
mapping,
multi_country,
sym,
as_of_date,
)
append_output(equity)
return out | python | def lookup_symbols(self,
symbols,
as_of_date,
fuzzy=False,
country_code=None):
"""
Lookup a list of equities by symbol.
Equivalent to::
[finder.lookup_symbol(s, as_of, fuzzy) for s in symbols]
but potentially faster because repeated lookups are memoized.
Parameters
----------
symbols : sequence[str]
Sequence of ticker symbols to resolve.
as_of_date : pd.Timestamp
Forwarded to ``lookup_symbol``.
fuzzy : bool, optional
Forwarded to ``lookup_symbol``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equities : list[Equity]
"""
if not symbols:
return []
multi_country = country_code is None
if fuzzy:
f = self._lookup_symbol_fuzzy
mapping = self._choose_fuzzy_symbol_ownership_map(country_code)
else:
f = self._lookup_symbol_strict
mapping = self._choose_symbol_ownership_map(country_code)
if mapping is None:
raise SymbolNotFound(symbol=symbols[0])
memo = {}
out = []
append_output = out.append
for sym in symbols:
if sym in memo:
append_output(memo[sym])
else:
equity = memo[sym] = f(
mapping,
multi_country,
sym,
as_of_date,
)
append_output(equity)
return out | [
"def",
"lookup_symbols",
"(",
"self",
",",
"symbols",
",",
"as_of_date",
",",
"fuzzy",
"=",
"False",
",",
"country_code",
"=",
"None",
")",
":",
"if",
"not",
"symbols",
":",
"return",
"[",
"]",
"multi_country",
"=",
"country_code",
"is",
"None",
"if",
"fuzzy",
":",
"f",
"=",
"self",
".",
"_lookup_symbol_fuzzy",
"mapping",
"=",
"self",
".",
"_choose_fuzzy_symbol_ownership_map",
"(",
"country_code",
")",
"else",
":",
"f",
"=",
"self",
".",
"_lookup_symbol_strict",
"mapping",
"=",
"self",
".",
"_choose_symbol_ownership_map",
"(",
"country_code",
")",
"if",
"mapping",
"is",
"None",
":",
"raise",
"SymbolNotFound",
"(",
"symbol",
"=",
"symbols",
"[",
"0",
"]",
")",
"memo",
"=",
"{",
"}",
"out",
"=",
"[",
"]",
"append_output",
"=",
"out",
".",
"append",
"for",
"sym",
"in",
"symbols",
":",
"if",
"sym",
"in",
"memo",
":",
"append_output",
"(",
"memo",
"[",
"sym",
"]",
")",
"else",
":",
"equity",
"=",
"memo",
"[",
"sym",
"]",
"=",
"f",
"(",
"mapping",
",",
"multi_country",
",",
"sym",
",",
"as_of_date",
",",
")",
"append_output",
"(",
"equity",
")",
"return",
"out"
] | Lookup a list of equities by symbol.
Equivalent to::
[finder.lookup_symbol(s, as_of, fuzzy) for s in symbols]
but potentially faster because repeated lookups are memoized.
Parameters
----------
symbols : sequence[str]
Sequence of ticker symbols to resolve.
as_of_date : pd.Timestamp
Forwarded to ``lookup_symbol``.
fuzzy : bool, optional
Forwarded to ``lookup_symbol``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equities : list[Equity] | [
"Lookup",
"a",
"list",
"of",
"equities",
"by",
"symbol",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L1018-L1077 |
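A short hedged sketch of the batched variant; repeated tickers hit the local memo dict, so each distinct symbol is resolved only once.

```python
# Same assumptions as above: `asset_finder` is a populated AssetFinder and the
# tickers and date are illustrative.
import pandas as pd

equities = asset_finder.lookup_symbols(
    ['AAPL', 'MSFT', 'AAPL'],
    as_of_date=pd.Timestamp('2016-01-04', tz='utc'),
)
assert equities[0] is equities[2]  # memoized lookup returns the same object
```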
26,153 | quantopian/zipline | zipline/assets/assets.py | AssetFinder.lookup_future_symbol | def lookup_future_symbol(self, symbol):
"""Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
return self.retrieve_asset(data['sid']) | python | def lookup_future_symbol(self, symbol):
"""Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
return self.retrieve_asset(data['sid']) | [
"def",
"lookup_future_symbol",
"(",
"self",
",",
"symbol",
")",
":",
"data",
"=",
"self",
".",
"_select_asset_by_symbol",
"(",
"self",
".",
"futures_contracts",
",",
"symbol",
")",
".",
"execute",
"(",
")",
".",
"fetchone",
"(",
")",
"# If no data found, raise an exception",
"if",
"not",
"data",
":",
"raise",
"SymbolNotFound",
"(",
"symbol",
"=",
"symbol",
")",
"return",
"self",
".",
"retrieve_asset",
"(",
"data",
"[",
"'sid'",
"]",
")"
] | Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found. | [
"Lookup",
"a",
"future",
"contract",
"by",
"symbol",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L1079-L1105 |
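A minimal usage sketch for lookup_future_symbol, assuming an AssetFinder instance named `finder` and that SymbolNotFound is importable from zipline.errors; the contract symbol is made up.

from zipline.errors import SymbolNotFound  # assumed import path

def future_or_none(finder, symbol):
    # lookup_future_symbol raises rather than returning None, so wrap it.
    try:
        return finder.lookup_future_symbol(symbol)
    except SymbolNotFound:
        return None

# future_or_none(finder, 'CLF16')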
26,154 | quantopian/zipline | zipline/assets/assets.py | AssetFinder.get_supplementary_field | def get_supplementary_field(self, sid, field_name, as_of_date):
"""Get the value of a supplementary field for an asset.
Parameters
----------
sid : int
The sid of the asset to query.
field_name : str
Name of the supplementary field.
as_of_date : pd.Timestamp, None
The last known value on this date is returned. If None, a
value is returned only if we've only ever had one value for
this sid. If None and we've had multiple values,
MultipleValuesFoundForSid is raised.
Raises
------
NoValueForSid
If we have no values for this asset, or no value was known
on this as_of_date.
MultipleValuesFoundForSid
If we have had multiple values for this asset over time, and
None was passed for as_of_date.
"""
try:
periods = self.equity_supplementary_map_by_sid[
field_name,
sid,
]
assert periods, 'empty periods list for %r' % (field_name, sid)
except KeyError:
raise NoValueForSid(field=field_name, sid=sid)
if not as_of_date:
if len(periods) > 1:
# This equity has held more than one value, this is ambiguous
# without the date
raise MultipleValuesFoundForSid(
field=field_name,
sid=sid,
options={p.value for p in periods},
)
# this equity has only ever held this value, we may resolve
# without the date
return periods[0].value
for start, end, _, value in periods:
if start <= as_of_date < end:
return value
# Could not find a value for this sid on the as_of_date.
raise NoValueForSid(field=field_name, sid=sid) | python | def get_supplementary_field(self, sid, field_name, as_of_date):
"""Get the value of a supplementary field for an asset.
Parameters
----------
sid : int
The sid of the asset to query.
field_name : str
Name of the supplementary field.
as_of_date : pd.Timestamp, None
The last known value on this date is returned. If None, a
value is returned only if we've only ever had one value for
this sid. If None and we've had multiple values,
MultipleValuesFoundForSid is raised.
Raises
------
NoValueForSid
If we have no values for this asset, or no value was known
on this as_of_date.
MultipleValuesFoundForSid
If we have had multiple values for this asset over time, and
None was passed for as_of_date.
"""
try:
periods = self.equity_supplementary_map_by_sid[
field_name,
sid,
]
assert periods, 'empty periods list for %r' % (field_name, sid)
except KeyError:
raise NoValueForSid(field=field_name, sid=sid)
if not as_of_date:
if len(periods) > 1:
# This equity has held more than one value, this is ambiguous
# without the date
raise MultipleValuesFoundForSid(
field=field_name,
sid=sid,
options={p.value for p in periods},
)
# this equity has only ever held this value, we may resolve
# without the date
return periods[0].value
for start, end, _, value in periods:
if start <= as_of_date < end:
return value
# Could not find a value for this sid on the as_of_date.
raise NoValueForSid(field=field_name, sid=sid) | [
"def",
"get_supplementary_field",
"(",
"self",
",",
"sid",
",",
"field_name",
",",
"as_of_date",
")",
":",
"try",
":",
"periods",
"=",
"self",
".",
"equity_supplementary_map_by_sid",
"[",
"field_name",
",",
"sid",
",",
"]",
"assert",
"periods",
",",
"'empty periods list for %r'",
"%",
"(",
"field_name",
",",
"sid",
")",
"except",
"KeyError",
":",
"raise",
"NoValueForSid",
"(",
"field",
"=",
"field_name",
",",
"sid",
"=",
"sid",
")",
"if",
"not",
"as_of_date",
":",
"if",
"len",
"(",
"periods",
")",
">",
"1",
":",
"# This equity has held more than one value, this is ambigious",
"# without the date",
"raise",
"MultipleValuesFoundForSid",
"(",
"field",
"=",
"field_name",
",",
"sid",
"=",
"sid",
",",
"options",
"=",
"{",
"p",
".",
"value",
"for",
"p",
"in",
"periods",
"}",
",",
")",
"# this equity has only ever held this value, we may resolve",
"# without the date",
"return",
"periods",
"[",
"0",
"]",
".",
"value",
"for",
"start",
",",
"end",
",",
"_",
",",
"value",
"in",
"periods",
":",
"if",
"start",
"<=",
"as_of_date",
"<",
"end",
":",
"return",
"value",
"# Could not find a value for this sid on the as_of_date.",
"raise",
"NoValueForSid",
"(",
"field",
"=",
"field_name",
",",
"sid",
"=",
"sid",
")"
] | Get the value of a supplementary field for an asset.
Parameters
----------
sid : int
The sid of the asset to query.
field_name : str
Name of the supplementary field.
as_of_date : pd.Timestamp, None
The last known value on this date is returned. If None, a
value is returned only if we've only ever had one value for
this sid. If None and we've had multiple values,
MultipleValuesFoundForSid is raised.
Raises
------
NoValueForSid
If we have no values for this asset, or no value was known
on this as_of_date.
MultipleValuesFoundForSid
If we have had multiple values for this asset over time, and
None was passed for as_of_date. | [
"Get",
"the",
"value",
"of",
"a",
"supplementary",
"field",
"for",
"an",
"asset",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L1142-L1193 |
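A standalone sketch of the half-open-interval lookup that get_supplementary_field performs once it has the ownership periods for a (field, sid) key; the period tuples and values below are invented.

import pandas as pd

periods = [
    # (start, end, sid, value), valid over the half-open interval [start, end)
    (pd.Timestamp('2014-01-01'), pd.Timestamp('2015-06-01'), 1, 'OLD-FIGI'),
    (pd.Timestamp('2015-06-01'), pd.Timestamp('2016-01-01'), 1, 'NEW-FIGI'),
]

def value_as_of(periods, as_of_date):
    for start, end, _, value in periods:
        if start <= as_of_date < end:
            return value
    raise LookupError('no value known on %s' % as_of_date)

# value_as_of(periods, pd.Timestamp('2015-07-15'))  -> 'NEW-FIGI'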
26,155 | quantopian/zipline | zipline/assets/assets.py | AssetFinder._lookup_generic_scalar | def _lookup_generic_scalar(self,
obj,
as_of_date,
country_code,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
result = self._lookup_generic_scalar_helper(
obj, as_of_date, country_code,
)
if result is not None:
matches.append(result)
else:
missing.append(obj) | python | def _lookup_generic_scalar(self,
obj,
as_of_date,
country_code,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
result = self._lookup_generic_scalar_helper(
obj, as_of_date, country_code,
)
if result is not None:
matches.append(result)
else:
missing.append(obj) | [
"def",
"_lookup_generic_scalar",
"(",
"self",
",",
"obj",
",",
"as_of_date",
",",
"country_code",
",",
"matches",
",",
"missing",
")",
":",
"result",
"=",
"self",
".",
"_lookup_generic_scalar_helper",
"(",
"obj",
",",
"as_of_date",
",",
"country_code",
",",
")",
"if",
"result",
"is",
"not",
"None",
":",
"matches",
".",
"append",
"(",
"result",
")",
"else",
":",
"missing",
".",
"append",
"(",
"obj",
")"
] | Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing. | [
"Convert",
"asset_convertible",
"to",
"an",
"asset",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L1298-L1316 |
26,156 | quantopian/zipline | zipline/assets/assets.py | AssetFinder.lookup_generic | def lookup_generic(self, obj, as_of_date, country_code):
"""
Convert an object into an Asset or sequence of Assets.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Parameters
----------
obj : int, str, Asset, ContinuousFuture, or iterable
The object to be converted into one or more Assets.
Integers are interpreted as sids. Strings are interpreted as
tickers. Assets and ContinuousFutures are returned unchanged.
as_of_date : pd.Timestamp or None
Timestamp to use to disambiguate ticker lookups. Has the same
semantics as in `lookup_symbol`.
country_code : str or None
ISO-3166 country code to use to disambiguate ticker lookups. Has
the same semantics as in `lookup_symbol`.
Returns
-------
matches, missing : tuple
``matches`` is the result of the conversion. ``missing`` is a list
containing any values that couldn't be resolved. If ``obj`` is not
an iterable, ``missing`` will be an empty list.
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(obj, (AssetConvertible, ContinuousFuture)):
self._lookup_generic_scalar(
obj=obj,
as_of_date=as_of_date,
country_code=country_code,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(obj, '__int__'):
raise SidsNotFound(sids=[obj])
else:
raise SymbolNotFound(symbol=obj)
# Interpret input as iterable.
try:
iterator = iter(obj)
except TypeError:
raise NotAssetConvertible(
"Input was not a AssetConvertible "
"or iterable of AssetConvertible."
)
for obj in iterator:
self._lookup_generic_scalar(
obj=obj,
as_of_date=as_of_date,
country_code=country_code,
matches=matches,
missing=missing,
)
return matches, missing | python | def lookup_generic(self, obj, as_of_date, country_code):
"""
Convert an object into an Asset or sequence of Assets.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Parameters
----------
obj : int, str, Asset, ContinuousFuture, or iterable
The object to be converted into one or more Assets.
Integers are interpreted as sids. Strings are interpreted as
tickers. Assets and ContinuousFutures are returned unchanged.
as_of_date : pd.Timestamp or None
Timestamp to use to disambiguate ticker lookups. Has the same
semantics as in `lookup_symbol`.
country_code : str or None
ISO-3166 country code to use to disambiguate ticker lookups. Has
the same semantics as in `lookup_symbol`.
Returns
-------
matches, missing : tuple
``matches`` is the result of the conversion. ``missing`` is a list
containing any values that couldn't be resolved. If ``obj`` is not
an iterable, ``missing`` will be an empty list.
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(obj, (AssetConvertible, ContinuousFuture)):
self._lookup_generic_scalar(
obj=obj,
as_of_date=as_of_date,
country_code=country_code,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(obj, '__int__'):
raise SidsNotFound(sids=[obj])
else:
raise SymbolNotFound(symbol=obj)
# Interpret input as iterable.
try:
iterator = iter(obj)
except TypeError:
raise NotAssetConvertible(
"Input was not a AssetConvertible "
"or iterable of AssetConvertible."
)
for obj in iterator:
self._lookup_generic_scalar(
obj=obj,
as_of_date=as_of_date,
country_code=country_code,
matches=matches,
missing=missing,
)
return matches, missing | [
"def",
"lookup_generic",
"(",
"self",
",",
"obj",
",",
"as_of_date",
",",
"country_code",
")",
":",
"matches",
"=",
"[",
"]",
"missing",
"=",
"[",
"]",
"# Interpret input as scalar.",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"AssetConvertible",
",",
"ContinuousFuture",
")",
")",
":",
"self",
".",
"_lookup_generic_scalar",
"(",
"obj",
"=",
"obj",
",",
"as_of_date",
"=",
"as_of_date",
",",
"country_code",
"=",
"country_code",
",",
"matches",
"=",
"matches",
",",
"missing",
"=",
"missing",
",",
")",
"try",
":",
"return",
"matches",
"[",
"0",
"]",
",",
"missing",
"except",
"IndexError",
":",
"if",
"hasattr",
"(",
"obj",
",",
"'__int__'",
")",
":",
"raise",
"SidsNotFound",
"(",
"sids",
"=",
"[",
"obj",
"]",
")",
"else",
":",
"raise",
"SymbolNotFound",
"(",
"symbol",
"=",
"obj",
")",
"# Interpret input as iterable.",
"try",
":",
"iterator",
"=",
"iter",
"(",
"obj",
")",
"except",
"TypeError",
":",
"raise",
"NotAssetConvertible",
"(",
"\"Input was not a AssetConvertible \"",
"\"or iterable of AssetConvertible.\"",
")",
"for",
"obj",
"in",
"iterator",
":",
"self",
".",
"_lookup_generic_scalar",
"(",
"obj",
"=",
"obj",
",",
"as_of_date",
"=",
"as_of_date",
",",
"country_code",
"=",
"country_code",
",",
"matches",
"=",
"matches",
",",
"missing",
"=",
"missing",
",",
")",
"return",
"matches",
",",
"missing"
] | Convert an object into an Asset or sequence of Assets.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Parameters
----------
obj : int, str, Asset, ContinuousFuture, or iterable
The object to be converted into one or more Assets.
Integers are interpreted as sids. Strings are interpreted as
tickers. Assets and ContinuousFutures are returned unchanged.
as_of_date : pd.Timestamp or None
Timestamp to use to disambiguate ticker lookups. Has the same
semantics as in `lookup_symbol`.
country_code : str or None
ISO-3166 country code to use to disambiguate ticker lookups. Has
the same semantics as in `lookup_symbol`.
Returns
-------
matches, missing : tuple
``matches`` is the result of the conversion. ``missing`` is a list
containing any values that couldn't be resolved. If ``obj`` is not
an iterable, ``missing`` will be an empty list. | [
"Convert",
"an",
"object",
"into",
"an",
"Asset",
"or",
"sequence",
"of",
"Assets",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L1347-L1414 |
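A hedged sketch of the two calling conventions described in the lookup_generic row: scalars either resolve or raise, while iterables collect unresolved inputs into `missing`. The `finder` instance and the inputs are assumptions.

import pandas as pd

def resolve_or_fail(finder, thing, as_of, country='US'):
    # Scalar input returns (Asset, []); iterable input returns (list, missing).
    matches, missing = finder.lookup_generic(thing, as_of, country)
    if missing:
        raise ValueError('could not resolve: %r' % (missing,))
    return matches

# resolve_or_fail(finder, 'AAPL', pd.Timestamp('2016-01-04'))
# resolve_or_fail(finder, ['AAPL', 24, 'NOPE'], pd.Timestamp('2016-01-04'))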
26,157 | quantopian/zipline | zipline/assets/assets.py | AssetFinder._compute_asset_lifetimes | def _compute_asset_lifetimes(self, country_codes):
"""
Compute and cache a recarray of asset lifetimes.
"""
equities_cols = self.equities.c
if country_codes:
buf = np.array(
tuple(
sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.end_date,
)).where(
(self.exchanges.c.exchange == equities_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
).execute(),
),
dtype='f8', # use doubles so we get NaNs
)
else:
buf = np.array([], dtype='f8')
lifetimes = np.recarray(
buf=buf,
shape=(len(buf),),
dtype=[
('sid', 'f8'),
('start', 'f8'),
('end', 'f8')
],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0 # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
# Cast the results back down to int.
return lifetimes.astype([
('sid', 'i8'),
('start', 'i8'),
('end', 'i8'),
]) | python | def _compute_asset_lifetimes(self, country_codes):
"""
Compute and cache a recarray of asset lifetimes.
"""
equities_cols = self.equities.c
if country_codes:
buf = np.array(
tuple(
sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.end_date,
)).where(
(self.exchanges.c.exchange == equities_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
).execute(),
),
dtype='f8', # use doubles so we get NaNs
)
else:
buf = np.array([], dtype='f8')
lifetimes = np.recarray(
buf=buf,
shape=(len(buf),),
dtype=[
('sid', 'f8'),
('start', 'f8'),
('end', 'f8')
],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0 # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
# Cast the results back down to int.
return lifetimes.astype([
('sid', 'i8'),
('start', 'i8'),
('end', 'i8'),
]) | [
"def",
"_compute_asset_lifetimes",
"(",
"self",
",",
"country_codes",
")",
":",
"equities_cols",
"=",
"self",
".",
"equities",
".",
"c",
"if",
"country_codes",
":",
"buf",
"=",
"np",
".",
"array",
"(",
"tuple",
"(",
"sa",
".",
"select",
"(",
"(",
"equities_cols",
".",
"sid",
",",
"equities_cols",
".",
"start_date",
",",
"equities_cols",
".",
"end_date",
",",
")",
")",
".",
"where",
"(",
"(",
"self",
".",
"exchanges",
".",
"c",
".",
"exchange",
"==",
"equities_cols",
".",
"exchange",
")",
"&",
"(",
"self",
".",
"exchanges",
".",
"c",
".",
"country_code",
".",
"in_",
"(",
"country_codes",
")",
")",
")",
".",
"execute",
"(",
")",
",",
")",
",",
"dtype",
"=",
"'f8'",
",",
"# use doubles so we get NaNs",
")",
"else",
":",
"buf",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"'f8'",
")",
"lifetimes",
"=",
"np",
".",
"recarray",
"(",
"buf",
"=",
"buf",
",",
"shape",
"=",
"(",
"len",
"(",
"buf",
")",
",",
")",
",",
"dtype",
"=",
"[",
"(",
"'sid'",
",",
"'f8'",
")",
",",
"(",
"'start'",
",",
"'f8'",
")",
",",
"(",
"'end'",
",",
"'f8'",
")",
"]",
",",
")",
"start",
"=",
"lifetimes",
".",
"start",
"end",
"=",
"lifetimes",
".",
"end",
"start",
"[",
"np",
".",
"isnan",
"(",
"start",
")",
"]",
"=",
"0",
"# convert missing starts to 0",
"end",
"[",
"np",
".",
"isnan",
"(",
"end",
")",
"]",
"=",
"np",
".",
"iinfo",
"(",
"int",
")",
".",
"max",
"# convert missing end to INTMAX",
"# Cast the results back down to int.",
"return",
"lifetimes",
".",
"astype",
"(",
"[",
"(",
"'sid'",
",",
"'i8'",
")",
",",
"(",
"'start'",
",",
"'i8'",
")",
",",
"(",
"'end'",
",",
"'i8'",
")",
",",
"]",
")"
] | Compute and cache a recarray of asset lifetimes. | [
"Compute",
"and",
"cache",
"a",
"recarray",
"of",
"asset",
"lifetimes",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L1416-L1456 |
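A self-contained numpy sketch of the NaN-to-sentinel trick used by _compute_asset_lifetimes: the records are first materialized as float64 so missing dates surface as NaN, then clamped and cast to int64. The toy values stand in for epoch-nanosecond dates, and 2 ** 62 replaces the np.iinfo(int).max sentinel to keep the example exact.

import numpy as np

# Two assets; the first has no recorded start date, the second no end date.
buf = np.array([1.0, np.nan, 200.0,
                2.0, 100.0, np.nan], dtype='f8')
lifetimes = np.recarray(buf=buf, shape=(2,),
                        dtype=[('sid', 'f8'), ('start', 'f8'), ('end', 'f8')])
lifetimes.start[np.isnan(lifetimes.start)] = 0        # missing start -> epoch
lifetimes.end[np.isnan(lifetimes.end)] = 2 ** 62      # missing end -> far future
as_ints = lifetimes.astype([('sid', 'i8'), ('start', 'i8'), ('end', 'i8')])
# as_ints['start'] -> [0, 100]; as_ints['end'] -> [200, 4611686018427387904]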
26,158 | quantopian/zipline | zipline/assets/assets.py | AssetFinder.lifetimes | def lifetimes(self, dates, include_start_date, country_codes):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
if isinstance(country_codes, string_types):
raise TypeError(
"Got string {!r} instead of an iterable of strings in "
"AssetFinder.lifetimes.".format(country_codes),
)
# normalize to a cache-key so that we can memoize results.
country_codes = frozenset(country_codes)
lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
self._asset_lifetimes[country_codes] = lifetimes = (
self._compute_asset_lifetimes(country_codes)
)
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid) | python | def lifetimes(self, dates, include_start_date, country_codes):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
if isinstance(country_codes, string_types):
raise TypeError(
"Got string {!r} instead of an iterable of strings in "
"AssetFinder.lifetimes.".format(country_codes),
)
# normalize to a cache-key so that we can memoize results.
country_codes = frozenset(country_codes)
lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
self._asset_lifetimes[country_codes] = lifetimes = (
self._compute_asset_lifetimes(country_codes)
)
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid) | [
"def",
"lifetimes",
"(",
"self",
",",
"dates",
",",
"include_start_date",
",",
"country_codes",
")",
":",
"if",
"isinstance",
"(",
"country_codes",
",",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"Got string {!r} instead of an iterable of strings in \"",
"\"AssetFinder.lifetimes.\"",
".",
"format",
"(",
"country_codes",
")",
",",
")",
"# normalize to a cache-key so that we can memoize results.",
"country_codes",
"=",
"frozenset",
"(",
"country_codes",
")",
"lifetimes",
"=",
"self",
".",
"_asset_lifetimes",
".",
"get",
"(",
"country_codes",
")",
"if",
"lifetimes",
"is",
"None",
":",
"self",
".",
"_asset_lifetimes",
"[",
"country_codes",
"]",
"=",
"lifetimes",
"=",
"(",
"self",
".",
"_compute_asset_lifetimes",
"(",
"country_codes",
")",
")",
"raw_dates",
"=",
"as_column",
"(",
"dates",
".",
"asi8",
")",
"if",
"include_start_date",
":",
"mask",
"=",
"lifetimes",
".",
"start",
"<=",
"raw_dates",
"else",
":",
"mask",
"=",
"lifetimes",
".",
"start",
"<",
"raw_dates",
"mask",
"&=",
"(",
"raw_dates",
"<=",
"lifetimes",
".",
"end",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"mask",
",",
"index",
"=",
"dates",
",",
"columns",
"=",
"lifetimes",
".",
"sid",
")"
] | Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask | [
"Compute",
"a",
"DataFrame",
"representing",
"asset",
"lifetimes",
"for",
"the",
"specified",
"date",
"range",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L1458-L1514 |
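A small pandas/numpy sketch of how the boolean frame returned by lifetimes is assembled: the dates become an (n, 1) column of int64 nanoseconds and broadcast against per-asset start/end rows. The sids and dates are made up.

import numpy as np
import pandas as pd

dates = pd.DatetimeIndex(['2016-01-04', '2016-01-05', '2016-01-06'])
sid = np.array([1, 2])
start = np.array([pd.Timestamp('2016-01-04').value, pd.Timestamp('2016-01-05').value])
end = np.array([pd.Timestamp('2016-01-06').value, pd.Timestamp('2016-01-07').value])

raw_dates = dates.asi8[:, None]          # shape (3, 1), like as_column(dates.asi8)
include_start_date = False
if include_start_date:
    mask = start <= raw_dates
else:
    mask = start < raw_dates             # alive strictly after the start date
mask &= raw_dates <= end
frame = pd.DataFrame(mask, index=dates, columns=sid)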
26,159 | quantopian/zipline | zipline/assets/assets.py | AssetFinder.equities_sids_for_country_code | def equities_sids_for_country_code(self, country_code):
"""Return all of the sids for a given country.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code.
Returns
-------
tuple[int]
The sids whose exchanges are in this country.
"""
sids = self._compute_asset_lifetimes([country_code]).sid
return tuple(sids.tolist()) | python | def equities_sids_for_country_code(self, country_code):
"""Return all of the sids for a given country.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code.
Returns
-------
tuple[int]
The sids whose exchanges are in this country.
"""
sids = self._compute_asset_lifetimes([country_code]).sid
return tuple(sids.tolist()) | [
"def",
"equities_sids_for_country_code",
"(",
"self",
",",
"country_code",
")",
":",
"sids",
"=",
"self",
".",
"_compute_asset_lifetimes",
"(",
"[",
"country_code",
"]",
")",
".",
"sid",
"return",
"tuple",
"(",
"sids",
".",
"tolist",
"(",
")",
")"
] | Return all of the sids for a given country.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code.
Returns
-------
tuple[int]
The sids whose exchanges are in this country. | [
"Return",
"all",
"of",
"the",
"sids",
"for",
"a",
"given",
"country",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/assets.py#L1516-L1530 |
26,160 | quantopian/zipline | zipline/data/continuous_future_reader.py | ContinuousFutureSessionBarReader.get_last_traded_dt | def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt) | python | def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt) | [
"def",
"get_last_traded_dt",
"(",
"self",
",",
"asset",
",",
"dt",
")",
":",
"rf",
"=",
"self",
".",
"_roll_finders",
"[",
"asset",
".",
"roll_style",
"]",
"sid",
"=",
"(",
"rf",
".",
"get_contract_center",
"(",
"asset",
".",
"root_symbol",
",",
"dt",
",",
"asset",
".",
"offset",
")",
")",
"if",
"sid",
"is",
"None",
":",
"return",
"pd",
".",
"NaT",
"contract",
"=",
"rf",
".",
"asset_finder",
".",
"retrieve_asset",
"(",
"sid",
")",
"return",
"self",
".",
"_bar_reader",
".",
"get_last_traded_dt",
"(",
"contract",
",",
"dt",
")"
] | Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point. | [
"Get",
"the",
"latest",
"minute",
"on",
"or",
"before",
"dt",
"in",
"which",
"asset",
"traded",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/continuous_future_reader.py#L158-L184 |
26,161 | quantopian/zipline | zipline/protocol.py | Portfolio.current_portfolio_weights | def current_portfolio_weights(self):
"""
Compute each asset's weight in the portfolio by calculating its held
value divided by the total value of all positions.
Each equity's value is its price times the number of shares held. Each
futures contract's value is its unit price times number of shares held
times the multiplier.
"""
position_values = pd.Series({
asset: (
position.last_sale_price *
position.amount *
asset.price_multiplier
)
for asset, position in self.positions.items()
})
return position_values / self.portfolio_value | python | def current_portfolio_weights(self):
"""
Compute each asset's weight in the portfolio by calculating its held
value divided by the total value of all positions.
Each equity's value is its price times the number of shares held. Each
futures contract's value is its unit price times number of shares held
times the multiplier.
"""
position_values = pd.Series({
asset: (
position.last_sale_price *
position.amount *
asset.price_multiplier
)
for asset, position in self.positions.items()
})
return position_values / self.portfolio_value | [
"def",
"current_portfolio_weights",
"(",
"self",
")",
":",
"position_values",
"=",
"pd",
".",
"Series",
"(",
"{",
"asset",
":",
"(",
"position",
".",
"last_sale_price",
"*",
"position",
".",
"amount",
"*",
"asset",
".",
"price_multiplier",
")",
"for",
"asset",
",",
"position",
"in",
"self",
".",
"positions",
".",
"items",
"(",
")",
"}",
")",
"return",
"position_values",
"/",
"self",
".",
"portfolio_value"
] | Compute each asset's weight in the portfolio by calculating its held
value divided by the total value of all positions.
Each equity's value is its price times the number of shares held. Each
futures contract's value is its unit price times number of shares held
times the multiplier. | [
"Compute",
"each",
"asset",
"s",
"weight",
"in",
"the",
"portfolio",
"by",
"calculating",
"its",
"held",
"value",
"divided",
"by",
"the",
"total",
"value",
"of",
"all",
"positions",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/protocol.py#L216-L233 |
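A standalone sketch of the weight computation described in current_portfolio_weights, with Position and asset objects replaced by namedtuples; the symbols, prices, multipliers, and portfolio value are all invented.

from collections import namedtuple
import pandas as pd

Asset = namedtuple('Asset', ['symbol', 'price_multiplier'])
Position = namedtuple('Position', ['amount', 'last_sale_price'])

positions = {
    Asset('AAPL', 1): Position(amount=100, last_sale_price=105.0),   # equity
    Asset('CLG16', 1000): Position(amount=2, last_sale_price=30.0),  # future
}
portfolio_value = 100500.0   # assumed total, including cash

position_values = pd.Series({
    asset: pos.last_sale_price * pos.amount * asset.price_multiplier
    for asset, pos in positions.items()
})
weights = position_values / portfolio_value   # need not sum to 1 if cash is held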
26,162 | tensorflow/datasets | tensorflow_datasets/image/sun.py | _decode_image | def _decode_image(fobj, session, filename):
"""Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels).
"""
buf = fobj.read()
image = tfds.core.lazy_imports.cv2.imdecode(
np.fromstring(buf, dtype=np.uint8), flags=3) # Note: Converts to RGB.
if image is None:
logging.warning(
"Image %s could not be decoded by OpenCV, falling back to TF", filename)
try:
image = tf.image.decode_image(buf, channels=3)
image = session.run(image)
except tf.errors.InvalidArgumentError:
logging.fatal("Image %s could not be decoded by Tensorflow", filename)
# The GIF images contain a single frame.
if len(image.shape) == 4: # rank=4 -> rank=3
image = image.reshape(image.shape[1:])
return image | python | def _decode_image(fobj, session, filename):
"""Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels).
"""
buf = fobj.read()
image = tfds.core.lazy_imports.cv2.imdecode(
np.fromstring(buf, dtype=np.uint8), flags=3) # Note: Converts to RGB.
if image is None:
logging.warning(
"Image %s could not be decoded by OpenCV, falling back to TF", filename)
try:
image = tf.image.decode_image(buf, channels=3)
image = session.run(image)
except tf.errors.InvalidArgumentError:
logging.fatal("Image %s could not be decoded by Tensorflow", filename)
# The GIF images contain a single frame.
if len(image.shape) == 4: # rank=4 -> rank=3
image = image.reshape(image.shape[1:])
return image | [
"def",
"_decode_image",
"(",
"fobj",
",",
"session",
",",
"filename",
")",
":",
"buf",
"=",
"fobj",
".",
"read",
"(",
")",
"image",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"cv2",
".",
"imdecode",
"(",
"np",
".",
"fromstring",
"(",
"buf",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
",",
"flags",
"=",
"3",
")",
"# Note: Converts to RGB.",
"if",
"image",
"is",
"None",
":",
"logging",
".",
"warning",
"(",
"\"Image %s could not be decoded by OpenCV, falling back to TF\"",
",",
"filename",
")",
"try",
":",
"image",
"=",
"tf",
".",
"image",
".",
"decode_image",
"(",
"buf",
",",
"channels",
"=",
"3",
")",
"image",
"=",
"session",
".",
"run",
"(",
"image",
")",
"except",
"tf",
".",
"errors",
".",
"InvalidArgumentError",
":",
"logging",
".",
"fatal",
"(",
"\"Image %s could not be decoded by Tensorflow\"",
",",
"filename",
")",
"# The GIF images contain a single frame.",
"if",
"len",
"(",
"image",
".",
"shape",
")",
"==",
"4",
":",
"# rank=4 -> rank=3",
"image",
"=",
"image",
".",
"reshape",
"(",
"image",
".",
"shape",
"[",
"1",
":",
"]",
")",
"return",
"image"
] | Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels). | [
"Reads",
"and",
"decodes",
"an",
"image",
"from",
"a",
"file",
"object",
"as",
"a",
"Numpy",
"array",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/sun.py#L65-L102 |
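A minimal sketch of the decode-with-fallback pattern from _decode_image, written for eager TensorFlow and using np.frombuffer in place of the deprecated np.fromstring; the helper name, the sample path, and the assumption that cv2 and TF 2.x are importable are mine, not the dataset's.

import numpy as np
import cv2                      # assumed available, as via tfds lazy_imports
import tensorflow as tf         # assumed TF 2.x (eager) for the fallback

def decode_three_channel(buf):
    image = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), flags=3)
    if image is None:
        # OpenCV rejected the payload; let TF try (handles e.g. interlaced PNGs).
        image = tf.image.decode_image(buf, channels=3).numpy()
    if image.ndim == 4:          # single-frame GIFs come back with a frame axis
        image = image.reshape(image.shape[1:])
    return image

# with open('some_image.jpg', 'rb') as f:   # hypothetical path
#     arr = decode_three_channel(f.read())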
26,163 | tensorflow/datasets | tensorflow_datasets/image/sun.py | _process_image_file | def _process_image_file(fobj, session, filename):
"""Process image files from the dataset."""
# We need to read the image files and convert them to JPEG, since some files
# actually contain GIF, PNG or BMP data (despite having a .jpg extension) and
# some encoding options that will make TF crash in general.
image = _decode_image(fobj, session, filename=filename)
return _encode_jpeg(image) | python | def _process_image_file(fobj, session, filename):
"""Process image files from the dataset."""
# We need to read the image files and convert them to JPEG, since some files
# actually contain GIF, PNG or BMP data (despite having a .jpg extension) and
# some encoding options that will make TF crash in general.
image = _decode_image(fobj, session, filename=filename)
return _encode_jpeg(image) | [
"def",
"_process_image_file",
"(",
"fobj",
",",
"session",
",",
"filename",
")",
":",
"# We need to read the image files and convert them to JPEG, since some files",
"# actually contain GIF, PNG or BMP data (despite having a .jpg extension) and",
"# some encoding options that will make TF crash in general.",
"image",
"=",
"_decode_image",
"(",
"fobj",
",",
"session",
",",
"filename",
"=",
"filename",
")",
"return",
"_encode_jpeg",
"(",
"image",
")"
] | Process image files from the dataset. | [
"Process",
"image",
"files",
"from",
"the",
"dataset",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/sun.py#L113-L119 |
26,164 | tensorflow/datasets | tensorflow_datasets/translate/wmt.py | _parse_parallel_sentences | def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with tf.io.gfile.GFile(path) as f, gzip.GzipFile(fileobj=f) as g:
return g.read().split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with tf.io.gfile.GFile(path) as f:
return f.read().split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with tf.io.gfile.GFile(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = tf.io.gfile.glob(f1)
f2_files = tf.io.gfile.glob(f2)
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), (
"Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files), len(f2_files), f1, f2))
for f1_i, f2_i in zip(sorted(f1_files), sorted(f2_files)):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), (
"Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences), len(l2_sentences), f1_i, f2_i))
for s1, s2 in zip(l1_sentences, l2_sentences):
yield {
l1: s1,
l2: s2
} | python | def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.split(".")
if split_path[-1] == "gz":
lang = split_path[-2]
with tf.io.gfile.GFile(path) as f, gzip.GzipFile(fileobj=f) as g:
return g.read().split("\n"), lang
if split_path[-1] == "txt":
# CWMT
lang = split_path[-2].split("_")[-1]
lang = "zh" if lang in ("ch", "cn") else lang
else:
lang = split_path[-1]
with tf.io.gfile.GFile(path) as f:
return f.read().split("\n"), lang
def _parse_sgm(path):
"""Returns sentences from a single SGML file."""
lang = path.split(".")[-2]
sentences = []
# Note: We can't use the XML parser since some of the files are badly
# formatted.
seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
with tf.io.gfile.GFile(path) as f:
for line in f:
seg_match = re.match(seg_re, line)
if seg_match:
assert len(seg_match.groups()) == 1
sentences.append(seg_match.groups()[0])
return sentences, lang
parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
# Some datasets (e.g., CWMT) contain multiple parallel files specified with
# a wildcard. We sort both sets to align them and parse them one by one.
f1_files = tf.io.gfile.glob(f1)
f2_files = tf.io.gfile.glob(f2)
assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
assert len(f1_files) == len(f2_files), (
"Number of files do not match: %d vs %d for %s vs %s." % (
len(f1_files), len(f2_files), f1, f2))
for f1_i, f2_i in zip(sorted(f1_files), sorted(f2_files)):
l1_sentences, l1 = parse_file(f1_i)
l2_sentences, l2 = parse_file(f2_i)
assert len(l1_sentences) == len(l2_sentences), (
"Sizes do not match: %d vs %d for %s vs %s." % (
len(l1_sentences), len(l2_sentences), f1_i, f2_i))
for s1, s2 in zip(l1_sentences, l2_sentences):
yield {
l1: s1,
l2: s2
} | [
"def",
"_parse_parallel_sentences",
"(",
"f1",
",",
"f2",
")",
":",
"def",
"_parse_text",
"(",
"path",
")",
":",
"\"\"\"Returns the sentences from a single text file, which may be gzipped.\"\"\"",
"split_path",
"=",
"path",
".",
"split",
"(",
"\".\"",
")",
"if",
"split_path",
"[",
"-",
"1",
"]",
"==",
"\"gz\"",
":",
"lang",
"=",
"split_path",
"[",
"-",
"2",
"]",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
",",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"f",
")",
"as",
"g",
":",
"return",
"g",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
",",
"lang",
"if",
"split_path",
"[",
"-",
"1",
"]",
"==",
"\"txt\"",
":",
"# CWMT",
"lang",
"=",
"split_path",
"[",
"-",
"2",
"]",
".",
"split",
"(",
"\"_\"",
")",
"[",
"-",
"1",
"]",
"lang",
"=",
"\"zh\"",
"if",
"lang",
"in",
"(",
"\"ch\"",
",",
"\"cn\"",
")",
"else",
"lang",
"else",
":",
"lang",
"=",
"split_path",
"[",
"-",
"1",
"]",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
",",
"lang",
"def",
"_parse_sgm",
"(",
"path",
")",
":",
"\"\"\"Returns sentences from a single SGML file.\"\"\"",
"lang",
"=",
"path",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"2",
"]",
"sentences",
"=",
"[",
"]",
"# Note: We can't use the XML parser since some of the files are badly",
"# formatted.",
"seg_re",
"=",
"re",
".",
"compile",
"(",
"r\"<seg id=\\\"\\d+\\\">(.*)</seg>\"",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"seg_match",
"=",
"re",
".",
"match",
"(",
"seg_re",
",",
"line",
")",
"if",
"seg_match",
":",
"assert",
"len",
"(",
"seg_match",
".",
"groups",
"(",
")",
")",
"==",
"1",
"sentences",
".",
"append",
"(",
"seg_match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"return",
"sentences",
",",
"lang",
"parse_file",
"=",
"_parse_sgm",
"if",
"f1",
".",
"endswith",
"(",
"\".sgm\"",
")",
"else",
"_parse_text",
"# Some datasets (e.g., CWMT) contain multiple parallel files specified with",
"# a wildcard. We sort both sets to align them and parse them one by one.",
"f1_files",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"glob",
"(",
"f1",
")",
"f2_files",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"glob",
"(",
"f2",
")",
"assert",
"f1_files",
"and",
"f2_files",
",",
"\"No matching files found: %s, %s.\"",
"%",
"(",
"f1",
",",
"f2",
")",
"assert",
"len",
"(",
"f1_files",
")",
"==",
"len",
"(",
"f2_files",
")",
",",
"(",
"\"Number of files do not match: %d vs %d for %s vs %s.\"",
"%",
"(",
"len",
"(",
"f1_files",
")",
",",
"len",
"(",
"f2_files",
")",
",",
"f1",
",",
"f2",
")",
")",
"for",
"f1_i",
",",
"f2_i",
"in",
"zip",
"(",
"sorted",
"(",
"f1_files",
")",
",",
"sorted",
"(",
"f2_files",
")",
")",
":",
"l1_sentences",
",",
"l1",
"=",
"parse_file",
"(",
"f1_i",
")",
"l2_sentences",
",",
"l2",
"=",
"parse_file",
"(",
"f2_i",
")",
"assert",
"len",
"(",
"l1_sentences",
")",
"==",
"len",
"(",
"l2_sentences",
")",
",",
"(",
"\"Sizes do not match: %d vs %d for %s vs %s.\"",
"%",
"(",
"len",
"(",
"l1_sentences",
")",
",",
"len",
"(",
"l2_sentences",
")",
",",
"f1_i",
",",
"f2_i",
")",
")",
"for",
"s1",
",",
"s2",
"in",
"zip",
"(",
"l1_sentences",
",",
"l2_sentences",
")",
":",
"yield",
"{",
"l1",
":",
"s1",
",",
"l2",
":",
"s2",
"}"
] | Returns examples from parallel SGML or text files, which may be gzipped. | [
"Returns",
"examples",
"from",
"parallel",
"SGML",
"or",
"text",
"files",
"which",
"may",
"be",
"gzipped",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L761-L820 |
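A self-contained check of the <seg> regex used for the SGML branch of _parse_parallel_sentences; the four-line sample document is made up.

import re

seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")

sample = [
    '<doc docid="x" sysid="ref">',
    '<seg id="1">Ein kleiner Satz.</seg>',
    '<seg id="2">Noch ein Satz.</seg>',
    '</doc>',
]
sentences = [m.group(1)
             for m in (re.match(seg_re, line) for line in sample)
             if m is not None]
# sentences == ['Ein kleiner Satz.', 'Noch ein Satz.']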
26,165 | tensorflow/datasets | tensorflow_datasets/translate/wmt.py | _parse_tmx | def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with tf.io.gfile.GFile(path) as f:
for _, elem in ElementTree.iterparse(f):
if elem.tag == "tu":
yield {
_get_tuv_lang(tuv):
_get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")
}
elem.clear() | python | def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with tf.io.gfile.GFile(path) as f:
for _, elem in ElementTree.iterparse(f):
if elem.tag == "tu":
yield {
_get_tuv_lang(tuv):
_get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")
}
elem.clear() | [
"def",
"_parse_tmx",
"(",
"path",
")",
":",
"def",
"_get_tuv_lang",
"(",
"tuv",
")",
":",
"for",
"k",
",",
"v",
"in",
"tuv",
".",
"items",
"(",
")",
":",
"if",
"k",
".",
"endswith",
"(",
"\"}lang\"",
")",
":",
"return",
"v",
"raise",
"AssertionError",
"(",
"\"Language not found in `tuv` attributes.\"",
")",
"def",
"_get_tuv_seg",
"(",
"tuv",
")",
":",
"segs",
"=",
"tuv",
".",
"findall",
"(",
"\"seg\"",
")",
"assert",
"len",
"(",
"segs",
")",
"==",
"1",
",",
"\"Invalid number of segments: %d\"",
"%",
"len",
"(",
"segs",
")",
"return",
"segs",
"[",
"0",
"]",
".",
"text",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
":",
"for",
"_",
",",
"elem",
"in",
"ElementTree",
".",
"iterparse",
"(",
"f",
")",
":",
"if",
"elem",
".",
"tag",
"==",
"\"tu\"",
":",
"yield",
"{",
"_get_tuv_lang",
"(",
"tuv",
")",
":",
"_get_tuv_seg",
"(",
"tuv",
")",
"for",
"tuv",
"in",
"elem",
".",
"iterfind",
"(",
"\"tuv\"",
")",
"}",
"elem",
".",
"clear",
"(",
")"
] | Generates examples from TMX file. | [
"Generates",
"examples",
"from",
"TMX",
"file",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L838-L858 |
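A runnable sketch of the TMX traversal in _parse_tmx using ElementTree.iterparse; the two-segment document is invented, and the "}lang" suffix test mirrors how ElementTree expands the xml:lang attribute into its namespaced form.

import io
from xml.etree import ElementTree

tmx = b"""<tmx version="1.4"><body>
<tu>
  <tuv xml:lang="en"><seg>Hello.</seg></tuv>
  <tuv xml:lang="de"><seg>Hallo.</seg></tuv>
</tu>
</body></tmx>"""

pairs = []
for _, elem in ElementTree.iterparse(io.BytesIO(tmx)):
    if elem.tag == "tu":
        pair = {}
        for tuv in elem.iterfind("tuv"):
            lang = next(v for k, v in tuv.items() if k.endswith("}lang"))
            pair[lang] = tuv.find("seg").text
        pairs.append(pair)
        elem.clear()   # free the subtree so large files stream in bounded memory
# pairs == [{'en': 'Hello.', 'de': 'Hallo.'}]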
26,166 | tensorflow/datasets | tensorflow_datasets/translate/wmt.py | _parse_tsv | def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with tf.io.gfile.GFile(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning(
"Skipping line %d in TSV (%s) with %d != 2 columns.",
j, path, len(cols))
continue
s1, s2 = cols
yield {
l1: s1.strip(),
l2: s2.strip()
} | python | def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
else:
l1, l2 = language_pair
with tf.io.gfile.GFile(path) as f:
for j, line in enumerate(f):
cols = line.split("\t")
if len(cols) != 2:
logging.warning(
"Skipping line %d in TSV (%s) with %d != 2 columns.",
j, path, len(cols))
continue
s1, s2 = cols
yield {
l1: s1.strip(),
l2: s2.strip()
} | [
"def",
"_parse_tsv",
"(",
"path",
",",
"language_pair",
"=",
"None",
")",
":",
"if",
"language_pair",
"is",
"None",
":",
"lang_match",
"=",
"re",
".",
"match",
"(",
"r\".*\\.([a-z][a-z])-([a-z][a-z])\\.tsv\"",
",",
"path",
")",
"assert",
"lang_match",
"is",
"not",
"None",
",",
"\"Invalid TSV filename: %s\"",
"%",
"path",
"l1",
",",
"l2",
"=",
"lang_match",
".",
"groups",
"(",
")",
"else",
":",
"l1",
",",
"l2",
"=",
"language_pair",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
":",
"for",
"j",
",",
"line",
"in",
"enumerate",
"(",
"f",
")",
":",
"cols",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"if",
"len",
"(",
"cols",
")",
"!=",
"2",
":",
"logging",
".",
"warning",
"(",
"\"Skipping line %d in TSV (%s) with %d != 2 columns.\"",
",",
"j",
",",
"path",
",",
"len",
"(",
"cols",
")",
")",
"continue",
"s1",
",",
"s2",
"=",
"cols",
"yield",
"{",
"l1",
":",
"s1",
".",
"strip",
"(",
")",
",",
"l2",
":",
"s2",
".",
"strip",
"(",
")",
"}"
] | Generates examples from TSV file. | [
"Generates",
"examples",
"from",
"TSV",
"file",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L861-L881 |
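A quick check of the filename convention the TSV branch relies on when no language_pair is passed; the filename is hypothetical.

import re

path = "newscommentary_v13.de-en.tsv"
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
l1, l2 = lang_match.groups()   # ('de', 'en')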
26,167 | tensorflow/datasets | tensorflow_datasets/translate/wmt.py | _parse_wikiheadlines | def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with tf.io.gfile.GFile(path) as f:
for line in f:
s1, s2 = line.split("|||")
yield {
l1: s1.strip(),
l2: s2.strip()
} | python | def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
with tf.io.gfile.GFile(path) as f:
for line in f:
s1, s2 = line.split("|||")
yield {
l1: s1.strip(),
l2: s2.strip()
} | [
"def",
"_parse_wikiheadlines",
"(",
"path",
")",
":",
"lang_match",
"=",
"re",
".",
"match",
"(",
"r\".*\\.([a-z][a-z])-([a-z][a-z])$\"",
",",
"path",
")",
"assert",
"lang_match",
"is",
"not",
"None",
",",
"\"Invalid Wikiheadlines filename: %s\"",
"%",
"path",
"l1",
",",
"l2",
"=",
"lang_match",
".",
"groups",
"(",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"s1",
",",
"s2",
"=",
"line",
".",
"split",
"(",
"\"|||\"",
")",
"yield",
"{",
"l1",
":",
"s1",
".",
"strip",
"(",
")",
",",
"l2",
":",
"s2",
".",
"strip",
"(",
")",
"}"
] | Generates examples from Wikiheadlines dataset file. | [
"Generates",
"examples",
"from",
"Wikiheadlines",
"dataset",
"file",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L884-L895 |
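A tiny sketch of the Wikiheadlines line format handled above: one source/target pair per line, separated by "|||". The sentence pair is made up.

line = "Kleine Schlagzeile ||| Small headline\n"
s1, s2 = line.split("|||")
pair = {"de": s1.strip(), "en": s2.strip()}
# pair == {'de': 'Kleine Schlagzeile', 'en': 'Small headline'}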
26,168 | tensorflow/datasets | tensorflow_datasets/translate/wmt.py | _parse_czeng | def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with tf.io.gfile.GFile(filter_path) as f:
bad_blocks = {
blk for blk in re.search(
r"qw{([\s\d]*)}", f.read()).groups()[0].split()
}
logging.info(
"Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.",
len(bad_blocks))
for path in paths:
for gz_path in tf.io.gfile.glob(path):
with tf.io.gfile.GFile(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
for line in f:
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
yield {
"cs": cs.strip(),
"en": en.strip(),
} | python | def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with tf.io.gfile.GFile(filter_path) as f:
bad_blocks = {
blk for blk in re.search(
r"qw{([\s\d]*)}", f.read()).groups()[0].split()
}
logging.info(
"Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.",
len(bad_blocks))
for path in paths:
for gz_path in tf.io.gfile.glob(path):
with tf.io.gfile.GFile(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
for line in f:
line = line.decode("utf-8") # required for py3
if not line.strip():
continue
id_, unused_score, cs, en = line.split("\t")
if filter_path:
block_match = re.match(re_block, id_)
if block_match and block_match.groups()[0] in bad_blocks:
continue
yield {
"cs": cs.strip(),
"en": en.strip(),
} | [
"def",
"_parse_czeng",
"(",
"*",
"paths",
",",
"*",
"*",
"kwargs",
")",
":",
"filter_path",
"=",
"kwargs",
".",
"get",
"(",
"\"filter_path\"",
",",
"None",
")",
"if",
"filter_path",
":",
"re_block",
"=",
"re",
".",
"compile",
"(",
"r\"^[^-]+-b(\\d+)-\\d\\d[tde]\"",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filter_path",
")",
"as",
"f",
":",
"bad_blocks",
"=",
"{",
"blk",
"for",
"blk",
"in",
"re",
".",
"search",
"(",
"r\"qw{([\\s\\d]*)}\"",
",",
"f",
".",
"read",
"(",
")",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
")",
"}",
"logging",
".",
"info",
"(",
"\"Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.\"",
",",
"len",
"(",
"bad_blocks",
")",
")",
"for",
"path",
"in",
"paths",
":",
"for",
"gz_path",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"glob",
"(",
"path",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"gz_path",
",",
"\"rb\"",
")",
"as",
"g",
",",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"g",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"decode",
"(",
"\"utf-8\"",
")",
"# required for py3",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"id_",
",",
"unused_score",
",",
"cs",
",",
"en",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"if",
"filter_path",
":",
"block_match",
"=",
"re",
".",
"match",
"(",
"re_block",
",",
"id_",
")",
"if",
"block_match",
"and",
"block_match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"in",
"bad_blocks",
":",
"continue",
"yield",
"{",
"\"cs\"",
":",
"cs",
".",
"strip",
"(",
")",
",",
"\"en\"",
":",
"en",
".",
"strip",
"(",
")",
",",
"}"
] | Generates examples from CzEng v1.6, with optional filtering for v1.7. | [
"Generates",
"examples",
"from",
"CzEng",
"v1",
".",
"6",
"with",
"optional",
"filtering",
"for",
"v1",
".",
"7",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L898-L927 |
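A self-contained sketch of the CzEng block filter used by _parse_czeng: each sentence id carries a block number that is checked against the v1.7 bad-block set. The ids and the bad-block set are invented.

import re

re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
bad_blocks = {"17"}   # hypothetical bad-block ids

def keep(id_):
    m = re.match(re_block, id_)
    return not (m and m.group(1) in bad_blocks)

ids = ["subtitles-b17-00t", "news-b3-01e"]
kept = [i for i in ids if keep(i)]
# kept == ['news-b3-01e']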
26,169 | tensorflow/datasets | tensorflow_datasets/translate/wmt.py | WmtTranslate.subsets | def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.builder_config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
ds = DATASET_MAP[ss_name]
if ds.target != target or source not in ds.sources:
logging.info(
"Skipping sub-dataset that does not include language pair: %s",
ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets | python | def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.builder_config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
ds = DATASET_MAP[ss_name]
if ds.target != target or source not in ds.sources:
logging.info(
"Skipping sub-dataset that does not include language pair: %s",
ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets | [
"def",
"subsets",
"(",
"self",
")",
":",
"source",
",",
"target",
"=",
"self",
".",
"builder_config",
".",
"language_pair",
"filtered_subsets",
"=",
"{",
"}",
"for",
"split",
",",
"ss_names",
"in",
"self",
".",
"_subsets",
".",
"items",
"(",
")",
":",
"filtered_subsets",
"[",
"split",
"]",
"=",
"[",
"]",
"for",
"ss_name",
"in",
"ss_names",
":",
"ds",
"=",
"DATASET_MAP",
"[",
"ss_name",
"]",
"if",
"ds",
".",
"target",
"!=",
"target",
"or",
"source",
"not",
"in",
"ds",
".",
"sources",
":",
"logging",
".",
"info",
"(",
"\"Skipping sub-dataset that does not include language pair: %s\"",
",",
"ss_name",
")",
"else",
":",
"filtered_subsets",
"[",
"split",
"]",
".",
"append",
"(",
"ss_name",
")",
"logging",
".",
"info",
"(",
"\"Using sub-datasets: %s\"",
",",
"filtered_subsets",
")",
"return",
"filtered_subsets"
] | Subsets that make up each split of the dataset for the language pair. | [
"Subsets",
"that",
"make",
"up",
"each",
"split",
"of",
"the",
"dataset",
"for",
"the",
"language",
"pair",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L615-L630 |
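The `subsets` property above filters a split-to-sub-dataset mapping down to the entries compatible with one language pair. A small stand-alone sketch of that filtering, with made-up sub-dataset metadata (the names and language sets below are assumptions, not the real WMT configuration):

```python
import collections

# Hypothetical metadata: each sub-dataset declares a target language and the
# source languages it covers.
SubDataset = collections.namedtuple("SubDataset", ["target", "sources"])
DATASET_MAP = {
    "europarl_v9": SubDataset(target="en", sources={"cs", "de"}),
    "paracrawl_v3": SubDataset(target="en", sources={"de"}),
    "newstest2018": SubDataset(target="en", sources={"cs"}),
}


def filter_subsets(subsets, source, target):
    """Keep only sub-dataset names compatible with (source, target)."""
    filtered = {}
    for split, names in subsets.items():
        filtered[split] = [
            n for n in names
            if DATASET_MAP[n].target == target and source in DATASET_MAP[n].sources
        ]
    return filtered


print(filter_subsets({"train": ["europarl_v9", "paracrawl_v3"],
                      "test": ["newstest2018"]}, source="cs", target="en"))
# -> {'train': ['europarl_v9'], 'test': ['newstest2018']}
```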
26,170 | tensorflow/datasets | tensorflow_datasets/core/registered.py | builder | def builder(name, **builder_init_kwargs):
"""Fetches a `tfds.core.DatasetBuilder` by string name.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
**builder_init_kwargs: `dict` of keyword arguments passed to the
`DatasetBuilder`. These will override keyword arguments passed in `name`,
if any.
Returns:
A `tfds.core.DatasetBuilder`.
Raises:
DatasetNotFoundError: if `name` is unrecognized.
"""
name, builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
builder_kwargs.update(builder_init_kwargs)
if name in _ABSTRACT_DATASET_REGISTRY:
raise DatasetNotFoundError(name, is_abstract=True)
if name in _IN_DEVELOPMENT_REGISTRY:
raise DatasetNotFoundError(name, in_development=True)
if name not in _DATASET_REGISTRY:
raise DatasetNotFoundError(name)
try:
return _DATASET_REGISTRY[name](**builder_kwargs)
except BaseException:
logging.error("Failed to construct dataset %s", name)
raise | python | def builder(name, **builder_init_kwargs):
"""Fetches a `tfds.core.DatasetBuilder` by string name.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
**builder_init_kwargs: `dict` of keyword arguments passed to the
`DatasetBuilder`. These will override keyword arguments passed in `name`,
if any.
Returns:
A `tfds.core.DatasetBuilder`.
Raises:
DatasetNotFoundError: if `name` is unrecognized.
"""
name, builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
builder_kwargs.update(builder_init_kwargs)
if name in _ABSTRACT_DATASET_REGISTRY:
raise DatasetNotFoundError(name, is_abstract=True)
if name in _IN_DEVELOPMENT_REGISTRY:
raise DatasetNotFoundError(name, in_development=True)
if name not in _DATASET_REGISTRY:
raise DatasetNotFoundError(name)
try:
return _DATASET_REGISTRY[name](**builder_kwargs)
except BaseException:
logging.error("Failed to construct dataset %s", name)
raise | [
"def",
"builder",
"(",
"name",
",",
"*",
"*",
"builder_init_kwargs",
")",
":",
"name",
",",
"builder_kwargs",
"=",
"_dataset_name_and_kwargs_from_name_str",
"(",
"name",
")",
"builder_kwargs",
".",
"update",
"(",
"builder_init_kwargs",
")",
"if",
"name",
"in",
"_ABSTRACT_DATASET_REGISTRY",
":",
"raise",
"DatasetNotFoundError",
"(",
"name",
",",
"is_abstract",
"=",
"True",
")",
"if",
"name",
"in",
"_IN_DEVELOPMENT_REGISTRY",
":",
"raise",
"DatasetNotFoundError",
"(",
"name",
",",
"in_development",
"=",
"True",
")",
"if",
"name",
"not",
"in",
"_DATASET_REGISTRY",
":",
"raise",
"DatasetNotFoundError",
"(",
"name",
")",
"try",
":",
"return",
"_DATASET_REGISTRY",
"[",
"name",
"]",
"(",
"*",
"*",
"builder_kwargs",
")",
"except",
"BaseException",
":",
"logging",
".",
"error",
"(",
"\"Failed to construct dataset %s\"",
",",
"name",
")",
"raise"
] | Fetches a `tfds.core.DatasetBuilder` by string name.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
**builder_init_kwargs: `dict` of keyword arguments passed to the
`DatasetBuilder`. These will override keyword arguments passed in `name`,
if any.
Returns:
A `tfds.core.DatasetBuilder`.
Raises:
DatasetNotFoundError: if `name` is unrecognized. | [
"Fetches",
"a",
"tfds",
".",
"core",
".",
"DatasetBuilder",
"by",
"string",
"name",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/registered.py#L137-L172 |
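The `builder` function above is essentially a name-to-class registry lookup with informative errors. A toy, self-contained version of that pattern (the registry, decorator, and `Mnist` class here are illustrative, not the actual tfds registry):

```python
_REGISTRY = {}


class DatasetNotFoundError(KeyError):
    pass


def register(cls):
    """Class decorator that registers a builder under its lowercase name."""
    _REGISTRY[cls.__name__.lower()] = cls
    return cls


def builder(name, **kwargs):
    """Look the builder class up by name and instantiate it with kwargs."""
    if name not in _REGISTRY:
        raise DatasetNotFoundError(name)
    return _REGISTRY[name](**kwargs)


@register
class Mnist:
    def __init__(self, data_dir=None):
        self.data_dir = data_dir


b = builder("mnist", data_dir="/tmp/data")
print(type(b).__name__, b.data_dir)  # Mnist /tmp/data
```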
26,171 | tensorflow/datasets | tensorflow_datasets/core/registered.py | load | def load(name,
split=None,
data_dir=None,
batch_size=1,
download=True,
as_supervised=False,
with_info=False,
builder_kwargs=None,
download_and_prepare_kwargs=None,
as_dataset_kwargs=None,
try_gcs=False):
"""Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `download=True`), and then calls `DatasetBuilder.as_dataset`.
This is roughly equivalent to:
```
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
if download:
builder.download_and_prepare(**download_and_prepare_kwargs)
ds = builder.as_dataset(
split=split, as_supervised=as_supervised, **as_dataset_kwargs)
if with_info:
return ds, builder.info
return ds
```
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
Callers must pass arguments as keyword arguments.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: `tfds.Split` or `str`, which split of the data to load. If None,
will return a `dict` with all splits (typically `tfds.Split.TRAIN` and
`tfds.Split.TEST`).
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/tensorflow_datasets".
batch_size: `int`, set to > 1 to get batches of examples. Note that
variable length features will be 0-padded. If
`batch_size=-1`, will return the full dataset as `tf.Tensor`s.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
with_info: `bool`, if True, tfds.load will return the tuple
(tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated
with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
download_and_prepare_kwargs: `dict` (optional) keyword arguments passed to
`tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allow
to control where to download and extract the cached data. If not set,
cache_dir and manual_dir will automatically be deduced from data_dir.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`. `split` will be passed through by
default. Example: `{'shuffle_files': True}`.
Note that shuffle_files is False by default unless
`split == tfds.Split.TRAIN`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
Split-specific information is available in `ds_info.splits`.
"""
name, name_builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
name_builder_kwargs.update(builder_kwargs or {})
builder_kwargs = name_builder_kwargs
# Set data_dir
if try_gcs and gcs_utils.is_dataset_on_gcs(name):
data_dir = constants.GCS_DATA_DIR
elif data_dir is None:
data_dir = constants.DATA_DIR
dbuilder = builder(name, data_dir=data_dir, **builder_kwargs)
if download:
download_and_prepare_kwargs = download_and_prepare_kwargs or {}
dbuilder.download_and_prepare(**download_and_prepare_kwargs)
if as_dataset_kwargs is None:
as_dataset_kwargs = {}
as_dataset_kwargs = dict(as_dataset_kwargs)
as_dataset_kwargs["split"] = split
as_dataset_kwargs["as_supervised"] = as_supervised
as_dataset_kwargs["batch_size"] = batch_size
ds = dbuilder.as_dataset(**as_dataset_kwargs)
if with_info:
return ds, dbuilder.info
return ds | python | def load(name,
split=None,
data_dir=None,
batch_size=1,
download=True,
as_supervised=False,
with_info=False,
builder_kwargs=None,
download_and_prepare_kwargs=None,
as_dataset_kwargs=None,
try_gcs=False):
"""Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `download=True`), and then calls `DatasetBuilder.as_dataset`.
This is roughly equivalent to:
```
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
if download:
builder.download_and_prepare(**download_and_prepare_kwargs)
ds = builder.as_dataset(
split=split, as_supervised=as_supervised, **as_dataset_kwargs)
if with_info:
return ds, builder.info
return ds
```
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
Callers must pass arguments as keyword arguments.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: `tfds.Split` or `str`, which split of the data to load. If None,
will return a `dict` with all splits (typically `tfds.Split.TRAIN` and
`tfds.Split.TEST`).
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/tensorflow_datasets".
batch_size: `int`, set to > 1 to get batches of examples. Note that
variable length features will be 0-padded. If
`batch_size=-1`, will return the full dataset as `tf.Tensor`s.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
with_info: `bool`, if True, tfds.load will return the tuple
(tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated
with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
download_and_prepare_kwargs: `dict` (optional) keyword arguments passed to
`tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allow
to control where to download and extract the cached data. If not set,
cache_dir and manual_dir will automatically be deduced from data_dir.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`. `split` will be passed through by
default. Example: `{'shuffle_files': True}`.
Note that shuffle_files is False by default unless
`split == tfds.Split.TRAIN`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
Split-specific information is available in `ds_info.splits`.
"""
name, name_builder_kwargs = _dataset_name_and_kwargs_from_name_str(name)
name_builder_kwargs.update(builder_kwargs or {})
builder_kwargs = name_builder_kwargs
# Set data_dir
if try_gcs and gcs_utils.is_dataset_on_gcs(name):
data_dir = constants.GCS_DATA_DIR
elif data_dir is None:
data_dir = constants.DATA_DIR
dbuilder = builder(name, data_dir=data_dir, **builder_kwargs)
if download:
download_and_prepare_kwargs = download_and_prepare_kwargs or {}
dbuilder.download_and_prepare(**download_and_prepare_kwargs)
if as_dataset_kwargs is None:
as_dataset_kwargs = {}
as_dataset_kwargs = dict(as_dataset_kwargs)
as_dataset_kwargs["split"] = split
as_dataset_kwargs["as_supervised"] = as_supervised
as_dataset_kwargs["batch_size"] = batch_size
ds = dbuilder.as_dataset(**as_dataset_kwargs)
if with_info:
return ds, dbuilder.info
return ds | [
"def",
"load",
"(",
"name",
",",
"split",
"=",
"None",
",",
"data_dir",
"=",
"None",
",",
"batch_size",
"=",
"1",
",",
"download",
"=",
"True",
",",
"as_supervised",
"=",
"False",
",",
"with_info",
"=",
"False",
",",
"builder_kwargs",
"=",
"None",
",",
"download_and_prepare_kwargs",
"=",
"None",
",",
"as_dataset_kwargs",
"=",
"None",
",",
"try_gcs",
"=",
"False",
")",
":",
"name",
",",
"name_builder_kwargs",
"=",
"_dataset_name_and_kwargs_from_name_str",
"(",
"name",
")",
"name_builder_kwargs",
".",
"update",
"(",
"builder_kwargs",
"or",
"{",
"}",
")",
"builder_kwargs",
"=",
"name_builder_kwargs",
"# Set data_dir",
"if",
"try_gcs",
"and",
"gcs_utils",
".",
"is_dataset_on_gcs",
"(",
"name",
")",
":",
"data_dir",
"=",
"constants",
".",
"GCS_DATA_DIR",
"elif",
"data_dir",
"is",
"None",
":",
"data_dir",
"=",
"constants",
".",
"DATA_DIR",
"dbuilder",
"=",
"builder",
"(",
"name",
",",
"data_dir",
"=",
"data_dir",
",",
"*",
"*",
"builder_kwargs",
")",
"if",
"download",
":",
"download_and_prepare_kwargs",
"=",
"download_and_prepare_kwargs",
"or",
"{",
"}",
"dbuilder",
".",
"download_and_prepare",
"(",
"*",
"*",
"download_and_prepare_kwargs",
")",
"if",
"as_dataset_kwargs",
"is",
"None",
":",
"as_dataset_kwargs",
"=",
"{",
"}",
"as_dataset_kwargs",
"=",
"dict",
"(",
"as_dataset_kwargs",
")",
"as_dataset_kwargs",
"[",
"\"split\"",
"]",
"=",
"split",
"as_dataset_kwargs",
"[",
"\"as_supervised\"",
"]",
"=",
"as_supervised",
"as_dataset_kwargs",
"[",
"\"batch_size\"",
"]",
"=",
"batch_size",
"ds",
"=",
"dbuilder",
".",
"as_dataset",
"(",
"*",
"*",
"as_dataset_kwargs",
")",
"if",
"with_info",
":",
"return",
"ds",
",",
"dbuilder",
".",
"info",
"return",
"ds"
] | Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `download=True`), and then calls `DatasetBuilder.as_dataset`.
This is roughly equivalent to:
```
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
if download:
builder.download_and_prepare(**download_and_prepare_kwargs)
ds = builder.as_dataset(
split=split, as_supervised=as_supervised, **as_dataset_kwargs)
if with_info:
return ds, builder.info
return ds
```
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
Callers must pass arguments as keyword arguments.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `"foo_bar/a=True,b=3"` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to
use the `"zoo"` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: `tfds.Split` or `str`, which split of the data to load. If None,
will return a `dict` with all splits (typically `tfds.Split.TRAIN` and
`tfds.Split.TEST`).
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/tensorflow_datasets".
batch_size: `int`, set to > 1 to get batches of examples. Note that
variable length features will be 0-padded. If
`batch_size=-1`, will return the full dataset as `tf.Tensor`s.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
with_info: `bool`, if True, tfds.load will return the tuple
(tf.data.Dataset, tfds.core.DatasetInfo) containing the info associated
with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
download_and_prepare_kwargs: `dict` (optional) keyword arguments passed to
`tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allow
to control where to download and extract the cached data. If not set,
cache_dir and manual_dir will automatically be deduced from data_dir.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`. `split` will be passed through by
default. Example: `{'shuffle_files': True}`.
Note that shuffle_files is False by default unless
`split == tfds.Split.TRAIN`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
Split-specific information is available in `ds_info.splits`. | [
"Loads",
"the",
"named",
"dataset",
"into",
"a",
"tf",
".",
"data",
".",
"Dataset",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/registered.py#L176-L297 |
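Going by the `tfds.load` docstring above, a typical call might look like the following; the dataset name is illustrative and behaviour may differ across library versions (for example, this snapshot defaults `batch_size` to 1, so each example carries a leading batch dimension):

```python
import tensorflow_datasets as tfds

# split selects which split to read, as_supervised=True yields (input, label)
# tuples, and with_info=True also returns the DatasetInfo object.
ds, info = tfds.load("mnist", split="train", as_supervised=True, with_info=True)

# tfds.as_numpy converts the tf.data.Dataset into an iterable of NumPy values.
for image, label in tfds.as_numpy(ds.take(3)):
    print(image.shape, label)
```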
26,172 | tensorflow/datasets | tensorflow_datasets/core/registered.py | _dataset_name_and_kwargs_from_name_str | def _dataset_name_and_kwargs_from_name_str(name_str):
"""Extract kwargs from name str."""
res = _NAME_REG.match(name_str)
if not res:
raise ValueError(_NAME_STR_ERR.format(name_str))
name = res.group("dataset_name")
kwargs = _kwargs_str_to_kwargs(res.group("kwargs"))
try:
for attr in ["config", "version"]:
val = res.group(attr)
if val is None:
continue
if attr in kwargs:
raise ValueError("Dataset %s: cannot pass %s twice." % (name, attr))
kwargs[attr] = val
return name, kwargs
except:
logging.error(_NAME_STR_ERR.format(name_str)) # pylint: disable=logging-format-interpolation
raise | python | def _dataset_name_and_kwargs_from_name_str(name_str):
"""Extract kwargs from name str."""
res = _NAME_REG.match(name_str)
if not res:
raise ValueError(_NAME_STR_ERR.format(name_str))
name = res.group("dataset_name")
kwargs = _kwargs_str_to_kwargs(res.group("kwargs"))
try:
for attr in ["config", "version"]:
val = res.group(attr)
if val is None:
continue
if attr in kwargs:
raise ValueError("Dataset %s: cannot pass %s twice." % (name, attr))
kwargs[attr] = val
return name, kwargs
except:
logging.error(_NAME_STR_ERR.format(name_str)) # pylint: disable=logging-format-interpolation
raise | [
"def",
"_dataset_name_and_kwargs_from_name_str",
"(",
"name_str",
")",
":",
"res",
"=",
"_NAME_REG",
".",
"match",
"(",
"name_str",
")",
"if",
"not",
"res",
":",
"raise",
"ValueError",
"(",
"_NAME_STR_ERR",
".",
"format",
"(",
"name_str",
")",
")",
"name",
"=",
"res",
".",
"group",
"(",
"\"dataset_name\"",
")",
"kwargs",
"=",
"_kwargs_str_to_kwargs",
"(",
"res",
".",
"group",
"(",
"\"kwargs\"",
")",
")",
"try",
":",
"for",
"attr",
"in",
"[",
"\"config\"",
",",
"\"version\"",
"]",
":",
"val",
"=",
"res",
".",
"group",
"(",
"attr",
")",
"if",
"val",
"is",
"None",
":",
"continue",
"if",
"attr",
"in",
"kwargs",
":",
"raise",
"ValueError",
"(",
"\"Dataset %s: cannot pass %s twice.\"",
"%",
"(",
"name",
",",
"attr",
")",
")",
"kwargs",
"[",
"attr",
"]",
"=",
"val",
"return",
"name",
",",
"kwargs",
"except",
":",
"logging",
".",
"error",
"(",
"_NAME_STR_ERR",
".",
"format",
"(",
"name_str",
")",
")",
"# pylint: disable=logging-format-interpolation",
"raise"
] | Extract kwargs from name str. | [
"Extract",
"kwargs",
"from",
"name",
"str",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/registered.py#L311-L329 |
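A self-contained sketch of the name-string parsing the helper above performs: a regex splits `"name/config:version/k=v,..."` into pieces and the trailing keyword segment becomes a dict. The grammar below is a simplification, not the exact pattern the library accepts.

```python
import re

_NAME_RE = re.compile(
    r"^(?P<dataset_name>\w+)"
    r"(/(?P<config>[\w\-\.]+))?"
    r"(:(?P<version>\d+\.\d+\.\d+))?"
    r"(/(?P<kwargs>(\w+=\S+)(,\w+=\S+)*))?$")


def parse_name(name_str):
    """Split 'foo_bar/zoo:1.0.0/a=True,b=3' into a name and a kwargs dict."""
    m = _NAME_RE.match(name_str)
    if not m:
        raise ValueError("Unparseable name: %r" % name_str)
    kwargs = {}
    if m.group("kwargs"):
        for item in m.group("kwargs").split(","):
            k, v = item.split("=", 1)
            kwargs[k] = v
    for attr in ("config", "version"):
        if m.group(attr):
            kwargs[attr] = m.group(attr)
    return m.group("dataset_name"), kwargs


print(parse_name("foo_bar/zoo:1.0.0/a=True,b=3"))
# ('foo_bar', {'a': 'True', 'b': '3', 'config': 'zoo', 'version': '1.0.0'})
```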
26,173 | tensorflow/datasets | tensorflow_datasets/core/registered.py | _cast_to_pod | def _cast_to_pod(val):
"""Try cast to int, float, bool, str, in that order."""
bools = {"True": True, "False": False}
if val in bools:
return bools[val]
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return tf.compat.as_text(val) | python | def _cast_to_pod(val):
"""Try cast to int, float, bool, str, in that order."""
bools = {"True": True, "False": False}
if val in bools:
return bools[val]
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return tf.compat.as_text(val) | [
"def",
"_cast_to_pod",
"(",
"val",
")",
":",
"bools",
"=",
"{",
"\"True\"",
":",
"True",
",",
"\"False\"",
":",
"False",
"}",
"if",
"val",
"in",
"bools",
":",
"return",
"bools",
"[",
"val",
"]",
"try",
":",
"return",
"int",
"(",
"val",
")",
"except",
"ValueError",
":",
"try",
":",
"return",
"float",
"(",
"val",
")",
"except",
"ValueError",
":",
"return",
"tf",
".",
"compat",
".",
"as_text",
"(",
"val",
")"
] | Try cast to int, float, bool, str, in that order. | [
"Try",
"cast",
"to",
"int",
"float",
"bool",
"str",
"in",
"that",
"order",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/registered.py#L343-L354 |
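A standalone version of the `_cast_to_pod` coercion above — try bool, then int, then float, and fall back to the raw string (the `tf.compat.as_text` fallback is replaced by a plain string return here, which drops the TensorFlow dependency):

```python
def cast_to_pod(val):
    """Coerce a string to bool, int, or float, falling back to str."""
    bools = {"True": True, "False": False}
    if val in bools:
        return bools[val]
    try:
        return int(val)
    except ValueError:
        try:
            return float(val)
        except ValueError:
            return val


assert cast_to_pod("True") is True
assert cast_to_pod("3") == 3
assert cast_to_pod("2.5") == 2.5
assert cast_to_pod("zoo") == "zoo"
```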
26,174 | tensorflow/datasets | tensorflow_datasets/core/lazy_imports.py | _try_import | def _try_import(module_name):
"""Try importing a module, with an informative error message on failure."""
try:
mod = importlib.import_module(module_name)
return mod
except ImportError:
err_msg = ("Tried importing %s but failed. See setup.py extras_require. "
"The dataset you are trying to use may have additional "
"dependencies.")
utils.reraise(err_msg) | python | def _try_import(module_name):
"""Try importing a module, with an informative error message on failure."""
try:
mod = importlib.import_module(module_name)
return mod
except ImportError:
err_msg = ("Tried importing %s but failed. See setup.py extras_require. "
"The dataset you are trying to use may have additional "
"dependencies.")
utils.reraise(err_msg) | [
"def",
"_try_import",
"(",
"module_name",
")",
":",
"try",
":",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"module_name",
")",
"return",
"mod",
"except",
"ImportError",
":",
"err_msg",
"=",
"(",
"\"Tried importing %s but failed. See setup.py extras_require. \"",
"\"The dataset you are trying to use may have additional \"",
"\"dependencies.\"",
")",
"utils",
".",
"reraise",
"(",
"err_msg",
")"
] | Try importing a module, with an informative error message on failure. | [
"Try",
"importing",
"a",
"module",
"with",
"an",
"informative",
"error",
"message",
"on",
"failure",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/lazy_imports.py#L27-L36 |
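The `_try_import` helper above wraps `importlib` so a missing optional dependency fails with a pointer to the extras that provide it. A dependency-free sketch of the same idea (the hint text is illustrative):

```python
import importlib


def try_import(module_name, extra_hint="See setup.py extras_require."):
    """Import a module by name, re-raising ImportError with a hint about
    the optional dependency that is probably missing."""
    try:
        return importlib.import_module(module_name)
    except ImportError as e:
        raise ImportError(
            "Tried importing %s but failed. %s" % (module_name, extra_hint)) from e


np = try_import("numpy")           # succeeds if numpy is installed
# try_import("some_optional_lib")  # would raise with the extra hint attached
```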
26,175 | tensorflow/datasets | tensorflow_datasets/core/features/sequence_feature.py | np_to_list | def np_to_list(elem):
"""Returns list from list, tuple or ndarray."""
if isinstance(elem, list):
return elem
elif isinstance(elem, tuple):
return list(elem)
elif isinstance(elem, np.ndarray):
return list(elem)
else:
raise ValueError(
'Input elements of a sequence should be either a numpy array, a '
'python list or tuple. Got {}'.format(type(elem))) | python | def np_to_list(elem):
"""Returns list from list, tuple or ndarray."""
if isinstance(elem, list):
return elem
elif isinstance(elem, tuple):
return list(elem)
elif isinstance(elem, np.ndarray):
return list(elem)
else:
raise ValueError(
'Input elements of a sequence should be either a numpy array, a '
'python list or tuple. Got {}'.format(type(elem))) | [
"def",
"np_to_list",
"(",
"elem",
")",
":",
"if",
"isinstance",
"(",
"elem",
",",
"list",
")",
":",
"return",
"elem",
"elif",
"isinstance",
"(",
"elem",
",",
"tuple",
")",
":",
"return",
"list",
"(",
"elem",
")",
"elif",
"isinstance",
"(",
"elem",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"list",
"(",
"elem",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Input elements of a sequence should be either a numpy array, a '",
"'python list or tuple. Got {}'",
".",
"format",
"(",
"type",
"(",
"elem",
")",
")",
")"
] | Returns list from list, tuple or ndarray. | [
"Returns",
"list",
"from",
"list",
"tuple",
"or",
"ndarray",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/sequence_feature.py#L257-L268 |
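A compact equivalent of `np_to_list` above, normalizing list, tuple, or `np.ndarray` inputs to a plain Python list:

```python
import numpy as np


def to_list(elem):
    """Normalize a list, tuple, or ndarray to a plain Python list."""
    if isinstance(elem, list):
        return elem
    if isinstance(elem, (tuple, np.ndarray)):
        return list(elem)
    raise ValueError(
        "Sequence elements must be list, tuple or np.ndarray, got %s" % type(elem))


print(to_list((1, 2, 3)))     # [1, 2, 3]
print(to_list(np.arange(3)))  # a plain list of the array's three elements
```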
26,176 | tensorflow/datasets | tensorflow_datasets/image/mnist.py | MNIST._generate_examples | def _generate_examples(self, num_examples, data_path, label_path):
"""Generate MNIST examples as dicts.
Args:
num_examples (int): The number of example.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples
"""
images = _extract_mnist_images(data_path, num_examples)
labels = _extract_mnist_labels(label_path, num_examples)
data = list(zip(images, labels))
# Data is shuffled automatically to distribute classes uniformly.
for image, label in data:
yield {
"image": image,
"label": label,
} | python | def _generate_examples(self, num_examples, data_path, label_path):
"""Generate MNIST examples as dicts.
Args:
num_examples (int): The number of example.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples
"""
images = _extract_mnist_images(data_path, num_examples)
labels = _extract_mnist_labels(label_path, num_examples)
data = list(zip(images, labels))
# Data is shuffled automatically to distribute classes uniformly.
for image, label in data:
yield {
"image": image,
"label": label,
} | [
"def",
"_generate_examples",
"(",
"self",
",",
"num_examples",
",",
"data_path",
",",
"label_path",
")",
":",
"images",
"=",
"_extract_mnist_images",
"(",
"data_path",
",",
"num_examples",
")",
"labels",
"=",
"_extract_mnist_labels",
"(",
"label_path",
",",
"num_examples",
")",
"data",
"=",
"list",
"(",
"zip",
"(",
"images",
",",
"labels",
")",
")",
"# Data is shuffled automatically to distribute classes uniformly.",
"for",
"image",
",",
"label",
"in",
"data",
":",
"yield",
"{",
"\"image\"",
":",
"image",
",",
"\"label\"",
":",
"label",
",",
"}"
] | Generate MNIST examples as dicts.
Args:
num_examples (int): The number of example.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples | [
"Generate",
"MNIST",
"examples",
"as",
"dicts",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/mnist.py#L146-L166 |
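The MNIST generator above zips decoded image and label arrays and yields one dict per example. A toy version with random stand-in data (the real builder decodes IDX files via `_extract_mnist_images` / `_extract_mnist_labels`, which are not reproduced here):

```python
import numpy as np


def generate_examples(images, labels):
    """Pair up parallel image/label arrays and yield one example dict each."""
    for image, label in zip(images, labels):
        yield {"image": image, "label": int(label)}


# Toy stand-ins for the decoded IDX files: four random 28x28x1 images, four labels.
images = np.random.randint(0, 256, size=(4, 28, 28, 1), dtype=np.uint8)
labels = np.array([5, 0, 4, 1])

for example in generate_examples(images, labels):
    print(example["image"].shape, example["label"])
```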
26,177 | tensorflow/datasets | tensorflow_datasets/core/dataset_info.py | get_dataset_feature_statistics | def get_dataset_feature_statistics(builder, split):
"""Calculate statistics for the specified split."""
statistics = statistics_pb2.DatasetFeatureStatistics()
# Make this to the best of our abilities.
schema = schema_pb2.Schema()
dataset = builder.as_dataset(split=split)
# Just computing the number of examples for now.
statistics.num_examples = 0
# Feature dictionaries.
feature_to_num_examples = collections.defaultdict(int)
feature_to_min = {}
feature_to_max = {}
np_dataset = dataset_utils.as_numpy(dataset)
for example in utils.tqdm(np_dataset, unit=" examples", leave=False):
statistics.num_examples += 1
assert isinstance(example, dict)
feature_names = sorted(example.keys())
for feature_name in feature_names:
# Update the number of examples this feature appears in.
feature_to_num_examples[feature_name] += 1
feature_np = example[feature_name]
# For compatibility in graph and eager mode, we can get PODs here and
# everything may not be neatly wrapped up in numpy's ndarray.
feature_dtype = type(feature_np)
if isinstance(feature_np, np.ndarray):
# If we have an empty array, then don't proceed further with computing
# statistics on it.
if feature_np.size == 0:
continue
feature_dtype = feature_np.dtype.type
feature_min, feature_max = None, None
is_numeric = (np.issubdtype(feature_dtype, np.number) or
feature_dtype == np.bool_)
if is_numeric:
feature_min = np.min(feature_np)
feature_max = np.max(feature_np)
# TODO(afrozm): What if shapes don't match? Populate ValueCount? Add
# logic for that.
# Set or update the min, max.
if is_numeric:
if ((feature_name not in feature_to_min) or
(feature_to_min[feature_name] > feature_min)):
feature_to_min[feature_name] = feature_min
if ((feature_name not in feature_to_max) or
(feature_to_max[feature_name] < feature_max)):
feature_to_max[feature_name] = feature_max
# Start here, we've processed all examples.
output_shapes_dict = dataset.output_shapes
output_types_dict = dataset.output_types
for feature_name in sorted(feature_to_num_examples.keys()):
# Try to fill in the schema.
feature = schema.feature.add()
feature.name = feature_name
# TODO(afrozm): Make this work with nested structures, currently the Schema
# proto has no support for it.
maybe_feature_shape = output_shapes_dict[feature_name]
if not isinstance(maybe_feature_shape, tf.TensorShape):
logging.error(
"Statistics generation doesn't work for nested structures yet")
continue
for dim in maybe_feature_shape.as_list():
# We denote `None`s as -1 in the shape proto.
feature.shape.dim.add().size = dim if dim else -1
feature_type = output_types_dict[feature_name]
feature.type = _FEATURE_TYPE_MAP.get(feature_type, schema_pb2.BYTES)
common_statistics = statistics_pb2.CommonStatistics()
common_statistics.num_non_missing = feature_to_num_examples[feature_name]
common_statistics.num_missing = (
statistics.num_examples - common_statistics.num_non_missing)
feature_name_statistics = statistics.features.add()
feature_name_statistics.name = feature_name
# TODO(afrozm): This can be skipped, since type information was added to
# the Schema.
feature_name_statistics.type = _SCHEMA_TYPE_MAP.get(
feature.type, statistics_pb2.FeatureNameStatistics.BYTES)
if feature.type == schema_pb2.INT or feature.type == schema_pb2.FLOAT:
numeric_statistics = statistics_pb2.NumericStatistics()
numeric_statistics.min = feature_to_min[feature_name]
numeric_statistics.max = feature_to_max[feature_name]
numeric_statistics.common_stats.CopyFrom(common_statistics)
feature_name_statistics.num_stats.CopyFrom(numeric_statistics)
else:
# Let's shove it into BytesStatistics for now.
bytes_statistics = statistics_pb2.BytesStatistics()
bytes_statistics.common_stats.CopyFrom(common_statistics)
feature_name_statistics.bytes_stats.CopyFrom(bytes_statistics)
return statistics, schema | python | def get_dataset_feature_statistics(builder, split):
"""Calculate statistics for the specified split."""
statistics = statistics_pb2.DatasetFeatureStatistics()
# Make this to the best of our abilities.
schema = schema_pb2.Schema()
dataset = builder.as_dataset(split=split)
# Just computing the number of examples for now.
statistics.num_examples = 0
# Feature dictionaries.
feature_to_num_examples = collections.defaultdict(int)
feature_to_min = {}
feature_to_max = {}
np_dataset = dataset_utils.as_numpy(dataset)
for example in utils.tqdm(np_dataset, unit=" examples", leave=False):
statistics.num_examples += 1
assert isinstance(example, dict)
feature_names = sorted(example.keys())
for feature_name in feature_names:
# Update the number of examples this feature appears in.
feature_to_num_examples[feature_name] += 1
feature_np = example[feature_name]
# For compatibility in graph and eager mode, we can get PODs here and
# everything may not be neatly wrapped up in numpy's ndarray.
feature_dtype = type(feature_np)
if isinstance(feature_np, np.ndarray):
# If we have an empty array, then don't proceed further with computing
# statistics on it.
if feature_np.size == 0:
continue
feature_dtype = feature_np.dtype.type
feature_min, feature_max = None, None
is_numeric = (np.issubdtype(feature_dtype, np.number) or
feature_dtype == np.bool_)
if is_numeric:
feature_min = np.min(feature_np)
feature_max = np.max(feature_np)
# TODO(afrozm): What if shapes don't match? Populate ValueCount? Add
# logic for that.
# Set or update the min, max.
if is_numeric:
if ((feature_name not in feature_to_min) or
(feature_to_min[feature_name] > feature_min)):
feature_to_min[feature_name] = feature_min
if ((feature_name not in feature_to_max) or
(feature_to_max[feature_name] < feature_max)):
feature_to_max[feature_name] = feature_max
# Start here, we've processed all examples.
output_shapes_dict = dataset.output_shapes
output_types_dict = dataset.output_types
for feature_name in sorted(feature_to_num_examples.keys()):
# Try to fill in the schema.
feature = schema.feature.add()
feature.name = feature_name
# TODO(afrozm): Make this work with nested structures, currently the Schema
# proto has no support for it.
maybe_feature_shape = output_shapes_dict[feature_name]
if not isinstance(maybe_feature_shape, tf.TensorShape):
logging.error(
"Statistics generation doesn't work for nested structures yet")
continue
for dim in maybe_feature_shape.as_list():
# We denote `None`s as -1 in the shape proto.
feature.shape.dim.add().size = dim if dim else -1
feature_type = output_types_dict[feature_name]
feature.type = _FEATURE_TYPE_MAP.get(feature_type, schema_pb2.BYTES)
common_statistics = statistics_pb2.CommonStatistics()
common_statistics.num_non_missing = feature_to_num_examples[feature_name]
common_statistics.num_missing = (
statistics.num_examples - common_statistics.num_non_missing)
feature_name_statistics = statistics.features.add()
feature_name_statistics.name = feature_name
# TODO(afrozm): This can be skipped, since type information was added to
# the Schema.
feature_name_statistics.type = _SCHEMA_TYPE_MAP.get(
feature.type, statistics_pb2.FeatureNameStatistics.BYTES)
if feature.type == schema_pb2.INT or feature.type == schema_pb2.FLOAT:
numeric_statistics = statistics_pb2.NumericStatistics()
numeric_statistics.min = feature_to_min[feature_name]
numeric_statistics.max = feature_to_max[feature_name]
numeric_statistics.common_stats.CopyFrom(common_statistics)
feature_name_statistics.num_stats.CopyFrom(numeric_statistics)
else:
# Let's shove it into BytesStatistics for now.
bytes_statistics = statistics_pb2.BytesStatistics()
bytes_statistics.common_stats.CopyFrom(common_statistics)
feature_name_statistics.bytes_stats.CopyFrom(bytes_statistics)
return statistics, schema | [
"def",
"get_dataset_feature_statistics",
"(",
"builder",
",",
"split",
")",
":",
"statistics",
"=",
"statistics_pb2",
".",
"DatasetFeatureStatistics",
"(",
")",
"# Make this to the best of our abilities.",
"schema",
"=",
"schema_pb2",
".",
"Schema",
"(",
")",
"dataset",
"=",
"builder",
".",
"as_dataset",
"(",
"split",
"=",
"split",
")",
"# Just computing the number of examples for now.",
"statistics",
".",
"num_examples",
"=",
"0",
"# Feature dictionaries.",
"feature_to_num_examples",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"feature_to_min",
"=",
"{",
"}",
"feature_to_max",
"=",
"{",
"}",
"np_dataset",
"=",
"dataset_utils",
".",
"as_numpy",
"(",
"dataset",
")",
"for",
"example",
"in",
"utils",
".",
"tqdm",
"(",
"np_dataset",
",",
"unit",
"=",
"\" examples\"",
",",
"leave",
"=",
"False",
")",
":",
"statistics",
".",
"num_examples",
"+=",
"1",
"assert",
"isinstance",
"(",
"example",
",",
"dict",
")",
"feature_names",
"=",
"sorted",
"(",
"example",
".",
"keys",
"(",
")",
")",
"for",
"feature_name",
"in",
"feature_names",
":",
"# Update the number of examples this feature appears in.",
"feature_to_num_examples",
"[",
"feature_name",
"]",
"+=",
"1",
"feature_np",
"=",
"example",
"[",
"feature_name",
"]",
"# For compatibility in graph and eager mode, we can get PODs here and",
"# everything may not be neatly wrapped up in numpy's ndarray.",
"feature_dtype",
"=",
"type",
"(",
"feature_np",
")",
"if",
"isinstance",
"(",
"feature_np",
",",
"np",
".",
"ndarray",
")",
":",
"# If we have an empty array, then don't proceed further with computing",
"# statistics on it.",
"if",
"feature_np",
".",
"size",
"==",
"0",
":",
"continue",
"feature_dtype",
"=",
"feature_np",
".",
"dtype",
".",
"type",
"feature_min",
",",
"feature_max",
"=",
"None",
",",
"None",
"is_numeric",
"=",
"(",
"np",
".",
"issubdtype",
"(",
"feature_dtype",
",",
"np",
".",
"number",
")",
"or",
"feature_dtype",
"==",
"np",
".",
"bool_",
")",
"if",
"is_numeric",
":",
"feature_min",
"=",
"np",
".",
"min",
"(",
"feature_np",
")",
"feature_max",
"=",
"np",
".",
"max",
"(",
"feature_np",
")",
"# TODO(afrozm): What if shapes don't match? Populate ValueCount? Add",
"# logic for that.",
"# Set or update the min, max.",
"if",
"is_numeric",
":",
"if",
"(",
"(",
"feature_name",
"not",
"in",
"feature_to_min",
")",
"or",
"(",
"feature_to_min",
"[",
"feature_name",
"]",
">",
"feature_min",
")",
")",
":",
"feature_to_min",
"[",
"feature_name",
"]",
"=",
"feature_min",
"if",
"(",
"(",
"feature_name",
"not",
"in",
"feature_to_max",
")",
"or",
"(",
"feature_to_max",
"[",
"feature_name",
"]",
"<",
"feature_max",
")",
")",
":",
"feature_to_max",
"[",
"feature_name",
"]",
"=",
"feature_max",
"# Start here, we've processed all examples.",
"output_shapes_dict",
"=",
"dataset",
".",
"output_shapes",
"output_types_dict",
"=",
"dataset",
".",
"output_types",
"for",
"feature_name",
"in",
"sorted",
"(",
"feature_to_num_examples",
".",
"keys",
"(",
")",
")",
":",
"# Try to fill in the schema.",
"feature",
"=",
"schema",
".",
"feature",
".",
"add",
"(",
")",
"feature",
".",
"name",
"=",
"feature_name",
"# TODO(afrozm): Make this work with nested structures, currently the Schema",
"# proto has no support for it.",
"maybe_feature_shape",
"=",
"output_shapes_dict",
"[",
"feature_name",
"]",
"if",
"not",
"isinstance",
"(",
"maybe_feature_shape",
",",
"tf",
".",
"TensorShape",
")",
":",
"logging",
".",
"error",
"(",
"\"Statistics generation doesn't work for nested structures yet\"",
")",
"continue",
"for",
"dim",
"in",
"maybe_feature_shape",
".",
"as_list",
"(",
")",
":",
"# We denote `None`s as -1 in the shape proto.",
"feature",
".",
"shape",
".",
"dim",
".",
"add",
"(",
")",
".",
"size",
"=",
"dim",
"if",
"dim",
"else",
"-",
"1",
"feature_type",
"=",
"output_types_dict",
"[",
"feature_name",
"]",
"feature",
".",
"type",
"=",
"_FEATURE_TYPE_MAP",
".",
"get",
"(",
"feature_type",
",",
"schema_pb2",
".",
"BYTES",
")",
"common_statistics",
"=",
"statistics_pb2",
".",
"CommonStatistics",
"(",
")",
"common_statistics",
".",
"num_non_missing",
"=",
"feature_to_num_examples",
"[",
"feature_name",
"]",
"common_statistics",
".",
"num_missing",
"=",
"(",
"statistics",
".",
"num_examples",
"-",
"common_statistics",
".",
"num_non_missing",
")",
"feature_name_statistics",
"=",
"statistics",
".",
"features",
".",
"add",
"(",
")",
"feature_name_statistics",
".",
"name",
"=",
"feature_name",
"# TODO(afrozm): This can be skipped, since type information was added to",
"# the Schema.",
"feature_name_statistics",
".",
"type",
"=",
"_SCHEMA_TYPE_MAP",
".",
"get",
"(",
"feature",
".",
"type",
",",
"statistics_pb2",
".",
"FeatureNameStatistics",
".",
"BYTES",
")",
"if",
"feature",
".",
"type",
"==",
"schema_pb2",
".",
"INT",
"or",
"feature",
".",
"type",
"==",
"schema_pb2",
".",
"FLOAT",
":",
"numeric_statistics",
"=",
"statistics_pb2",
".",
"NumericStatistics",
"(",
")",
"numeric_statistics",
".",
"min",
"=",
"feature_to_min",
"[",
"feature_name",
"]",
"numeric_statistics",
".",
"max",
"=",
"feature_to_max",
"[",
"feature_name",
"]",
"numeric_statistics",
".",
"common_stats",
".",
"CopyFrom",
"(",
"common_statistics",
")",
"feature_name_statistics",
".",
"num_stats",
".",
"CopyFrom",
"(",
"numeric_statistics",
")",
"else",
":",
"# Let's shove it into BytesStatistics for now.",
"bytes_statistics",
"=",
"statistics_pb2",
".",
"BytesStatistics",
"(",
")",
"bytes_statistics",
".",
"common_stats",
".",
"CopyFrom",
"(",
"common_statistics",
")",
"feature_name_statistics",
".",
"bytes_stats",
".",
"CopyFrom",
"(",
"bytes_statistics",
")",
"return",
"statistics",
",",
"schema"
] | Calculate statistics for the specified split. | [
"Calculate",
"statistics",
"for",
"the",
"specified",
"split",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L443-L556 |
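`get_dataset_feature_statistics` above makes one pass over a split, counting feature occurrences and tracking numeric min/max before packing the result into statistics protos. A much smaller, proto-free sketch of that single pass over plain example dicts:

```python
import collections

import numpy as np


def compute_feature_stats(examples):
    """One pass over example dicts, tracking per-feature counts and, for
    numeric features, running min/max."""
    num_examples = 0
    counts = collections.defaultdict(int)
    mins, maxs = {}, {}
    for example in examples:
        num_examples += 1
        for name, value in example.items():
            counts[name] += 1
            arr = np.asarray(value)
            # Skip empty arrays and non-numeric dtypes, as the full version does.
            if arr.size and np.issubdtype(arr.dtype, np.number):
                mins[name] = min(mins.get(name, np.inf), arr.min())
                maxs[name] = max(maxs.get(name, -np.inf), arr.max())
    return {"num_examples": num_examples, "counts": dict(counts),
            "min": mins, "max": maxs}


stats = compute_feature_stats([
    {"image": np.zeros((2, 2)), "label": 3},
    {"image": np.ones((2, 2)), "label": 7},
])
print(stats["num_examples"], stats["min"]["label"], stats["max"]["label"])  # 2 3 7
```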
26,178 | tensorflow/datasets | tensorflow_datasets/core/dataset_info.py | read_from_json | def read_from_json(json_filename):
"""Read JSON-formatted proto into DatasetInfo proto."""
with tf.io.gfile.GFile(json_filename) as f:
dataset_info_json_str = f.read()
# Parse it back into a proto.
parsed_proto = json_format.Parse(dataset_info_json_str,
dataset_info_pb2.DatasetInfo())
return parsed_proto | python | def read_from_json(json_filename):
"""Read JSON-formatted proto into DatasetInfo proto."""
with tf.io.gfile.GFile(json_filename) as f:
dataset_info_json_str = f.read()
# Parse it back into a proto.
parsed_proto = json_format.Parse(dataset_info_json_str,
dataset_info_pb2.DatasetInfo())
return parsed_proto | [
"def",
"read_from_json",
"(",
"json_filename",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"json_filename",
")",
"as",
"f",
":",
"dataset_info_json_str",
"=",
"f",
".",
"read",
"(",
")",
"# Parse it back into a proto.",
"parsed_proto",
"=",
"json_format",
".",
"Parse",
"(",
"dataset_info_json_str",
",",
"dataset_info_pb2",
".",
"DatasetInfo",
"(",
")",
")",
"return",
"parsed_proto"
] | Read JSON-formatted proto into DatasetInfo proto. | [
"Read",
"JSON",
"-",
"formatted",
"proto",
"into",
"DatasetInfo",
"proto",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L559-L566 |
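`read_from_json` above parses a JSON file back into a `DatasetInfo` proto with `json_format.Parse`. A plain-JSON stand-in for that round trip, using a dict instead of the proto:

```python
import json
import os
import tempfile


def read_metadata(json_filename):
    """Read a JSON metadata file into a dict (a stand-in for the
    json_format.Parse -> DatasetInfo proto step)."""
    with open(json_filename, "r", encoding="utf-8") as f:
        return json.load(f)


# Round-trip demo against a temporary file.
path = os.path.join(tempfile.mkdtemp(), "dataset_info.json")
with open(path, "w", encoding="utf-8") as f:
    json.dump({"name": "mnist", "version": "1.0.0"}, f)
print(read_metadata(path)["version"])  # 1.0.0
```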
26,179 | tensorflow/datasets | tensorflow_datasets/core/dataset_info.py | DatasetInfo.update_splits_if_different | def update_splits_if_different(self, split_dict):
"""Overwrite the splits if they are different from the current ones.
* If splits aren't already defined or different (ex: different number of
shards), then the new split dict is used. This will trigger stats
computation during download_and_prepare.
* If splits are already defined in DatasetInfo and similar (same names and
shards): keep the restored split which contains the statistics (restored
from GCS or file)
Args:
split_dict: `tfds.core.SplitDict`, the new split
"""
assert isinstance(split_dict, splits_lib.SplitDict)
# If splits are already defined and identical, then we do not update
if self._splits and splits_lib.check_splits_equals(
self._splits, split_dict):
return
self._set_splits(split_dict) | python | def update_splits_if_different(self, split_dict):
"""Overwrite the splits if they are different from the current ones.
* If splits aren't already defined or different (ex: different number of
shards), then the new split dict is used. This will trigger stats
computation during download_and_prepare.
* If splits are already defined in DatasetInfo and similar (same names and
shards): keep the restored split which contains the statistics (restored
from GCS or file)
Args:
split_dict: `tfds.core.SplitDict`, the new split
"""
assert isinstance(split_dict, splits_lib.SplitDict)
# If splits are already defined and identical, then we do not update
if self._splits and splits_lib.check_splits_equals(
self._splits, split_dict):
return
self._set_splits(split_dict) | [
"def",
"update_splits_if_different",
"(",
"self",
",",
"split_dict",
")",
":",
"assert",
"isinstance",
"(",
"split_dict",
",",
"splits_lib",
".",
"SplitDict",
")",
"# If splits are already defined and identical, then we do not update",
"if",
"self",
".",
"_splits",
"and",
"splits_lib",
".",
"check_splits_equals",
"(",
"self",
".",
"_splits",
",",
"split_dict",
")",
":",
"return",
"self",
".",
"_set_splits",
"(",
"split_dict",
")"
] | Overwrite the splits if they are different from the current ones.
* If splits aren't already defined or different (ex: different number of
shards), then the new split dict is used. This will trigger stats
computation during download_and_prepare.
* If splits are already defined in DatasetInfo and similar (same names and
shards): keep the restored split which contains the statistics (restored
from GCS or file)
Args:
split_dict: `tfds.core.SplitDict`, the new split | [
"Overwrite",
"the",
"splits",
"if",
"they",
"are",
"different",
"from",
"the",
"current",
"ones",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L197-L217 |
26,180 | tensorflow/datasets | tensorflow_datasets/core/dataset_info.py | DatasetInfo._compute_dynamic_properties | def _compute_dynamic_properties(self, builder):
"""Update from the DatasetBuilder."""
# Fill other things by going over the dataset.
splits = self.splits
for split_info in utils.tqdm(
splits.values(), desc="Computing statistics...", unit=" split"):
try:
split_name = split_info.name
# Fill DatasetFeatureStatistics.
dataset_feature_statistics, schema = get_dataset_feature_statistics(
builder, split_name)
# Add the statistics to this split.
split_info.statistics.CopyFrom(dataset_feature_statistics)
# Set the schema at the top-level since this is independent of the
# split.
self.as_proto.schema.CopyFrom(schema)
except tf.errors.InvalidArgumentError:
# This means there is no such split, even though it was specified in the
# info, the least we can do is to log this.
logging.error(("%s's info() property specifies split %s, but it "
"doesn't seem to have been generated. Please ensure "
"that the data was downloaded for this split and re-run "
"download_and_prepare."), self.name, split_name)
raise
# Set splits to trigger proto update in setter
self._set_splits(splits) | python | def _compute_dynamic_properties(self, builder):
"""Update from the DatasetBuilder."""
# Fill other things by going over the dataset.
splits = self.splits
for split_info in utils.tqdm(
splits.values(), desc="Computing statistics...", unit=" split"):
try:
split_name = split_info.name
# Fill DatasetFeatureStatistics.
dataset_feature_statistics, schema = get_dataset_feature_statistics(
builder, split_name)
# Add the statistics to this split.
split_info.statistics.CopyFrom(dataset_feature_statistics)
# Set the schema at the top-level since this is independent of the
# split.
self.as_proto.schema.CopyFrom(schema)
except tf.errors.InvalidArgumentError:
# This means there is no such split, even though it was specified in the
# info, the least we can do is to log this.
logging.error(("%s's info() property specifies split %s, but it "
"doesn't seem to have been generated. Please ensure "
"that the data was downloaded for this split and re-run "
"download_and_prepare."), self.name, split_name)
raise
# Set splits to trigger proto update in setter
self._set_splits(splits) | [
"def",
"_compute_dynamic_properties",
"(",
"self",
",",
"builder",
")",
":",
"# Fill other things by going over the dataset.",
"splits",
"=",
"self",
".",
"splits",
"for",
"split_info",
"in",
"utils",
".",
"tqdm",
"(",
"splits",
".",
"values",
"(",
")",
",",
"desc",
"=",
"\"Computing statistics...\"",
",",
"unit",
"=",
"\" split\"",
")",
":",
"try",
":",
"split_name",
"=",
"split_info",
".",
"name",
"# Fill DatasetFeatureStatistics.",
"dataset_feature_statistics",
",",
"schema",
"=",
"get_dataset_feature_statistics",
"(",
"builder",
",",
"split_name",
")",
"# Add the statistics to this split.",
"split_info",
".",
"statistics",
".",
"CopyFrom",
"(",
"dataset_feature_statistics",
")",
"# Set the schema at the top-level since this is independent of the",
"# split.",
"self",
".",
"as_proto",
".",
"schema",
".",
"CopyFrom",
"(",
"schema",
")",
"except",
"tf",
".",
"errors",
".",
"InvalidArgumentError",
":",
"# This means there is no such split, even though it was specified in the",
"# info, the least we can do is to log this.",
"logging",
".",
"error",
"(",
"(",
"\"%s's info() property specifies split %s, but it \"",
"\"doesn't seem to have been generated. Please ensure \"",
"\"that the data was downloaded for this split and re-run \"",
"\"download_and_prepare.\"",
")",
",",
"self",
".",
"name",
",",
"split_name",
")",
"raise",
"# Set splits to trigger proto update in setter",
"self",
".",
"_set_splits",
"(",
"splits",
")"
] | Update from the DatasetBuilder. | [
"Update",
"from",
"the",
"DatasetBuilder",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L249-L278 |
26,181 | tensorflow/datasets | tensorflow_datasets/core/dataset_info.py | DatasetInfo.write_to_directory | def write_to_directory(self, dataset_info_dir):
"""Write `DatasetInfo` as JSON to `dataset_info_dir`."""
# Save the metadata from the features (vocabulary, labels,...)
if self.features:
self.features.save_metadata(dataset_info_dir)
if self.redistribution_info.license:
with tf.io.gfile.GFile(self._license_filename(dataset_info_dir),
"w") as f:
f.write(self.redistribution_info.license)
with tf.io.gfile.GFile(self._dataset_info_filename(dataset_info_dir),
"w") as f:
f.write(self.as_json) | python | def write_to_directory(self, dataset_info_dir):
"""Write `DatasetInfo` as JSON to `dataset_info_dir`."""
# Save the metadata from the features (vocabulary, labels,...)
if self.features:
self.features.save_metadata(dataset_info_dir)
if self.redistribution_info.license:
with tf.io.gfile.GFile(self._license_filename(dataset_info_dir),
"w") as f:
f.write(self.redistribution_info.license)
with tf.io.gfile.GFile(self._dataset_info_filename(dataset_info_dir),
"w") as f:
f.write(self.as_json) | [
"def",
"write_to_directory",
"(",
"self",
",",
"dataset_info_dir",
")",
":",
"# Save the metadata from the features (vocabulary, labels,...)",
"if",
"self",
".",
"features",
":",
"self",
".",
"features",
".",
"save_metadata",
"(",
"dataset_info_dir",
")",
"if",
"self",
".",
"redistribution_info",
".",
"license",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"self",
".",
"_license_filename",
"(",
"dataset_info_dir",
")",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"redistribution_info",
".",
"license",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"self",
".",
"_dataset_info_filename",
"(",
"dataset_info_dir",
")",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"as_json",
")"
] | Write `DatasetInfo` as JSON to `dataset_info_dir`. | [
"Write",
"DatasetInfo",
"as",
"JSON",
"to",
"dataset_info_dir",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L284-L297 |
26,182 | tensorflow/datasets | tensorflow_datasets/core/dataset_info.py | DatasetInfo.read_from_directory | def read_from_directory(self, dataset_info_dir):
"""Update DatasetInfo from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the DatasetInfo.
This will overwrite all previous metadata.
Args:
dataset_info_dir: `str` The directory containing the metadata file. This
should be the root directory of a specific dataset version.
"""
if not dataset_info_dir:
raise ValueError(
"Calling read_from_directory with undefined dataset_info_dir.")
json_filename = self._dataset_info_filename(dataset_info_dir)
# Load the metadata from disk
parsed_proto = read_from_json(json_filename)
# Update splits
self._set_splits(splits_lib.SplitDict.from_proto(parsed_proto.splits))
# Restore the feature metadata (vocabulary, labels names,...)
if self.features:
self.features.load_metadata(dataset_info_dir)
# Update fields which are not defined in the code. This means that
# the code will overwrite fields which are present in
# dataset_info.json.
for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items():
field_value = getattr(self._info_proto, field_name)
field_value_restored = getattr(parsed_proto, field_name)
try:
is_defined = self._info_proto.HasField(field_name)
except ValueError:
is_defined = bool(field_value)
try:
is_defined_in_restored = parsed_proto.HasField(field_name)
except ValueError:
is_defined_in_restored = bool(field_value_restored)
# If field is defined in code, we ignore the value
if is_defined:
if field_value != field_value_restored:
logging.info(
"Field info.%s from disk and from code do not match. Keeping "
"the one from code.", field_name)
continue
# If the field is also not defined in JSON file, we do nothing
if not is_defined_in_restored:
continue
# Otherwise, we restore the dataset_info.json value
if field.type == field.TYPE_MESSAGE:
field_value.MergeFrom(field_value_restored)
else:
setattr(self._info_proto, field_name, field_value_restored)
if self._builder._version != self.version: # pylint: disable=protected-access
raise AssertionError(
"The constructed DatasetInfo instance and the restored proto version "
"do not match. Builder version: {}. Proto version: {}".format(
self._builder._version, self.version)) # pylint: disable=protected-access
# Mark as fully initialized.
self._fully_initialized = True | python | def read_from_directory(self, dataset_info_dir):
"""Update DatasetInfo from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the DatasetInfo.
This will overwrite all previous metadata.
Args:
dataset_info_dir: `str` The directory containing the metadata file. This
should be the root directory of a specific dataset version.
"""
if not dataset_info_dir:
raise ValueError(
"Calling read_from_directory with undefined dataset_info_dir.")
json_filename = self._dataset_info_filename(dataset_info_dir)
# Load the metadata from disk
parsed_proto = read_from_json(json_filename)
# Update splits
self._set_splits(splits_lib.SplitDict.from_proto(parsed_proto.splits))
# Restore the feature metadata (vocabulary, labels names,...)
if self.features:
self.features.load_metadata(dataset_info_dir)
# Update fields which are not defined in the code. This means that
# the code will overwrite fields which are present in
# dataset_info.json.
for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items():
field_value = getattr(self._info_proto, field_name)
field_value_restored = getattr(parsed_proto, field_name)
try:
is_defined = self._info_proto.HasField(field_name)
except ValueError:
is_defined = bool(field_value)
try:
is_defined_in_restored = parsed_proto.HasField(field_name)
except ValueError:
is_defined_in_restored = bool(field_value_restored)
# If field is defined in code, we ignore the value
if is_defined:
if field_value != field_value_restored:
logging.info(
"Field info.%s from disk and from code do not match. Keeping "
"the one from code.", field_name)
continue
# If the field is also not defined in JSON file, we do nothing
if not is_defined_in_restored:
continue
# Otherwise, we restore the dataset_info.json value
if field.type == field.TYPE_MESSAGE:
field_value.MergeFrom(field_value_restored)
else:
setattr(self._info_proto, field_name, field_value_restored)
if self._builder._version != self.version: # pylint: disable=protected-access
raise AssertionError(
"The constructed DatasetInfo instance and the restored proto version "
"do not match. Builder version: {}. Proto version: {}".format(
self._builder._version, self.version)) # pylint: disable=protected-access
# Mark as fully initialized.
self._fully_initialized = True | [
"def",
"read_from_directory",
"(",
"self",
",",
"dataset_info_dir",
")",
":",
"if",
"not",
"dataset_info_dir",
":",
"raise",
"ValueError",
"(",
"\"Calling read_from_directory with undefined dataset_info_dir.\"",
")",
"json_filename",
"=",
"self",
".",
"_dataset_info_filename",
"(",
"dataset_info_dir",
")",
"# Load the metadata from disk",
"parsed_proto",
"=",
"read_from_json",
"(",
"json_filename",
")",
"# Update splits",
"self",
".",
"_set_splits",
"(",
"splits_lib",
".",
"SplitDict",
".",
"from_proto",
"(",
"parsed_proto",
".",
"splits",
")",
")",
"# Restore the feature metadata (vocabulary, labels names,...)",
"if",
"self",
".",
"features",
":",
"self",
".",
"features",
".",
"load_metadata",
"(",
"dataset_info_dir",
")",
"# Update fields which are not defined in the code. This means that",
"# the code will overwrite fields which are present in",
"# dataset_info.json.",
"for",
"field_name",
",",
"field",
"in",
"self",
".",
"as_proto",
".",
"DESCRIPTOR",
".",
"fields_by_name",
".",
"items",
"(",
")",
":",
"field_value",
"=",
"getattr",
"(",
"self",
".",
"_info_proto",
",",
"field_name",
")",
"field_value_restored",
"=",
"getattr",
"(",
"parsed_proto",
",",
"field_name",
")",
"try",
":",
"is_defined",
"=",
"self",
".",
"_info_proto",
".",
"HasField",
"(",
"field_name",
")",
"except",
"ValueError",
":",
"is_defined",
"=",
"bool",
"(",
"field_value",
")",
"try",
":",
"is_defined_in_restored",
"=",
"parsed_proto",
".",
"HasField",
"(",
"field_name",
")",
"except",
"ValueError",
":",
"is_defined_in_restored",
"=",
"bool",
"(",
"field_value_restored",
")",
"# If field is defined in code, we ignore the value",
"if",
"is_defined",
":",
"if",
"field_value",
"!=",
"field_value_restored",
":",
"logging",
".",
"info",
"(",
"\"Field info.%s from disk and from code do not match. Keeping \"",
"\"the one from code.\"",
",",
"field_name",
")",
"continue",
"# If the field is also not defined in JSON file, we do nothing",
"if",
"not",
"is_defined_in_restored",
":",
"continue",
"# Otherwise, we restore the dataset_info.json value",
"if",
"field",
".",
"type",
"==",
"field",
".",
"TYPE_MESSAGE",
":",
"field_value",
".",
"MergeFrom",
"(",
"field_value_restored",
")",
"else",
":",
"setattr",
"(",
"self",
".",
"_info_proto",
",",
"field_name",
",",
"field_value_restored",
")",
"if",
"self",
".",
"_builder",
".",
"_version",
"!=",
"self",
".",
"version",
":",
"# pylint: disable=protected-access",
"raise",
"AssertionError",
"(",
"\"The constructed DatasetInfo instance and the restored proto version \"",
"\"do not match. Builder version: {}. Proto version: {}\"",
".",
"format",
"(",
"self",
".",
"_builder",
".",
"_version",
",",
"self",
".",
"version",
")",
")",
"# pylint: disable=protected-access",
"# Mark as fully initialized.",
"self",
".",
"_fully_initialized",
"=",
"True"
] | Update DatasetInfo from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the DatasetInfo.
This will overwrite all previous metadata.
Args:
dataset_info_dir: `str` The directory containing the metadata file. This
should be the root directory of a specific dataset version. | [
"Update",
"DatasetInfo",
"from",
"the",
"JSON",
"file",
"in",
"dataset_info_dir",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L299-L367 |
26,183 | tensorflow/datasets | tensorflow_datasets/core/dataset_info.py | DatasetInfo.initialize_from_bucket | def initialize_from_bucket(self):
"""Initialize DatasetInfo from GCS bucket info files."""
# In order to support Colab, we use the HTTP GCS API to access the metadata
# files. They are copied locally and then loaded.
tmp_dir = tempfile.mkdtemp("tfds")
data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
if not data_files:
return
logging.info("Loading info from GCS for %s", self.full_name)
for fname in data_files:
out_fname = os.path.join(tmp_dir, os.path.basename(fname))
gcs_utils.download_gcs_file(fname, out_fname)
self.read_from_directory(tmp_dir) | python | def initialize_from_bucket(self):
"""Initialize DatasetInfo from GCS bucket info files."""
# In order to support Colab, we use the HTTP GCS API to access the metadata
# files. They are copied locally and then loaded.
tmp_dir = tempfile.mkdtemp("tfds")
data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
if not data_files:
return
logging.info("Loading info from GCS for %s", self.full_name)
for fname in data_files:
out_fname = os.path.join(tmp_dir, os.path.basename(fname))
gcs_utils.download_gcs_file(fname, out_fname)
self.read_from_directory(tmp_dir) | [
"def",
"initialize_from_bucket",
"(",
"self",
")",
":",
"# In order to support Colab, we use the HTTP GCS API to access the metadata",
"# files. They are copied locally and then loaded.",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"\"tfds\"",
")",
"data_files",
"=",
"gcs_utils",
".",
"gcs_dataset_info_files",
"(",
"self",
".",
"full_name",
")",
"if",
"not",
"data_files",
":",
"return",
"logging",
".",
"info",
"(",
"\"Loading info from GCS for %s\"",
",",
"self",
".",
"full_name",
")",
"for",
"fname",
"in",
"data_files",
":",
"out_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
")",
"gcs_utils",
".",
"download_gcs_file",
"(",
"fname",
",",
"out_fname",
")",
"self",
".",
"read_from_directory",
"(",
"tmp_dir",
")"
] | Initialize DatasetInfo from GCS bucket info files. | [
"Initialize",
"DatasetInfo",
"from",
"GCS",
"bucket",
"info",
"files",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_info.py#L369-L381 |
26,184 | tensorflow/datasets | tensorflow_datasets/core/download/download_manager.py | _map_promise | def _map_promise(map_fn, all_inputs):
"""Map the function into each element and resolve the promise."""
all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
res = utils.map_nested(_wait_on_promise, all_promises)
return res | python | def _map_promise(map_fn, all_inputs):
"""Map the function into each element and resolve the promise."""
all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
res = utils.map_nested(_wait_on_promise, all_promises)
return res | [
"def",
"_map_promise",
"(",
"map_fn",
",",
"all_inputs",
")",
":",
"all_promises",
"=",
"utils",
".",
"map_nested",
"(",
"map_fn",
",",
"all_inputs",
")",
"# Apply the function",
"res",
"=",
"utils",
".",
"map_nested",
"(",
"_wait_on_promise",
",",
"all_promises",
")",
"return",
"res"
] | Map the function into each element and resolve the promise. | [
"Map",
"the",
"function",
"into",
"each",
"element",
"and",
"resolve",
"the",
"promise",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L392-L396 |
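`_map_promise` above applies a promise-returning function over a (possibly nested) structure and then waits on every promise. A small sketch of the same pattern using the `promise` library directly; the doubling function and the use of `Promise.get()` as the blocking wait (mirroring what `_wait_on_promise` is assumed to do) are illustrative:

```python
import promise

def double_later(x):
    # Stand-in for a promise-returning function such as DownloadManager._download;
    # here the result is wrapped in an already-resolved promise.
    return promise.Promise.resolve(x * 2)

pending = {"a": double_later(1), "b": [double_later(2), double_later(3)]}

# Resolve each promise; get() blocks until the value is available.
resolved = {"a": pending["a"].get(), "b": [p.get() for p in pending["b"]]}
print(resolved)  # {'a': 2, 'b': [4, 6]}
```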
26,185 | tensorflow/datasets | tensorflow_datasets/core/download/download_manager.py | DownloadManager._handle_download_result | def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size):
"""Store dled file to definitive place, write INFO file, return path."""
fnames = tf.io.gfile.listdir(tmp_dir_path)
if len(fnames) > 1:
raise AssertionError('More than one file in %s.' % tmp_dir_path)
original_fname = fnames[0]
tmp_path = os.path.join(tmp_dir_path, original_fname)
self._recorded_sizes_checksums[resource.url] = (dl_size, sha256)
if self._register_checksums:
self._record_sizes_checksums()
elif (dl_size, sha256) != self._sizes_checksums.get(resource.url, None):
raise NonMatchingChecksumError(resource.url, tmp_path)
download_path = self._get_final_dl_path(resource.url, sha256)
resource_lib.write_info_file(resource, download_path, self._dataset_name,
original_fname)
# Unconditionally overwrite because either file doesn't exist or
# FORCE_DOWNLOAD=true
tf.io.gfile.rename(tmp_path, download_path, overwrite=True)
tf.io.gfile.rmtree(tmp_dir_path)
return download_path | python | def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size):
"""Store dled file to definitive place, write INFO file, return path."""
fnames = tf.io.gfile.listdir(tmp_dir_path)
if len(fnames) > 1:
raise AssertionError('More than one file in %s.' % tmp_dir_path)
original_fname = fnames[0]
tmp_path = os.path.join(tmp_dir_path, original_fname)
self._recorded_sizes_checksums[resource.url] = (dl_size, sha256)
if self._register_checksums:
self._record_sizes_checksums()
elif (dl_size, sha256) != self._sizes_checksums.get(resource.url, None):
raise NonMatchingChecksumError(resource.url, tmp_path)
download_path = self._get_final_dl_path(resource.url, sha256)
resource_lib.write_info_file(resource, download_path, self._dataset_name,
original_fname)
# Unconditionally overwrite because either file doesn't exist or
# FORCE_DOWNLOAD=true
tf.io.gfile.rename(tmp_path, download_path, overwrite=True)
tf.io.gfile.rmtree(tmp_dir_path)
return download_path | [
"def",
"_handle_download_result",
"(",
"self",
",",
"resource",
",",
"tmp_dir_path",
",",
"sha256",
",",
"dl_size",
")",
":",
"fnames",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"tmp_dir_path",
")",
"if",
"len",
"(",
"fnames",
")",
">",
"1",
":",
"raise",
"AssertionError",
"(",
"'More than one file in %s.'",
"%",
"tmp_dir_path",
")",
"original_fname",
"=",
"fnames",
"[",
"0",
"]",
"tmp_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir_path",
",",
"original_fname",
")",
"self",
".",
"_recorded_sizes_checksums",
"[",
"resource",
".",
"url",
"]",
"=",
"(",
"dl_size",
",",
"sha256",
")",
"if",
"self",
".",
"_register_checksums",
":",
"self",
".",
"_record_sizes_checksums",
"(",
")",
"elif",
"(",
"dl_size",
",",
"sha256",
")",
"!=",
"self",
".",
"_sizes_checksums",
".",
"get",
"(",
"resource",
".",
"url",
",",
"None",
")",
":",
"raise",
"NonMatchingChecksumError",
"(",
"resource",
".",
"url",
",",
"tmp_path",
")",
"download_path",
"=",
"self",
".",
"_get_final_dl_path",
"(",
"resource",
".",
"url",
",",
"sha256",
")",
"resource_lib",
".",
"write_info_file",
"(",
"resource",
",",
"download_path",
",",
"self",
".",
"_dataset_name",
",",
"original_fname",
")",
"# Unconditionally overwrite because either file doesn't exist or",
"# FORCE_DOWNLOAD=true",
"tf",
".",
"io",
".",
"gfile",
".",
"rename",
"(",
"tmp_path",
",",
"download_path",
",",
"overwrite",
"=",
"True",
")",
"tf",
".",
"io",
".",
"gfile",
".",
"rmtree",
"(",
"tmp_dir_path",
")",
"return",
"download_path"
] | Store dled file to definitive place, write INFO file, return path. | [
"Store",
"dled",
"file",
"to",
"definitive",
"place",
"write",
"INFO",
"file",
"return",
"path",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L196-L215 |
26,186 | tensorflow/datasets | tensorflow_datasets/core/download/download_manager.py | DownloadManager._download | def _download(self, resource):
"""Download resource, returns Promise->path to downloaded file."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
url = resource.url
if url in self._sizes_checksums:
expected_sha256 = self._sizes_checksums[url][1]
download_path = self._get_final_dl_path(url, expected_sha256)
if not self._force_download and resource.exists_locally(download_path):
logging.info('URL %s already downloaded: reusing %s.',
url, download_path)
self._recorded_sizes_checksums[url] = self._sizes_checksums[url]
return promise.Promise.resolve(download_path)
# There is a slight difference between downloader and extractor here:
# the extractor manages its own temp directory, while the DownloadManager
# manages the temp directory of downloader.
download_dir_path = os.path.join(
self._download_dir,
'%s.tmp.%s' % (resource_lib.get_dl_dirname(url), uuid.uuid4().hex))
tf.io.gfile.makedirs(download_dir_path)
logging.info('Downloading %s into %s...', url, download_dir_path)
def callback(val):
checksum, dl_size = val
return self._handle_download_result(
resource, download_dir_path, checksum, dl_size)
return self._downloader.download(url, download_dir_path).then(callback) | python | def _download(self, resource):
"""Download resource, returns Promise->path to downloaded file."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
url = resource.url
if url in self._sizes_checksums:
expected_sha256 = self._sizes_checksums[url][1]
download_path = self._get_final_dl_path(url, expected_sha256)
if not self._force_download and resource.exists_locally(download_path):
logging.info('URL %s already downloaded: reusing %s.',
url, download_path)
self._recorded_sizes_checksums[url] = self._sizes_checksums[url]
return promise.Promise.resolve(download_path)
# There is a slight difference between downloader and extractor here:
# the extractor manages its own temp directory, while the DownloadManager
# manages the temp directory of downloader.
download_dir_path = os.path.join(
self._download_dir,
'%s.tmp.%s' % (resource_lib.get_dl_dirname(url), uuid.uuid4().hex))
tf.io.gfile.makedirs(download_dir_path)
logging.info('Downloading %s into %s...', url, download_dir_path)
def callback(val):
checksum, dl_size = val
return self._handle_download_result(
resource, download_dir_path, checksum, dl_size)
return self._downloader.download(url, download_dir_path).then(callback) | [
"def",
"_download",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"six",
".",
"string_types",
")",
":",
"resource",
"=",
"resource_lib",
".",
"Resource",
"(",
"url",
"=",
"resource",
")",
"url",
"=",
"resource",
".",
"url",
"if",
"url",
"in",
"self",
".",
"_sizes_checksums",
":",
"expected_sha256",
"=",
"self",
".",
"_sizes_checksums",
"[",
"url",
"]",
"[",
"1",
"]",
"download_path",
"=",
"self",
".",
"_get_final_dl_path",
"(",
"url",
",",
"expected_sha256",
")",
"if",
"not",
"self",
".",
"_force_download",
"and",
"resource",
".",
"exists_locally",
"(",
"download_path",
")",
":",
"logging",
".",
"info",
"(",
"'URL %s already downloaded: reusing %s.'",
",",
"url",
",",
"download_path",
")",
"self",
".",
"_recorded_sizes_checksums",
"[",
"url",
"]",
"=",
"self",
".",
"_sizes_checksums",
"[",
"url",
"]",
"return",
"promise",
".",
"Promise",
".",
"resolve",
"(",
"download_path",
")",
"# There is a slight difference between downloader and extractor here:",
"# the extractor manages its own temp directory, while the DownloadManager",
"# manages the temp directory of downloader.",
"download_dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_download_dir",
",",
"'%s.tmp.%s'",
"%",
"(",
"resource_lib",
".",
"get_dl_dirname",
"(",
"url",
")",
",",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
")",
"tf",
".",
"io",
".",
"gfile",
".",
"makedirs",
"(",
"download_dir_path",
")",
"logging",
".",
"info",
"(",
"'Downloading %s into %s...'",
",",
"url",
",",
"download_dir_path",
")",
"def",
"callback",
"(",
"val",
")",
":",
"checksum",
",",
"dl_size",
"=",
"val",
"return",
"self",
".",
"_handle_download_result",
"(",
"resource",
",",
"download_dir_path",
",",
"checksum",
",",
"dl_size",
")",
"return",
"self",
".",
"_downloader",
".",
"download",
"(",
"url",
",",
"download_dir_path",
")",
".",
"then",
"(",
"callback",
")"
] | Download resource, returns Promise->path to downloaded file. | [
"Download",
"resource",
"returns",
"Promise",
"-",
">",
"path",
"to",
"downloaded",
"file",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L221-L247 |
26,187 | tensorflow/datasets | tensorflow_datasets/core/download/download_manager.py | DownloadManager._extract | def _extract(self, resource):
"""Extract a single archive, returns Promise->path to extraction result."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
path = resource.path
extract_method = resource.extract_method
if extract_method == resource_lib.ExtractMethod.NO_EXTRACT:
logging.info('Skipping extraction for %s (method=NO_EXTRACT).', path)
return promise.Promise.resolve(path)
method_name = resource_lib.ExtractMethod(extract_method).name
extract_path = os.path.join(self._extract_dir,
'%s.%s' % (method_name, os.path.basename(path)))
if not self._force_extraction and tf.io.gfile.exists(extract_path):
logging.info('Reusing extraction of %s at %s.', path, extract_path)
return promise.Promise.resolve(extract_path)
return self._extractor.extract(path, extract_method, extract_path) | python | def _extract(self, resource):
"""Extract a single archive, returns Promise->path to extraction result."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
path = resource.path
extract_method = resource.extract_method
if extract_method == resource_lib.ExtractMethod.NO_EXTRACT:
logging.info('Skipping extraction for %s (method=NO_EXTRACT).', path)
return promise.Promise.resolve(path)
method_name = resource_lib.ExtractMethod(extract_method).name
extract_path = os.path.join(self._extract_dir,
'%s.%s' % (method_name, os.path.basename(path)))
if not self._force_extraction and tf.io.gfile.exists(extract_path):
logging.info('Reusing extraction of %s at %s.', path, extract_path)
return promise.Promise.resolve(extract_path)
return self._extractor.extract(path, extract_method, extract_path) | [
"def",
"_extract",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"six",
".",
"string_types",
")",
":",
"resource",
"=",
"resource_lib",
".",
"Resource",
"(",
"path",
"=",
"resource",
")",
"path",
"=",
"resource",
".",
"path",
"extract_method",
"=",
"resource",
".",
"extract_method",
"if",
"extract_method",
"==",
"resource_lib",
".",
"ExtractMethod",
".",
"NO_EXTRACT",
":",
"logging",
".",
"info",
"(",
"'Skipping extraction for %s (method=NO_EXTRACT).'",
",",
"path",
")",
"return",
"promise",
".",
"Promise",
".",
"resolve",
"(",
"path",
")",
"method_name",
"=",
"resource_lib",
".",
"ExtractMethod",
"(",
"extract_method",
")",
".",
"name",
"extract_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_extract_dir",
",",
"'%s.%s'",
"%",
"(",
"method_name",
",",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
")",
")",
"if",
"not",
"self",
".",
"_force_extraction",
"and",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"extract_path",
")",
":",
"logging",
".",
"info",
"(",
"'Reusing extraction of %s at %s.'",
",",
"path",
",",
"extract_path",
")",
"return",
"promise",
".",
"Promise",
".",
"resolve",
"(",
"extract_path",
")",
"return",
"self",
".",
"_extractor",
".",
"extract",
"(",
"path",
",",
"extract_method",
",",
"extract_path",
")"
] | Extract a single archive, returns Promise->path to extraction result. | [
"Extract",
"a",
"single",
"archive",
"returns",
"Promise",
"-",
">",
"path",
"to",
"extraction",
"result",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L251-L266 |
26,188 | tensorflow/datasets | tensorflow_datasets/core/download/download_manager.py | DownloadManager._download_extract | def _download_extract(self, resource):
"""Download-extract `Resource` or url, returns Promise->path."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
def callback(path):
resource.path = path
return self._extract(resource)
return self._download(resource).then(callback) | python | def _download_extract(self, resource):
"""Download-extract `Resource` or url, returns Promise->path."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
def callback(path):
resource.path = path
return self._extract(resource)
return self._download(resource).then(callback) | [
"def",
"_download_extract",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"six",
".",
"string_types",
")",
":",
"resource",
"=",
"resource_lib",
".",
"Resource",
"(",
"url",
"=",
"resource",
")",
"def",
"callback",
"(",
"path",
")",
":",
"resource",
".",
"path",
"=",
"path",
"return",
"self",
".",
"_extract",
"(",
"resource",
")",
"return",
"self",
".",
"_download",
"(",
"resource",
")",
".",
"then",
"(",
"callback",
")"
] | Download-extract `Resource` or url, returns Promise->path. | [
"Download",
"-",
"extract",
"Resource",
"or",
"url",
"returns",
"Promise",
"-",
">",
"path",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L270-L277 |
26,189 | tensorflow/datasets | tensorflow_datasets/core/download/download_manager.py | DownloadManager.download_kaggle_data | def download_kaggle_data(self, competition_name):
"""Download data for a given Kaggle competition."""
with self._downloader.tqdm():
kaggle_downloader = self._downloader.kaggle_downloader(competition_name)
urls = kaggle_downloader.competition_urls
files = kaggle_downloader.competition_files
return _map_promise(self._download,
dict((f, u) for (f, u) in zip(files, urls))) | python | def download_kaggle_data(self, competition_name):
"""Download data for a given Kaggle competition."""
with self._downloader.tqdm():
kaggle_downloader = self._downloader.kaggle_downloader(competition_name)
urls = kaggle_downloader.competition_urls
files = kaggle_downloader.competition_files
return _map_promise(self._download,
dict((f, u) for (f, u) in zip(files, urls))) | [
"def",
"download_kaggle_data",
"(",
"self",
",",
"competition_name",
")",
":",
"with",
"self",
".",
"_downloader",
".",
"tqdm",
"(",
")",
":",
"kaggle_downloader",
"=",
"self",
".",
"_downloader",
".",
"kaggle_downloader",
"(",
"competition_name",
")",
"urls",
"=",
"kaggle_downloader",
".",
"competition_urls",
"files",
"=",
"kaggle_downloader",
".",
"competition_files",
"return",
"_map_promise",
"(",
"self",
".",
"_download",
",",
"dict",
"(",
"(",
"f",
",",
"u",
")",
"for",
"(",
"f",
",",
"u",
")",
"in",
"zip",
"(",
"files",
",",
"urls",
")",
")",
")"
] | Download data for a given Kaggle competition. | [
"Download",
"data",
"for",
"a",
"given",
"Kaggle",
"competition",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L279-L286 |
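`download_kaggle_data` above maps a competition's file names to locally downloaded paths. A hedged standalone sketch, assuming a `DownloadManager` can be constructed directly with a download directory and dataset name, that Kaggle API credentials are configured, and that "titanic" is only an illustrative competition name:

```python
import tensorflow_datasets as tfds

# Hypothetical standalone use; inside a dataset builder the manager is supplied by tfds.
dl_manager = tfds.download.DownloadManager(
    download_dir="/tmp/tfds_downloads", dataset_name="my_kaggle_dataset")

# Returns {competition_file_name: local_path} for every file in the competition.
paths = dl_manager.download_kaggle_data("titanic")
for fname, local_path in paths.items():
    print(fname, "->", local_path)
```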
26,190 | tensorflow/datasets | tensorflow_datasets/core/download/download_manager.py | DownloadManager.iter_archive | def iter_archive(self, resource):
"""Returns iterator over files within archive.
**Important Note**: caller should read files as they are yielded.
Reading out of order is slow.
Args:
resource: path to archive or `tfds.download.Resource`.
Returns:
Generator yielding tuple (path_within_archive, file_obj).
"""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
return extractor.iter_archive(resource.path, resource.extract_method) | python | def iter_archive(self, resource):
"""Returns iterator over files within archive.
**Important Note**: caller should read files as they are yielded.
Reading out of order is slow.
Args:
resource: path to archive or `tfds.download.Resource`.
Returns:
Generator yielding tuple (path_within_archive, file_obj).
"""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
return extractor.iter_archive(resource.path, resource.extract_method) | [
"def",
"iter_archive",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"six",
".",
"string_types",
")",
":",
"resource",
"=",
"resource_lib",
".",
"Resource",
"(",
"path",
"=",
"resource",
")",
"return",
"extractor",
".",
"iter_archive",
"(",
"resource",
".",
"path",
",",
"resource",
".",
"extract_method",
")"
] | Returns iterator over files within archive.
**Important Note**: caller should read files as they are yielded.
Reading out of order is slow.
Args:
resource: path to archive or `tfds.download.Resource`.
Returns:
Generator yielding tuple (path_within_archive, file_obj). | [
"Returns",
"iterator",
"over",
"files",
"within",
"archive",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L303-L317 |
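`iter_archive` above yields `(path_within_archive, file_obj)` pairs and expects the files to be consumed in order. A short sketch with an illustrative archive path (the archive is assumed to already exist, e.g. as the result of an earlier download):

```python
import tensorflow_datasets as tfds

dl_manager = tfds.download.DownloadManager(
    download_dir="/tmp/tfds_downloads", dataset_name="example")
archive_path = "/tmp/tfds_downloads/images.tar.gz"  # illustrative, must already exist

# Read each file as it is yielded; seeking back to earlier members is slow.
for inner_path, file_obj in dl_manager.iter_archive(archive_path):
    data = file_obj.read()
    print(inner_path, len(data), "bytes")
```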
26,191 | tensorflow/datasets | tensorflow_datasets/core/download/download_manager.py | DownloadManager.download_and_extract | def download_and_extract(self, url_or_urls):
"""Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method will
automatically be deduced from downloaded file name.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
# Add progress bar to follow the download state
with self._downloader.tqdm():
with self._extractor.tqdm():
return _map_promise(self._download_extract, url_or_urls) | python | def download_and_extract(self, url_or_urls):
"""Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method will
automatically be deduced from downloaded file name.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
# Add progress bar to follow the download state
with self._downloader.tqdm():
with self._extractor.tqdm():
return _map_promise(self._download_extract, url_or_urls) | [
"def",
"download_and_extract",
"(",
"self",
",",
"url_or_urls",
")",
":",
"# Add progress bar to follow the download state",
"with",
"self",
".",
"_downloader",
".",
"tqdm",
"(",
")",
":",
"with",
"self",
".",
"_extractor",
".",
"tqdm",
"(",
")",
":",
"return",
"_map_promise",
"(",
"self",
".",
"_download_extract",
",",
"url_or_urls",
")"
] | Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method will
automatically be deduced from downloaded file name.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s). | [
"Download",
"and",
"extract",
"given",
"url_or_urls",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L337-L359 |
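`download_and_extract` above accepts a single URL or a nested list/dict of URLs and returns paths with the same structure. A hedged sketch; the URLs are placeholders, and `register_checksums=True` is assumed to be needed here because, per `_handle_download_result`, downloads without a known size/checksum otherwise fail validation:

```python
import tensorflow_datasets as tfds

dl_manager = tfds.download.DownloadManager(
    download_dir="/tmp/tfds_downloads",
    dataset_name="example",
    register_checksums=True)  # record checksums instead of validating against known ones

# The returned structure mirrors the input: a dict of URLs -> a dict of local paths.
paths = dl_manager.download_and_extract({
    "train": "https://example.com/train.tar.gz",   # placeholder URLs
    "test": "https://example.com/test.tar.gz",
})
print(paths["train"], paths["test"])
```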
26,192 | tensorflow/datasets | tensorflow_datasets/core/download/download_manager.py | DownloadManager.manual_dir | def manual_dir(self):
"""Returns the directory containing the manually extracted data."""
if not tf.io.gfile.exists(self._manual_dir):
raise AssertionError(
'Manual directory {} does not exist. Create it and download/extract '
'dataset artifacts in there.'.format(self._manual_dir))
return self._manual_dir | python | def manual_dir(self):
"""Returns the directory containing the manually extracted data."""
if not tf.io.gfile.exists(self._manual_dir):
raise AssertionError(
'Manual directory {} does not exist. Create it and download/extract '
'dataset artifacts in there.'.format(self._manual_dir))
return self._manual_dir | [
"def",
"manual_dir",
"(",
"self",
")",
":",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"self",
".",
"_manual_dir",
")",
":",
"raise",
"AssertionError",
"(",
"'Manual directory {} does not exist. Create it and download/extract '",
"'dataset artifacts in there.'",
".",
"format",
"(",
"self",
".",
"_manual_dir",
")",
")",
"return",
"self",
".",
"_manual_dir"
] | Returns the directory containing the manually extracted data. | [
"Returns",
"the",
"directory",
"containing",
"the",
"manually",
"extracted",
"data",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L362-L368 |
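`manual_dir` above only verifies that the user-prepared directory exists before returning it. A small sketch, again assuming direct construction of the manager; the directory and file name are illustrative:

```python
import os
import tensorflow_datasets as tfds

os.makedirs("/tmp/tfds_manual", exist_ok=True)  # the user-prepared directory

dl_manager = tfds.download.DownloadManager(
    download_dir="/tmp/tfds_downloads",
    manual_dir="/tmp/tfds_manual",
    dataset_name="example")

# manual_dir raises AssertionError if /tmp/tfds_manual is missing; otherwise returns it.
archive_path = os.path.join(dl_manager.manual_dir, "images.zip")  # illustrative file name
print(archive_path)
```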
26,193 | tensorflow/datasets | tensorflow_datasets/image/cifar10_corrupted.py | Cifar10Corrupted._split_generators | def _split_generators(self, dl_manager):
"""Return the test split of Cifar10.
Args:
dl_manager: download manager object.
Returns:
test split.
"""
path = dl_manager.download_and_extract(_DOWNLOAD_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})
] | python | def _split_generators(self, dl_manager):
"""Return the test split of Cifar10.
Args:
dl_manager: download manager object.
Returns:
test split.
"""
path = dl_manager.download_and_extract(_DOWNLOAD_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=1,
gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})
] | [
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"path",
"=",
"dl_manager",
".",
"download_and_extract",
"(",
"_DOWNLOAD_URL",
")",
"return",
"[",
"tfds",
".",
"core",
".",
"SplitGenerator",
"(",
"name",
"=",
"tfds",
".",
"Split",
".",
"TEST",
",",
"num_shards",
"=",
"1",
",",
"gen_kwargs",
"=",
"{",
"'data_dir'",
":",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"_DIRNAME",
")",
"}",
")",
"]"
] | Return the test split of Cifar10.
Args:
dl_manager: download manager object.
Returns:
test split. | [
"Return",
"the",
"test",
"split",
"of",
"Cifar10",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar10_corrupted.py#L138-L153 |
26,194 | tensorflow/datasets | tensorflow_datasets/image/cifar10_corrupted.py | Cifar10Corrupted._generate_examples | def _generate_examples(self, data_dir):
"""Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label.
"""
corruption = self.builder_config.corruption
severity = self.builder_config.severity
images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption])
labels_file = os.path.join(data_dir, _LABELS_FILENAME)
with tf.io.gfile.GFile(labels_file, mode='rb') as f:
labels = np.load(f)
num_images = labels.shape[0] // 5
# Labels are stacked 5 times so we can just read the first iteration
labels = labels[:num_images]
with tf.io.gfile.GFile(images_file, mode='rb') as f:
images = np.load(f)
# Slice images corresponding to correct severity level
images = images[(severity - 1) * num_images:severity * num_images]
for image, label in zip(images, labels):
yield {
'image': image,
'label': label,
} | python | def _generate_examples(self, data_dir):
"""Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label.
"""
corruption = self.builder_config.corruption
severity = self.builder_config.severity
images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption])
labels_file = os.path.join(data_dir, _LABELS_FILENAME)
with tf.io.gfile.GFile(labels_file, mode='rb') as f:
labels = np.load(f)
num_images = labels.shape[0] // 5
# Labels are stacked 5 times so we can just read the first iteration
labels = labels[:num_images]
with tf.io.gfile.GFile(images_file, mode='rb') as f:
images = np.load(f)
# Slice images corresponding to correct severity level
images = images[(severity - 1) * num_images:severity * num_images]
for image, label in zip(images, labels):
yield {
'image': image,
'label': label,
} | [
"def",
"_generate_examples",
"(",
"self",
",",
"data_dir",
")",
":",
"corruption",
"=",
"self",
".",
"builder_config",
".",
"corruption",
"severity",
"=",
"self",
".",
"builder_config",
".",
"severity",
"images_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"_CORRUPTIONS_TO_FILENAMES",
"[",
"corruption",
"]",
")",
"labels_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"_LABELS_FILENAME",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"labels_file",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"labels",
"=",
"np",
".",
"load",
"(",
"f",
")",
"num_images",
"=",
"labels",
".",
"shape",
"[",
"0",
"]",
"//",
"5",
"# Labels are stacked 5 times so we can just read the first iteration",
"labels",
"=",
"labels",
"[",
":",
"num_images",
"]",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"images_file",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"images",
"=",
"np",
".",
"load",
"(",
"f",
")",
"# Slice images corresponding to correct severity level",
"images",
"=",
"images",
"[",
"(",
"severity",
"-",
"1",
")",
"*",
"num_images",
":",
"severity",
"*",
"num_images",
"]",
"for",
"image",
",",
"label",
"in",
"zip",
"(",
"images",
",",
"labels",
")",
":",
"yield",
"{",
"'image'",
":",
"image",
",",
"'label'",
":",
"label",
",",
"}"
] | Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label. | [
"Generate",
"corrupted",
"Cifar10",
"test",
"data",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar10_corrupted.py#L155-L189 |
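The generator above picks the block of images for the requested severity with `images[(severity - 1) * num_images : severity * num_images]` and reuses only the first copy of the five-times-stacked labels. A tiny worked example of that slicing on toy arrays (the shapes are illustrative, not the real CIFAR-10-C sizes):

```python
import numpy as np

num_images = 4                     # illustrative; the real test split has 10,000 images
severities = 5
images = np.arange(num_images * severities)      # severities concatenated along axis 0
labels = np.tile(np.arange(num_images), severities)

severity = 3
block = images[(severity - 1) * num_images: severity * num_images]
print(block)                 # [ 8  9 10 11]  -> the third severity block
print(labels[:num_images])   # [0 1 2 3]      -> labels repeat, so one block suffices
```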
26,195 | tensorflow/datasets | tensorflow_datasets/scripts/document_datasets.py | document_single_builder | def document_single_builder(builder):
"""Doc string for a single builder, with or without configs."""
mod_name = builder.__class__.__module__
cls_name = builder.__class__.__name__
mod_file = sys.modules[mod_name].__file__
if mod_file.endswith("pyc"):
mod_file = mod_file[:-1]
description_prefix = ""
if builder.builder_configs:
# Dataset with configs; document each one
config_docs = []
for config in builder.BUILDER_CONFIGS:
builder = tfds.builder(builder.name, config=config)
info = builder.info
# TODO(rsepassi): document the actual config object
config_doc = SINGLE_CONFIG_ENTRY.format(
builder_name=builder.name,
config_name=config.name,
description=config.description,
version=config.version,
feature_information=make_feature_information(info),
size=tfds.units.size_str(info.size_in_bytes),
)
config_docs.append(config_doc)
out_str = DATASET_WITH_CONFIGS_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
config_names="\n".join([
CONFIG_BULLET.format(name=config.name,
description=config.description,
version=config.version,
size=tfds.units.size_str(tfds.builder(
builder.name, config=config)
.info.size_in_bytes))
for config in builder.BUILDER_CONFIGS]),
config_cls="%s.%s" % (tfds_mod_name(mod_name),
type(builder.builder_config).__name__),
configs="\n".join(config_docs),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
statistics_information=make_statistics_information(info),
description=builder.info.description,
description_prefix=description_prefix,
)
else:
info = builder.info
out_str = DATASET_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
description=info.description,
description_prefix=description_prefix,
version=info.version,
feature_information=make_feature_information(info),
statistics_information=make_statistics_information(info),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
size=tfds.units.size_str(info.size_in_bytes),
)
out_str = schema_org(builder) + "\n" + out_str
return out_str | python | def document_single_builder(builder):
"""Doc string for a single builder, with or without configs."""
mod_name = builder.__class__.__module__
cls_name = builder.__class__.__name__
mod_file = sys.modules[mod_name].__file__
if mod_file.endswith("pyc"):
mod_file = mod_file[:-1]
description_prefix = ""
if builder.builder_configs:
# Dataset with configs; document each one
config_docs = []
for config in builder.BUILDER_CONFIGS:
builder = tfds.builder(builder.name, config=config)
info = builder.info
# TODO(rsepassi): document the actual config object
config_doc = SINGLE_CONFIG_ENTRY.format(
builder_name=builder.name,
config_name=config.name,
description=config.description,
version=config.version,
feature_information=make_feature_information(info),
size=tfds.units.size_str(info.size_in_bytes),
)
config_docs.append(config_doc)
out_str = DATASET_WITH_CONFIGS_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
config_names="\n".join([
CONFIG_BULLET.format(name=config.name,
description=config.description,
version=config.version,
size=tfds.units.size_str(tfds.builder(
builder.name, config=config)
.info.size_in_bytes))
for config in builder.BUILDER_CONFIGS]),
config_cls="%s.%s" % (tfds_mod_name(mod_name),
type(builder.builder_config).__name__),
configs="\n".join(config_docs),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
statistics_information=make_statistics_information(info),
description=builder.info.description,
description_prefix=description_prefix,
)
else:
info = builder.info
out_str = DATASET_ENTRY.format(
snakecase_name=builder.name,
module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name),
cls_url=cls_url(mod_name),
description=info.description,
description_prefix=description_prefix,
version=info.version,
feature_information=make_feature_information(info),
statistics_information=make_statistics_information(info),
urls=format_urls(info.urls),
url=url_from_info(info),
supervised_keys=str(info.supervised_keys),
citation=make_citation(info.citation),
size=tfds.units.size_str(info.size_in_bytes),
)
out_str = schema_org(builder) + "\n" + out_str
return out_str | [
"def",
"document_single_builder",
"(",
"builder",
")",
":",
"mod_name",
"=",
"builder",
".",
"__class__",
".",
"__module__",
"cls_name",
"=",
"builder",
".",
"__class__",
".",
"__name__",
"mod_file",
"=",
"sys",
".",
"modules",
"[",
"mod_name",
"]",
".",
"__file__",
"if",
"mod_file",
".",
"endswith",
"(",
"\"pyc\"",
")",
":",
"mod_file",
"=",
"mod_file",
"[",
":",
"-",
"1",
"]",
"description_prefix",
"=",
"\"\"",
"if",
"builder",
".",
"builder_configs",
":",
"# Dataset with configs; document each one",
"config_docs",
"=",
"[",
"]",
"for",
"config",
"in",
"builder",
".",
"BUILDER_CONFIGS",
":",
"builder",
"=",
"tfds",
".",
"builder",
"(",
"builder",
".",
"name",
",",
"config",
"=",
"config",
")",
"info",
"=",
"builder",
".",
"info",
"# TODO(rsepassi): document the actual config object",
"config_doc",
"=",
"SINGLE_CONFIG_ENTRY",
".",
"format",
"(",
"builder_name",
"=",
"builder",
".",
"name",
",",
"config_name",
"=",
"config",
".",
"name",
",",
"description",
"=",
"config",
".",
"description",
",",
"version",
"=",
"config",
".",
"version",
",",
"feature_information",
"=",
"make_feature_information",
"(",
"info",
")",
",",
"size",
"=",
"tfds",
".",
"units",
".",
"size_str",
"(",
"info",
".",
"size_in_bytes",
")",
",",
")",
"config_docs",
".",
"append",
"(",
"config_doc",
")",
"out_str",
"=",
"DATASET_WITH_CONFIGS_ENTRY",
".",
"format",
"(",
"snakecase_name",
"=",
"builder",
".",
"name",
",",
"module_and_class",
"=",
"\"%s.%s\"",
"%",
"(",
"tfds_mod_name",
"(",
"mod_name",
")",
",",
"cls_name",
")",
",",
"cls_url",
"=",
"cls_url",
"(",
"mod_name",
")",
",",
"config_names",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"CONFIG_BULLET",
".",
"format",
"(",
"name",
"=",
"config",
".",
"name",
",",
"description",
"=",
"config",
".",
"description",
",",
"version",
"=",
"config",
".",
"version",
",",
"size",
"=",
"tfds",
".",
"units",
".",
"size_str",
"(",
"tfds",
".",
"builder",
"(",
"builder",
".",
"name",
",",
"config",
"=",
"config",
")",
".",
"info",
".",
"size_in_bytes",
")",
")",
"for",
"config",
"in",
"builder",
".",
"BUILDER_CONFIGS",
"]",
")",
",",
"config_cls",
"=",
"\"%s.%s\"",
"%",
"(",
"tfds_mod_name",
"(",
"mod_name",
")",
",",
"type",
"(",
"builder",
".",
"builder_config",
")",
".",
"__name__",
")",
",",
"configs",
"=",
"\"\\n\"",
".",
"join",
"(",
"config_docs",
")",
",",
"urls",
"=",
"format_urls",
"(",
"info",
".",
"urls",
")",
",",
"url",
"=",
"url_from_info",
"(",
"info",
")",
",",
"supervised_keys",
"=",
"str",
"(",
"info",
".",
"supervised_keys",
")",
",",
"citation",
"=",
"make_citation",
"(",
"info",
".",
"citation",
")",
",",
"statistics_information",
"=",
"make_statistics_information",
"(",
"info",
")",
",",
"description",
"=",
"builder",
".",
"info",
".",
"description",
",",
"description_prefix",
"=",
"description_prefix",
",",
")",
"else",
":",
"info",
"=",
"builder",
".",
"info",
"out_str",
"=",
"DATASET_ENTRY",
".",
"format",
"(",
"snakecase_name",
"=",
"builder",
".",
"name",
",",
"module_and_class",
"=",
"\"%s.%s\"",
"%",
"(",
"tfds_mod_name",
"(",
"mod_name",
")",
",",
"cls_name",
")",
",",
"cls_url",
"=",
"cls_url",
"(",
"mod_name",
")",
",",
"description",
"=",
"info",
".",
"description",
",",
"description_prefix",
"=",
"description_prefix",
",",
"version",
"=",
"info",
".",
"version",
",",
"feature_information",
"=",
"make_feature_information",
"(",
"info",
")",
",",
"statistics_information",
"=",
"make_statistics_information",
"(",
"info",
")",
",",
"urls",
"=",
"format_urls",
"(",
"info",
".",
"urls",
")",
",",
"url",
"=",
"url_from_info",
"(",
"info",
")",
",",
"supervised_keys",
"=",
"str",
"(",
"info",
".",
"supervised_keys",
")",
",",
"citation",
"=",
"make_citation",
"(",
"info",
".",
"citation",
")",
",",
"size",
"=",
"tfds",
".",
"units",
".",
"size_str",
"(",
"info",
".",
"size_in_bytes",
")",
",",
")",
"out_str",
"=",
"schema_org",
"(",
"builder",
")",
"+",
"\"\\n\"",
"+",
"out_str",
"return",
"out_str"
] | Doc string for a single builder, with or without configs. | [
"Doc",
"string",
"for",
"a",
"single",
"builder",
"with",
"or",
"without",
"configs",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L196-L265 |
26,196 | tensorflow/datasets | tensorflow_datasets/scripts/document_datasets.py | make_module_to_builder_dict | def make_module_to_builder_dict(datasets=None):
"""Get all builders organized by module in nested dicts."""
# pylint: disable=g-long-lambda
# dict to hold tfds->image->mnist->[builders]
module_to_builder = collections.defaultdict(
lambda: collections.defaultdict(
lambda: collections.defaultdict(list)))
# pylint: enable=g-long-lambda
if datasets:
builders = [tfds.builder(name) for name in datasets]
else:
builders = [
tfds.builder(name)
for name in tfds.list_builders()
if name not in BUILDER_BLACKLIST
] + [tfds.builder("image_label_folder", dataset_name="image_label_folder")]
for builder in builders:
mod_name = builder.__class__.__module__
modules = mod_name.split(".")
if "testing" in modules:
continue
current_mod_ctr = module_to_builder
for mod in modules:
current_mod_ctr = current_mod_ctr[mod]
current_mod_ctr.append(builder)
module_to_builder = module_to_builder["tensorflow_datasets"]
return module_to_builder | python | def make_module_to_builder_dict(datasets=None):
"""Get all builders organized by module in nested dicts."""
# pylint: disable=g-long-lambda
# dict to hold tfds->image->mnist->[builders]
module_to_builder = collections.defaultdict(
lambda: collections.defaultdict(
lambda: collections.defaultdict(list)))
# pylint: enable=g-long-lambda
if datasets:
builders = [tfds.builder(name) for name in datasets]
else:
builders = [
tfds.builder(name)
for name in tfds.list_builders()
if name not in BUILDER_BLACKLIST
] + [tfds.builder("image_label_folder", dataset_name="image_label_folder")]
for builder in builders:
mod_name = builder.__class__.__module__
modules = mod_name.split(".")
if "testing" in modules:
continue
current_mod_ctr = module_to_builder
for mod in modules:
current_mod_ctr = current_mod_ctr[mod]
current_mod_ctr.append(builder)
module_to_builder = module_to_builder["tensorflow_datasets"]
return module_to_builder | [
"def",
"make_module_to_builder_dict",
"(",
"datasets",
"=",
"None",
")",
":",
"# pylint: disable=g-long-lambda",
"# dict to hold tfds->image->mnist->[builders]",
"module_to_builder",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"collections",
".",
"defaultdict",
"(",
"list",
")",
")",
")",
"# pylint: enable=g-long-lambda",
"if",
"datasets",
":",
"builders",
"=",
"[",
"tfds",
".",
"builder",
"(",
"name",
")",
"for",
"name",
"in",
"datasets",
"]",
"else",
":",
"builders",
"=",
"[",
"tfds",
".",
"builder",
"(",
"name",
")",
"for",
"name",
"in",
"tfds",
".",
"list_builders",
"(",
")",
"if",
"name",
"not",
"in",
"BUILDER_BLACKLIST",
"]",
"+",
"[",
"tfds",
".",
"builder",
"(",
"\"image_label_folder\"",
",",
"dataset_name",
"=",
"\"image_label_folder\"",
")",
"]",
"for",
"builder",
"in",
"builders",
":",
"mod_name",
"=",
"builder",
".",
"__class__",
".",
"__module__",
"modules",
"=",
"mod_name",
".",
"split",
"(",
"\".\"",
")",
"if",
"\"testing\"",
"in",
"modules",
":",
"continue",
"current_mod_ctr",
"=",
"module_to_builder",
"for",
"mod",
"in",
"modules",
":",
"current_mod_ctr",
"=",
"current_mod_ctr",
"[",
"mod",
"]",
"current_mod_ctr",
".",
"append",
"(",
"builder",
")",
"module_to_builder",
"=",
"module_to_builder",
"[",
"\"tensorflow_datasets\"",
"]",
"return",
"module_to_builder"
] | Get all builders organized by module in nested dicts. | [
"Get",
"all",
"builders",
"organized",
"by",
"module",
"in",
"nested",
"dicts",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L275-L305 |
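`make_module_to_builder_dict` above relies on a triply nested `collections.defaultdict` so that a path like `tensorflow_datasets -> image -> mnist` can be filled without creating intermediate dicts by hand. A small sketch of that idiom with made-up module paths and builder names:

```python
import collections

# Three levels of defaultdict with list leaves, mirroring
# tensorflow_datasets -> section -> module -> [builders].
module_to_builder = collections.defaultdict(
    lambda: collections.defaultdict(lambda: collections.defaultdict(list)))

entries = [  # made-up module paths and builder names
    (("tensorflow_datasets", "image", "mnist"), "MnistBuilder"),
    (("tensorflow_datasets", "image", "cifar10"), "Cifar10Builder"),
    (("tensorflow_datasets", "text", "imdb"), "ImdbBuilder"),
]
for modules, builder in entries:
    current = module_to_builder
    for mod in modules:
        current = current[mod]
    current.append(builder)

print(sorted(module_to_builder["tensorflow_datasets"]))  # ['image', 'text']
```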
26,197 | tensorflow/datasets | tensorflow_datasets/scripts/document_datasets.py | _pprint_features_dict | def _pprint_features_dict(features_dict, indent=0, add_prefix=True):
"""Pretty-print tfds.features.FeaturesDict."""
first_last_indent_str = " " * indent
indent_str = " " * (indent + 4)
first_line = "%s%s({" % (
first_last_indent_str if add_prefix else "",
type(features_dict).__name__,
)
lines = [first_line]
for k in sorted(list(features_dict.keys())):
v = features_dict[k]
if isinstance(v, tfds.features.FeaturesDict):
v_str = _pprint_features_dict(v, indent + 4, False)
else:
v_str = str(v)
lines.append("%s'%s': %s," % (indent_str, k, v_str))
lines.append("%s})" % first_last_indent_str)
return "\n".join(lines) | python | def _pprint_features_dict(features_dict, indent=0, add_prefix=True):
"""Pretty-print tfds.features.FeaturesDict."""
first_last_indent_str = " " * indent
indent_str = " " * (indent + 4)
first_line = "%s%s({" % (
first_last_indent_str if add_prefix else "",
type(features_dict).__name__,
)
lines = [first_line]
for k in sorted(list(features_dict.keys())):
v = features_dict[k]
if isinstance(v, tfds.features.FeaturesDict):
v_str = _pprint_features_dict(v, indent + 4, False)
else:
v_str = str(v)
lines.append("%s'%s': %s," % (indent_str, k, v_str))
lines.append("%s})" % first_last_indent_str)
return "\n".join(lines) | [
"def",
"_pprint_features_dict",
"(",
"features_dict",
",",
"indent",
"=",
"0",
",",
"add_prefix",
"=",
"True",
")",
":",
"first_last_indent_str",
"=",
"\" \"",
"*",
"indent",
"indent_str",
"=",
"\" \"",
"*",
"(",
"indent",
"+",
"4",
")",
"first_line",
"=",
"\"%s%s({\"",
"%",
"(",
"first_last_indent_str",
"if",
"add_prefix",
"else",
"\"\"",
",",
"type",
"(",
"features_dict",
")",
".",
"__name__",
",",
")",
"lines",
"=",
"[",
"first_line",
"]",
"for",
"k",
"in",
"sorted",
"(",
"list",
"(",
"features_dict",
".",
"keys",
"(",
")",
")",
")",
":",
"v",
"=",
"features_dict",
"[",
"k",
"]",
"if",
"isinstance",
"(",
"v",
",",
"tfds",
".",
"features",
".",
"FeaturesDict",
")",
":",
"v_str",
"=",
"_pprint_features_dict",
"(",
"v",
",",
"indent",
"+",
"4",
",",
"False",
")",
"else",
":",
"v_str",
"=",
"str",
"(",
"v",
")",
"lines",
".",
"append",
"(",
"\"%s'%s': %s,\"",
"%",
"(",
"indent_str",
",",
"k",
",",
"v_str",
")",
")",
"lines",
".",
"append",
"(",
"\"%s})\"",
"%",
"first_last_indent_str",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"lines",
")"
] | Pretty-print tfds.features.FeaturesDict. | [
"Pretty",
"-",
"print",
"tfds",
".",
"features",
".",
"FeaturesDict",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L308-L325 |
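`_pprint_features_dict` above walks the sorted keys and indents nested `FeaturesDict`s. A hedged sketch of the kind of structure it formats; the feature choices are illustrative and the commented output is only approximate, since the exact `str()` of each feature depends on the tfds version:

```python
import tensorflow_datasets as tfds

features = tfds.features.FeaturesDict({
    "image": tfds.features.Image(shape=(28, 28, 1)),
    "metadata": tfds.features.FeaturesDict({
        "label": tfds.features.ClassLabel(num_classes=10),
    }),
})

# The pretty-printer would render something roughly like:
#
# FeaturesDict({
#     'image': Image(shape=(28, 28, 1), dtype=tf.uint8),
#     'metadata': FeaturesDict({
#         'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=10),
#     }),
# })
```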
26,198 | tensorflow/datasets | tensorflow_datasets/scripts/document_datasets.py | make_statistics_information | def make_statistics_information(info):
"""Make statistics information table."""
if not info.splits.total_num_examples:
# That means that we have yet to calculate the statistics for this.
return "None computed"
stats = [(info.splits.total_num_examples, "ALL")]
for split_name, split_info in info.splits.items():
stats.append((split_info.num_examples, split_name.upper()))
# Sort reverse on number of examples.
stats.sort(reverse=True)
stats = "\n".join([
"{0:10} | {1:>10,}".format(name, num_exs) for (num_exs, name) in stats
])
return STATISTICS_TABLE.format(split_statistics=stats) | python | def make_statistics_information(info):
"""Make statistics information table."""
if not info.splits.total_num_examples:
# That means that we have yet to calculate the statistics for this.
return "None computed"
stats = [(info.splits.total_num_examples, "ALL")]
for split_name, split_info in info.splits.items():
stats.append((split_info.num_examples, split_name.upper()))
# Sort reverse on number of examples.
stats.sort(reverse=True)
stats = "\n".join([
"{0:10} | {1:>10,}".format(name, num_exs) for (num_exs, name) in stats
])
return STATISTICS_TABLE.format(split_statistics=stats) | [
"def",
"make_statistics_information",
"(",
"info",
")",
":",
"if",
"not",
"info",
".",
"splits",
".",
"total_num_examples",
":",
"# That means that we have yet to calculate the statistics for this.",
"return",
"\"None computed\"",
"stats",
"=",
"[",
"(",
"info",
".",
"splits",
".",
"total_num_examples",
",",
"\"ALL\"",
")",
"]",
"for",
"split_name",
",",
"split_info",
"in",
"info",
".",
"splits",
".",
"items",
"(",
")",
":",
"stats",
".",
"append",
"(",
"(",
"split_info",
".",
"num_examples",
",",
"split_name",
".",
"upper",
"(",
")",
")",
")",
"# Sort reverse on number of examples.",
"stats",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"stats",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"{0:10} | {1:>10,}\"",
".",
"format",
"(",
"name",
",",
"num_exs",
")",
"for",
"(",
"num_exs",
",",
"name",
")",
"in",
"stats",
"]",
")",
"return",
"STATISTICS_TABLE",
".",
"format",
"(",
"split_statistics",
"=",
"stats",
")"
] | Make statistics information table. | [
"Make",
"statistics",
"information",
"table",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L337-L351 |
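The rows of the statistics table above come from `"{0:10} | {1:>10,}".format(name, num_examples)`: a split name left-padded to ten characters and a comma-grouped count right-aligned in ten. A tiny worked example with made-up counts:

```python
stats = [(70000, "ALL"), (60000, "TRAIN"), (10000, "TEST")]  # made-up example counts
stats.sort(reverse=True)  # largest split first, matching the record above

table = "\n".join(
    "{0:10} | {1:>10,}".format(name, num_exs) for (num_exs, name) in stats)
print(table)
# ALL        |     70,000
# TRAIN      |     60,000
# TEST       |     10,000
```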
26,199 | tensorflow/datasets | tensorflow_datasets/scripts/document_datasets.py | dataset_docs_str | def dataset_docs_str(datasets=None):
"""Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format).
"""
module_to_builder = make_module_to_builder_dict(datasets)
sections = sorted(list(module_to_builder.keys()))
section_tocs = []
section_docs = []
for section in sections:
builders = tf.nest.flatten(module_to_builder[section])
builders = sorted(builders, key=lambda b: b.name)
builder_docs = [document_single_builder(builder) for builder in builders]
section_doc = SECTION_DATASETS.format(
section_name=section, datasets="\n".join(builder_docs))
section_toc = create_section_toc(section, builders)
section_docs.append(section_doc)
section_tocs.append(section_toc)
full_doc = DOC.format(toc="\n".join(section_tocs),
datasets="\n".join(section_docs))
return full_doc | python | def dataset_docs_str(datasets=None):
"""Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format).
"""
module_to_builder = make_module_to_builder_dict(datasets)
sections = sorted(list(module_to_builder.keys()))
section_tocs = []
section_docs = []
for section in sections:
builders = tf.nest.flatten(module_to_builder[section])
builders = sorted(builders, key=lambda b: b.name)
builder_docs = [document_single_builder(builder) for builder in builders]
section_doc = SECTION_DATASETS.format(
section_name=section, datasets="\n".join(builder_docs))
section_toc = create_section_toc(section, builders)
section_docs.append(section_doc)
section_tocs.append(section_toc)
full_doc = DOC.format(toc="\n".join(section_tocs),
datasets="\n".join(section_docs))
return full_doc | [
"def",
"dataset_docs_str",
"(",
"datasets",
"=",
"None",
")",
":",
"module_to_builder",
"=",
"make_module_to_builder_dict",
"(",
"datasets",
")",
"sections",
"=",
"sorted",
"(",
"list",
"(",
"module_to_builder",
".",
"keys",
"(",
")",
")",
")",
"section_tocs",
"=",
"[",
"]",
"section_docs",
"=",
"[",
"]",
"for",
"section",
"in",
"sections",
":",
"builders",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"module_to_builder",
"[",
"section",
"]",
")",
"builders",
"=",
"sorted",
"(",
"builders",
",",
"key",
"=",
"lambda",
"b",
":",
"b",
".",
"name",
")",
"builder_docs",
"=",
"[",
"document_single_builder",
"(",
"builder",
")",
"for",
"builder",
"in",
"builders",
"]",
"section_doc",
"=",
"SECTION_DATASETS",
".",
"format",
"(",
"section_name",
"=",
"section",
",",
"datasets",
"=",
"\"\\n\"",
".",
"join",
"(",
"builder_docs",
")",
")",
"section_toc",
"=",
"create_section_toc",
"(",
"section",
",",
"builders",
")",
"section_docs",
".",
"append",
"(",
"section_doc",
")",
"section_tocs",
".",
"append",
"(",
"section_toc",
")",
"full_doc",
"=",
"DOC",
".",
"format",
"(",
"toc",
"=",
"\"\\n\"",
".",
"join",
"(",
"section_tocs",
")",
",",
"datasets",
"=",
"\"\\n\"",
".",
"join",
"(",
"section_docs",
")",
")",
"return",
"full_doc"
] | Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format). | [
"Create",
"dataset",
"documentation",
"string",
"for",
"given",
"datasets",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/document_datasets.py#L354-L383 |