id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
3,166 | import collections
from beancount.core import data
ConfigError = collections.namedtuple('ConfigError', 'source message entry')
CommodityError = collections.namedtuple('CommodityError', 'source message entry')
The provided code snippet includes necessary dependencies for implementing the `validate_commodity_attr` function. Write a Python function `def validate_commodity_attr(entries, unused_options_map, config_str)` to solve the following problem:
Check that all Commodity directives have a valid attribute. Args: entries: A list of directives. unused_options_map: An options map. config_str: A configuration string. Returns: A list of new errors, if any were found.
Here is the function:
def validate_commodity_attr(entries, unused_options_map, config_str):
    """Check that all Commodity directives have a valid attribute.

    Args:
      entries: A list of directives.
      unused_options_map: An options map.
      config_str: A configuration string: a Python dict literal mapping each
        required attribute name to either None (any value accepted) or a list
        of valid values.
    Returns:
      The list of input entries, and a list of new errors, if any were found.
    """
    errors = []

    # The configuration comes from the user's own ledger file, so eval is the
    # established plugin convention here; never apply it to untrusted input.
    # pylint: disable=eval-used
    config_obj = eval(config_str, {}, {})
    if not isinstance(config_obj, dict):
        errors.append(ConfigError(
            data.new_metadata('<commodity_attr>', 0),
            "Invalid configuration for commodity_attr plugin; skipping.", None))
        return entries, errors

    # Pre-compute frozensets of valid values for O(1) membership tests; None
    # means the attribute is required but its value is unconstrained.
    validmap = {attr: frozenset(values) if values is not None else None
                for attr, values in config_obj.items()}

    for entry in entries:
        if not isinstance(entry, data.Commodity):
            continue
        for attr, values in validmap.items():
            value = entry.meta.get(attr, None)
            if value is None:
                errors.append(CommodityError(
                    entry.meta,
                    "Missing attribute '{}' for Commodity directive {}".format(
                        attr, entry.currency), None))
                continue
            if values and value not in values:
                errors.append(CommodityError(
                    entry.meta,
                    "Invalid value '{}' for attribute {}, Commodity".format(value, attr) +
                    " directive {}; valid options: {}".format(
                        entry.currency, ', '.join(values)), None))
    return entries, errors
3,167 | from beancount.core import data
from beancount.core import getters
The provided code snippet includes necessary dependencies for implementing the `auto_insert_open` function. Write a Python function `def auto_insert_open(entries, unused_options_map)` to solve the following problem:
Insert Open directives for accounts not opened. Open directives are inserted at the date of the first entry. Open directives for unused accounts are removed. Args: entries: A list of directives. unused_options_map: A parser options dict. Returns: A list of entries, possibly with more Open entries than before, and a list of errors.
Here is the function:
def auto_insert_open(entries, unused_options_map):
    """Insert Open directives for accounts not opened.

    Open directives are inserted at the date each account is first used.
    Accounts that already have an Open directive are left untouched.

    Args:
      entries: A list of directives.
      unused_options_map: A parser options dict.
    Returns:
      A list of entries, possibly with more Open entries than before, and a
      list of errors.
    """
    opened_accounts = {entry.account
                       for entry in entries
                       if isinstance(entry, data.Open)}

    new_entries = []
    # Map each account to the date it is first used; sorted() gives a stable
    # ordering (and stable lineno metadata) for the synthesized directives.
    accounts_first, _ = getters.get_accounts_use_map(entries)
    for index, (account, date_first_used) in enumerate(sorted(accounts_first.items())):
        if account not in opened_accounts:
            meta = data.new_metadata('<auto_accounts>', index)
            new_entries.append(data.Open(meta, date_first_used, account,
                                         None, None))

    if new_entries:
        # Merge the synthesized Open directives into date order.
        new_entries.extend(entries)
        new_entries.sort(key=data.entry_sortkey)
    else:
        # Nothing was added; avoid a needless copy and re-sort.
        new_entries = entries

    return new_entries, []
3,168 | import collections
from beancount.core.number import ZERO
from beancount.core import data
from beancount.core import amount
from beancount.core import convert
from beancount.core import inventory
from beancount.core import account_types
from beancount.core import interpolate
from beancount.parser import options
SellGainsError = collections.namedtuple('SellGainsError', 'source message entry')
EXTRA_TOLERANCE_MULTIPLIER = 2
ZERO = Decimal()
The provided code snippet includes necessary dependencies for implementing the `validate_sell_gains` function. Write a Python function `def validate_sell_gains(entries, options_map)` to solve the following problem:
Check the sum of asset account totals for lots sold with a price on them. Args: entries: A list of directives. unused_options_map: An options map. Returns: A list of new errors, if any were found.
Here is the function:
def validate_sell_gains(entries, options_map):
    """Check the sum of asset account totals for lots sold with a price on them.

    For transactions in which every posting held at cost also carries a price
    annotation, verify that the total priced value of the reduced lots matches
    the sum of the proceeds legs (assets, liabilities, equity and expenses)
    within tolerance.

    Args:
      entries: A list of directives.
      options_map: An options map.
    Returns:
      The list of input entries, and a list of new errors, if any were found.
    """
    errors = []
    acc_types = options.get_account_types(options_map)
    # Account types whose legs count toward the proceeds; Income legs (the
    # gains themselves) are deliberately excluded from the sum.
    proceed_types = set([acc_types.assets,
                        acc_types.liabilities,
                        acc_types.equity,
                        acc_types.expenses])

    for entry in entries:
        if not isinstance(entry, data.Transaction):
            continue

        # Find transactions whose lots at cost all have a price.
        postings_at_cost = [posting
                            for posting in entry.postings
                            if posting.cost is not None]
        if not postings_at_cost or not all(posting.price is not None
                                           for posting in postings_at_cost):
            continue

        # Accumulate the total expected proceeds and the sum of the asset and
        # expenses legs.
        total_price = inventory.Inventory()
        total_proceeds = inventory.Inventory()
        for posting in entry.postings:
            # If the posting is held at cost, add the priced value to the balance.
            if posting.cost is not None:
                assert posting.price is not None
                price = posting.price
                total_price.add_amount(amount.mul(price, -posting.units.number))
            else:
                # Otherwise, use the weight and ignore postings to Income accounts.
                atype = account_types.get_account_type(posting.account)
                if atype in proceed_types:
                    total_proceeds.add_amount(convert.get_weight(posting))

        # Compare inventories, currency by currency.
        dict_price = {pos.units.currency: pos.units.number
                      for pos in total_price}
        dict_proceeds = {pos.units.currency: pos.units.number
                         for pos in total_proceeds}

        tolerances = interpolate.infer_tolerances(entry.postings, options_map)
        invalid = False
        for currency, price_number in dict_price.items():
            # Accept a looser than usual tolerance because rounding occurs
            # differently. Also, it would be difficult for the user to satisfy
            # two sets of constraints manually.
            # NOTE(review): tolerances.get(currency) returns None for a
            # currency absent from the inferred tolerances, which would raise
            # TypeError here -- TODO confirm infer_tolerances always covers
            # the priced currencies.
            tolerance = tolerances.get(currency) * EXTRA_TOLERANCE_MULTIPLIER
            proceeds_number = dict_proceeds.pop(currency, ZERO)
            diff = abs(price_number - proceeds_number)
            if diff > tolerance:
                invalid = True
                break

        # Also flag proceeds in currencies that never appeared in the priced
        # lots (leftovers in dict_proceeds after the pops above).
        if invalid or dict_proceeds:
            errors.append(
                SellGainsError(
                    entry.meta,
                    "Invalid price vs. proceeds/gains: {} vs. {}; difference: {}".format(
                        total_price, total_proceeds, (total_price + -total_proceeds)),
                    entry))

    return entries, errors
3,169 | import collections
from beancount.core import data
UniquePricesError = collections.namedtuple('UniquePricesError', 'source message entry')
The provided code snippet includes necessary dependencies for implementing the `validate_unique_prices` function. Write a Python function `def validate_unique_prices(entries, unused_options_map)` to solve the following problem:
Check that there is only a single price per day for a particular base/quote. Args: entries: A list of directives. We're interested only in the Transaction instances. unused_options_map: A parser options dict. Returns: The list of input entries, and a list of new UniquePricesError instances generated.
Here is the function:
def validate_unique_prices(entries, unused_options_map):
    """Check that there is only a single price per day for a particular base/quote.

    Args:
      entries: A list of directives. We're interested only in the Price instances.
      unused_options_map: A parser options dict.
    Returns:
      The list of input entries, and a list of new UniquePricesError instances
      generated.
    """
    errors = []

    # Group Price entries by (date, base currency, quote currency).
    prices = collections.defaultdict(list)
    for entry in entries:
        if not isinstance(entry, data.Price):
            continue
        key = (entry.date, entry.currency, entry.amount.currency)
        prices[key].append(entry)

    for price_entries in prices.values():
        if len(price_entries) > 1:
            # Only report when the duplicated entries actually disagree on the
            # price number; identical duplicates are tolerated.
            number_map = {price_entry.amount.number: price_entry
                          for price_entry in price_entries}
            if len(number_map) > 1:
                # Note: This should be a list of entries for better error
                # reporting. (Later.)
                error_entry = next(iter(number_map.values()))
                errors.append(
                    UniquePricesError(error_entry.meta,
                                      "Disagreeing price entries",
                                      price_entries))
    return entries, errors
3,170 | import collections
from beancount.core import data
CoherentCostError = collections.namedtuple('CoherentCostError', 'source message entry')
The provided code snippet includes necessary dependencies for implementing the `validate_coherent_cost` function. Write a Python function `def validate_coherent_cost(entries, unused_options_map)` to solve the following problem:
Check that all currencies are either used at cost or not at all, but never both. Args: entries: A list of directives. unused_options_map: An options map. Returns: A list of new errors, if any were found.
Here is the function:
def validate_coherent_cost(entries, unused_options_map):
    """Check that all currencies are either used at cost or not at all, but never both.

    Args:
      entries: A list of directives.
      unused_options_map: An options map.
    Returns:
      The list of input entries, and a list of new errors, if any were found.
    """
    errors = []

    # For each currency, remember the first transaction that used it with a
    # cost and the first that used it without one.
    with_cost = {}
    without_cost = {}
    for entry in data.filter_txns(entries):
        for posting in entry.postings:
            target_set = without_cost if posting.cost is None else with_cost
            currency = posting.units.currency
            # setdefault() keeps only the earliest transaction per currency.
            target_set.setdefault(currency, entry)

    # Any currency appearing in both maps is used inconsistently.
    for currency in set(with_cost) & set(without_cost):
        errors.append(
            CoherentCostError(
                without_cost[currency].meta,
                "Currency '{}' is used both with and without cost".format(currency),
                with_cost[currency]))
    # Note: We really ought to include both of the first transactions here.

    return entries, errors
3,171 | import collections
from beancount.core import data
from beancount.core import getters
UnusedAccountError = collections.namedtuple('UnusedAccountError', 'source message entry')
The provided code snippet includes necessary dependencies for implementing the `validate_unused_accounts` function. Write a Python function `def validate_unused_accounts(entries, unused_options_map)` to solve the following problem:
Check that all accounts declared open are actually used. We check that all of the accounts that are open are at least referred to by another directive. These are probably unused, so issue a warning (we like to be pedantic). Note that an account that is open and then closed is considered used--this is a valid use case that may occur in reality. If you have a use case for an account to be open but never used, you can quiet that warning by initializing the account with a balance asserts or a pad directive, or even use a note will be sufficient. (This is probably a good candidate for optional inclusion as a "pedantic" plugin.) Args: entries: A list of directives. unused_options_map: An options map. Returns: A list of new errors, if any were found.
Here is the function:
def validate_unused_accounts(entries, unused_options_map):
    """Check that all accounts declared open are actually used.

    We check that all of the accounts that are open are at least referred to by
    another directive. These are probably unused, so issue a warning (we like to
    be pedantic). Note that an account that is open and then closed is
    considered used--this is a valid use case that may occur in reality. If you
    have a use case for an account to be open but never used, you can quiet that
    warning by initializing the account with a balance assertion or a pad
    directive; even a note will be sufficient.

    (This is probably a good candidate for optional inclusion as a "pedantic"
    plugin.)

    Args:
      entries: A list of directives.
      unused_options_map: An options map.
    Returns:
      The list of input entries, and a list of new errors, if any were found.
    """
    # Find all the accounts referenced by entries which are not Open, and the
    # open directives for error reporting below.
    open_map = {}
    referenced_accounts = set()
    for entry in entries:
        if isinstance(entry, data.Open):
            open_map[entry.account] = entry
            continue
        referenced_accounts.update(getters.get_entry_accounts(entry))

    # Create a list of suitable errors, with the location of the Open directives
    # corresponding to the unused accounts.
    errors = [UnusedAccountError(open_entry.meta,
                                 "Unused account '{}'".format(account),
                                 open_entry)
              for account, open_entry in open_map.items()
              if account not in referenced_accounts]
    return entries, errors
3,172 | import collections
from beancount.core.data import Transaction
from beancount.core import data
from beancount.core import amount
from beancount.core import inventory
METADATA_FIELD = "__implicit_prices__"
# NOTE(review): this class duplicates beancount.core.data.Transaction and
# shadows the name imported from beancount.core.data above -- presumably
# included as reference context; confirm the shadowing is intended.
# The annotations reference names (NamedTuple, Meta, Flag, Posting, datetime,
# Optional, Set, List) that are not defined in this snippet.
class Transaction(NamedTuple):
    """
    A transaction! This is the main type of object that we manipulate, and the
    entire reason this whole project exists in the first place, because
    representing these types of structures with a spreadsheet is difficult.
    Attributes:
      meta: See above.
      date: See above.
      flag: A single-character string or None. This user-specified string
        represents some custom/user-defined state of the transaction. You can use
        this for various purposes. Otherwise common, pre-defined flags are defined
        under beancount.core.flags, to flags transactions that are automatically
        generated.
      payee: A free-form string that identifies the payee, or None, if absent.
      narration: A free-form string that provides a description for the transaction.
        All transactions have at least a narration string, this is never None.
      tags: A set of tag strings (without the '#'), or EMPTY_SET.
      links: A set of link strings (without the '^'), or EMPTY_SET.
      postings: A list of Posting instances, the legs of this transaction. See the
        doc under Posting above.
    """
    meta: Meta
    date: datetime.date
    flag: Flag
    payee: Optional[str]
    narration: str
    tags: Set
    links: Set
    postings: List[Posting]
The provided code snippet includes necessary dependencies for implementing the `add_implicit_prices` function. Write a Python function `def add_implicit_prices(entries, unused_options_map)` to solve the following problem:
Insert implicitly defined prices from Transactions. Explicit price entries are simply maintained in the output list. Prices from postings with costs or with prices from Transaction entries are synthesized as new Price entries in the list of entries output. Args: entries: A list of directives. We're interested only in the Transaction instances. unused_options_map: A parser options dict. Returns: A list of entries, possibly with more Price entries than before, and a list of errors.
Here is the function:
def add_implicit_prices(entries, unused_options_map):
    """Insert implicitly defined prices from Transactions.

    Explicit price entries are simply maintained in the output list. Prices from
    postings with costs or with prices from Transaction entries are synthesized
    as new Price entries in the list of entries output.

    Args:
      entries: A list of directives. We're interested only in the Transaction instances.
      unused_options_map: A parser options dict.
    Returns:
      A list of entries, possibly with more Price entries than before, and a
      list of errors.
    """
    new_entries = []
    errors = []

    # A dict of (date, currency, number, cost-currency) to price entry, used to
    # avoid inserting the exact same price twice.
    new_price_entry_map = {}

    # Running balance per account, used to detect whether a posting at cost
    # augments or reduces an existing position.
    balances = collections.defaultdict(inventory.Inventory)
    for entry in entries:
        # Always replicate the existing entries.
        new_entries.append(entry)

        if not isinstance(entry, Transaction):
            continue

        # Inspect all the postings in the transaction.
        for posting in entry.postings:
            units = posting.units
            cost = posting.cost

            # Check if the position is matching against an existing position.
            _, booking = balances[posting.account].add_position(posting)

            # Add prices when they're explicitly specified on a posting. An
            # explicitly specified price may occur in a conversion, e.g.
            #      Assets:Account    100 USD @ 1.10 CAD
            # or, if a cost is also specified, as the current price of the
            # underlying instrument, e.g.
            #      Assets:Account    100 HOOL {564.20} @ {581.97} USD
            if posting.price is not None:
                meta = data.new_metadata(entry.meta["filename"], entry.meta["lineno"])
                meta[METADATA_FIELD] = "from_price"
                price_entry = data.Price(meta, entry.date,
                                         units.currency,
                                         posting.price)

            # Add costs, when we're not matching against an existing
            # position. This happens when we're just specifying the cost, e.g.
            #      Assets:Account    100 HOOL {564.20}
            elif (cost is not None and
                  booking != inventory.MatchResult.REDUCED):
                # TODO(blais): What happens here if the account has no
                # booking strategy? Do we end up inserting a price for the
                # reducing leg? Check.
                meta = data.new_metadata(entry.meta["filename"], entry.meta["lineno"])
                meta[METADATA_FIELD] = "from_cost"
                price_entry = data.Price(meta, entry.date,
                                         units.currency,
                                         amount.Amount(cost.number, cost.currency))
            else:
                price_entry = None

            if price_entry is None:
                continue

            key = (price_entry.date,
                   price_entry.currency,
                   price_entry.amount.number,  # Ideally should be removed.
                   price_entry.amount.currency)

            # Do not fail on duplicates for now. We still have many valid use
            # cases of duplicate prices on the same date, for example stock
            # splits, or trades on two dates with two separate reported
            # prices. Because the number is part of the key, disagreeing
            # prices on the same date are both kept; only exact duplicates
            # are skipped. We should ideally not use the number in the
            # de-dup key above and figure out a more elegant solution for
            # this in the long term.
            if key not in new_price_entry_map:
                new_price_entry_map[key] = price_entry
                new_entries.append(price_entry)

    return new_entries, errors
3,173 | from beancount.core import compare
The provided code snippet includes necessary dependencies for implementing the `validate_no_duplicates` function. Write a Python function `def validate_no_duplicates(entries, unused_options_map)` to solve the following problem:
Check that the entries are unique, by computing hashes. Args: entries: A list of directives. unused_options_map: An options map. Returns: A list of new errors, if any were found.
Here is the function:
def validate_no_duplicates(entries, unused_options_map):
    """Check that the entries are unique, by computing hashes.

    Args:
      entries: A list of directives.
      unused_options_map: An options map.
    Returns:
      The list of input entries, and a list of new errors, if any were found.
    """
    # The hashes themselves are not needed here; hash_entries() reports
    # collisions (duplicate entries) through its second return value.
    _, errors = compare.hash_entries(entries, exclude_meta=True)
    return entries, errors
3,174 | import collections
from beancount.core.number import ZERO
from beancount.core.number import D
from beancount.core.data import Transaction
from beancount.core.data import Booking
from beancount.core import getters
from beancount.core import inventory
MatchBasisError = collections.namedtuple('MatchBasisError', 'source message entry')
DEFAULT_TOLERANCE = 0.01
ZERO = Decimal()
def D(strord=None):
    """Convert a string into a Decimal object.

    This is used in parsing amounts from files in the importers. This is the
    main function you should use to build all numbers the system manipulates
    (never use floating-point in an accounting system). Commas are stripped and
    ignored, as they are assumed to be thousands separators (the French comma
    separator as decimal is not supported). This function just returns the
    argument if it is already a Decimal object, for convenience.

    Args:
      strord: A string, int, float or Decimal instance, or None.
    Returns:
      A Decimal instance.
    Raises:
      ValueError: If the value cannot be converted to a Decimal.
    """
    try:
        # Note: try a map lookup and optimize performance here.
        if strord is None or strord == '':
            # Missing/empty input maps to Decimal zero.
            return Decimal()
        elif isinstance(strord, str):
            # Strip separators before parsing.
            # NOTE(review): _CLEAN_NUMBER_RE is not defined in this snippet --
            # presumably it matches commas; verify against the module source.
            return Decimal(_CLEAN_NUMBER_RE.sub('', strord))
        elif isinstance(strord, Decimal):
            return strord
        elif isinstance(strord, (int, float)):
            return Decimal(strord)
        else:
            # Unsupported type: this assert always fails in this branch
            # (strord is not None here) and is converted to ValueError below.
            assert strord is None, "Invalid value to convert: {}".format(strord)
    except Exception as exc:
        raise ValueError("Impossible to create Decimal instance from {!s}: {}".format(
            strord, exc)) from exc
class Booking(enum.Enum):
    """Lot-booking methods used to disambiguate matches against an inventory."""
    # Reject ambiguous matches with an error.
    STRICT = 'STRICT'
    # Strict booking method, but disambiguate further with sizes. Reject
    # ambiguous matches with an error but if a lot matches the size exactly,
    # accept it the oldest.
    STRICT_WITH_SIZE = 'STRICT_WITH_SIZE'
    # Disable matching and accept the creation of mixed inventories.
    NONE = 'NONE'
    # Average cost booking: merge all matching lots before and after.
    AVERAGE = 'AVERAGE'
    # First-in first-out in the case of ambiguity.
    FIFO = 'FIFO'
    # Last-in first-out in the case of ambiguity.
    LIFO = 'LIFO'
    # Highest-in first-out in the case of ambiguity.
    HIFO = 'HIFO'
# NOTE(review): this class duplicates beancount.core.data.Transaction and
# shadows the name imported from beancount.core.data above -- presumably
# included as reference context; confirm the shadowing is intended.
# The annotations reference names (NamedTuple, Meta, Flag, Posting, datetime,
# Optional, Set, List) that are not defined in this snippet.
class Transaction(NamedTuple):
    """
    A transaction! This is the main type of object that we manipulate, and the
    entire reason this whole project exists in the first place, because
    representing these types of structures with a spreadsheet is difficult.
    Attributes:
      meta: See above.
      date: See above.
      flag: A single-character string or None. This user-specified string
        represents some custom/user-defined state of the transaction. You can use
        this for various purposes. Otherwise common, pre-defined flags are defined
        under beancount.core.flags, to flags transactions that are automatically
        generated.
      payee: A free-form string that identifies the payee, or None, if absent.
      narration: A free-form string that provides a description for the transaction.
        All transactions have at least a narration string, this is never None.
      tags: A set of tag strings (without the '#'), or EMPTY_SET.
      links: A set of link strings (without the '^'), or EMPTY_SET.
      postings: A list of Posting instances, the legs of this transaction. See the
        doc under Posting above.
    """
    meta: Meta
    date: datetime.date
    flag: Flag
    payee: Optional[str]
    narration: str
    tags: Set
    links: Set
    postings: List[Posting]
The provided code snippet includes necessary dependencies for implementing the `validate_average_cost` function. Write a Python function `def validate_average_cost(entries, options_map, config_str=None)` to solve the following problem:
Check that reducing legs on unbooked postings are near the average cost basis. Args: entries: A list of directives. unused_options_map: An options map. config_str: The configuration as a string version of a float. Returns: A list of new errors, if any were found.
Here is the function:
def validate_average_cost(entries, options_map, config_str=None):
    """Check that reducing legs on unbooked postings are near the average cost basis.

    Args:
      entries: A list of directives.
      options_map: An options map.
      config_str: The configuration as a string version of a float: the
        relative tolerance (e.g. "0.05" for 5%). Defaults to DEFAULT_TOLERANCE.
    Returns:
      The list of input entries, and a list of new errors, if any were found.
    Raises:
      RuntimeError: If the configuration string does not evaluate to a float.
    """
    # Initialize tolerance bounds.
    if config_str and config_str.strip():
        # The config comes from the user's own ledger file, so eval is the
        # established plugin convention here.
        # pylint: disable=eval-used
        config_obj = eval(config_str, {}, {})
        if not isinstance(config_obj, float):
            raise RuntimeError("Invalid configuration for check_average_cost: "
                               "must be a float")
        tolerance = config_obj
    else:
        tolerance = DEFAULT_TOLERANCE
    min_tolerance = D(1 - tolerance)
    max_tolerance = D(1 + tolerance)

    errors = []
    ocmap = getters.get_account_open_close(entries)
    # Running balance per (account, units currency, cost currency).
    balances = collections.defaultdict(inventory.Inventory)
    for entry in entries:
        if isinstance(entry, Transaction):
            for posting in entry.postings:
                dopen = ocmap.get(posting.account, None)
                # Only process accounts with a NONE booking value.
                if dopen and dopen[0] and dopen[0].booking == Booking.NONE:
                    balance = balances[(posting.account,
                                        posting.units.currency,
                                        posting.cost.currency if posting.cost else None)]
                    if posting.units.number < ZERO:
                        # Reducing leg: its cost must fall within tolerance
                        # of the average cost of the balance held so far.
                        average = balance.average().get_only_position()
                        if average is not None:
                            number = average.cost.number
                            min_valid = number * min_tolerance
                            max_valid = number * max_tolerance
                            if not (min_valid <= posting.cost.number <= max_valid):
                                errors.append(
                                    MatchBasisError(
                                        entry.meta,
                                        ("Cost basis on reducing posting is too far from "
                                         "the average cost ({} vs. {})".format(
                                             posting.cost.number, average.cost.number)),
                                        entry))
                    balance.add_position(posting)
    return entries, errors
3,175 | from beancount.core import data
from beancount.core.data import Open, Close
# NOTE(review): this class duplicates beancount.core.data.Open and shadows the
# name imported from beancount.core.data above -- presumably included as
# reference context; confirm the shadowing is intended. The annotations
# reference names (NamedTuple, Meta, datetime, Account, List, Currency,
# Optional, Booking) that are not defined in this snippet.
class Open(NamedTuple):
    """
    An "open account" directive.
    Attributes:
      meta: See above.
      date: See above.
      account: A string, the name of the account that is being opened.
      currencies: A list of strings, currencies that are allowed in this account.
        May be None, in which case it means that there are no restrictions on which
        currencies may be stored in this account.
      booking: A Booking enum, the booking method to use to disambiguate
        postings to this account (when zero or more than one postings match the
        specification), or None if not specified. In practice, this attribute will
        be should be left unspecified (None) in the vast majority of cases. See
        Booking below for a selection of valid methods.
    """
    meta: Meta
    date: datetime.date
    account: Account
    currencies: List[Currency]
    booking: Optional[Booking]
# NOTE(review): this class duplicates beancount.core.data.Close and shadows
# the name imported from beancount.core.data above -- presumably included as
# reference context; confirm the shadowing is intended. The annotations
# reference names (NamedTuple, Meta, datetime, Account) that are not defined
# in this snippet.
class Close(NamedTuple):
    """
    A "close account" directive.
    Attributes:
      meta: See above.
      date: See above.
      account: A string, the name of the account that is being closed.
    """
    meta: Meta
    date: datetime.date
    account: Account
The provided code snippet includes necessary dependencies for implementing the `close_tree` function. Write a Python function `def close_tree(entries, unused_options_map)` to solve the following problem:
Insert close entries for all subaccounts of a closed account. Args: entries: A list of directives. We're interested only in the Open/Close instances. unused_options_map: A parser options dict. Returns: A tuple of entries and errors.
Here is the function:
def close_tree(entries, unused_options_map):
    """Insert close entries for all subaccounts of a closed account.

    Args:
      entries: A list of directives. We're interested only in the Open/Close instances.
      unused_options_map: A parser options dict.
    Returns:
      A tuple of entries and errors.
    """
    new_entries = []
    errors = []

    opens = set(entry.account for entry in entries if isinstance(entry, Open))
    closes = set(entry.account for entry in entries if isinstance(entry, Close))

    for entry in entries:
        if not isinstance(entry, Close):
            new_entries.append(entry)
            continue

        # Opened, not-yet-closed strict subaccounts of the closed account.
        subaccounts = [
            account
            for account in opens
            if account.startswith(entry.account + ":") and account not in closes
        ]
        for subacc in subaccounts:
            meta = data.new_metadata("<beancount.plugins.close_tree>", 0)
            new_entries.append(data.Close(meta, entry.date, subacc))
            # So we don't attempt to re-close a grandchild that a child closed.
            closes.add(subacc)

        # NOTE(review): a Close for an account that was never opened is
        # silently dropped from the output here -- confirm this is intended.
        if entry.account in opens:
            new_entries.append(entry)

    return new_entries, errors
3,176 | import collections
from beancount.core.data import Posting
from beancount.core.data import Transaction
from beancount.core import account
from beancount.core import convert
from beancount.core import data
from beancount.core import inventory
META_PROCESSED = 'currency_accounts_processed'
DEFAULT_BASE_ACCOUNT = 'Equity:CurrencyAccounts'
def group_postings_by_weight_currency(entry: "Transaction"):
    """Group a transaction's postings by their weight currency.

    The weight currency of a posting is its cost currency when held at cost,
    its price currency when it has a price annotation (and no cost), and
    otherwise its units currency.

    Args:
      entry: A Transaction directive.
    Returns:
      A pair of (curmap, has_price), where curmap maps a currency name to the
      list of postings weighed in that currency, and has_price is True if any
      posting carries a price annotation.
    """
    curmap = collections.defaultdict(list)
    has_price = False
    for posting in entry.postings:
        currency = posting.units.currency
        if posting.cost:
            currency = posting.cost.currency
            if posting.price:
                # A price on a posting held at cost must be in the cost currency.
                assert posting.price.currency == currency
        elif posting.price:
            currency = posting.price.currency
        # Track the presence of any price annotation, with or without cost.
        if posting.price:
            has_price = True
        curmap[currency].append(posting)
    return curmap, has_price
def get_neutralizing_postings(curmap, base_account, new_accounts):
    """Rebalance each currency group with a currency trading account posting.

    Args:
      curmap: A dict of currency to a list of Postings of this transaction.
      base_account: A string, the root account name to insert.
      new_accounts: A set, a mutable accumulator of new account names.
    Returns:
      A new list of postings, with a neutralizing posting appended for each
      currency group that does not balance to zero at cost.
    """
    result = []
    for currency, group in curmap.items():
        # Sum the group's weights at cost.
        balance = inventory.Inventory()
        for posting in group:
            balance.add_amount(convert.get_cost(posting))

        # A balanced group needs no adjustment; pass its postings through.
        if balance.is_empty():
            result.extend(group)
            continue

        # Re-insert the original postings with their price conversions
        # stripped.
        #
        # Caveats carried over from the original prototype: stripping prices
        # can interfere with the implicit_prices plugin if it is configured to
        # run after this one, or with scripts that rely on the price
        # annotations; and some residual can seep through when a transaction
        # holds more than a single currency block. Both still need a proper
        # fix (e.g. metadata that disables price conversions on these
        # postings).
        result.extend(
            posting._replace(price=None) if posting.price is not None else posting
            for posting in group)

        # Append the offsetting posting on the currency trading account.
        residual = balance.get_only_position().units
        trading_account = account.join(base_account, currency)
        new_accounts.add(trading_account)
        result.append(
            Posting(trading_account, -residual, None, None, None, None))
    return result
class Transaction(NamedTuple):
    """A transaction! This is the main type of object that we manipulate, and
    the entire reason this whole project exists in the first place, because
    representing these types of structures with a spreadsheet is difficult.

    Attributes:
      meta: See above.
      date: See above.
      flag: A single-character string or None. This user-specified string
        represents some custom/user-defined state of the transaction. You can
        use this for various purposes. Otherwise common, pre-defined flags are
        defined under beancount.core.flags, to flag transactions that are
        automatically generated.
      payee: A free-form string that identifies the payee, or None, if absent.
      narration: A free-form string that provides a description for the
        transaction. All transactions have at least a narration string, this
        is never None.
      tags: A set of tag strings (without the '#'), or EMPTY_SET.
      links: A set of link strings (without the '^'), or EMPTY_SET.
      postings: A list of Posting instances, the legs of this transaction.
        See the doc under Posting above.
    """
    meta: Meta
    date: datetime.date
    flag: Flag
    payee: Optional[str]
    narration: str
    tags: Set
    links: Set
    postings: List[Posting]
The provided code snippet includes necessary dependencies for implementing the `insert_currency_trading_postings` function. Write a Python function `def insert_currency_trading_postings(entries, options_map, config)` to solve the following problem:
Insert currency trading postings. Args: entries: A list of directives. unused_options_map: An options map. config: The base account name for currency trading accounts. Returns: A list of new errors, if any were found.
Here is the function:
def insert_currency_trading_postings(entries, options_map, config):
    """Insert currency trading postings.

    Args:
      entries: A list of directives.
      options_map: An options map (unused).
      config: The base account name for currency trading accounts.
    Returns:
      A tuple of (new entries, errors), where errors is a list of new errors,
      if any were found.
    """
    base_account = config.strip()
    if not account.is_valid(base_account):
        base_account = DEFAULT_BASE_ACCOUNT

    errors = []
    new_entries = []
    new_accounts = set()
    for entry in entries:
        if isinstance(entry, Transaction):
            curmap, has_price = group_postings_by_weight_currency(entry)
            # Only rebalance multi-currency transactions that contain at
            # least one price conversion.
            if has_price and len(curmap) > 1:
                new_postings = get_neutralizing_postings(
                    curmap, base_account, new_accounts)
                entry = entry._replace(postings=new_postings)
                if META_PROCESSED:
                    entry.meta[META_PROCESSED] = True
        new_entries.append(entry)

    # Robustness fix: with an empty input list, indexing entries[0] below
    # would raise IndexError. There can be no new accounts in that case.
    if not entries:
        return new_entries, errors

    # Open every new currency trading account at the earliest ledger date.
    earliest_date = entries[0].date
    open_entries = [
        data.Open(data.new_metadata('<currency_accounts>', index),
                  earliest_date, acc, None, None)
        for index, acc in enumerate(sorted(new_accounts))]
    return open_entries + new_entries, errors
3,177 | import argparse
import collections
import re
import sys
The provided code snippet includes necessary dependencies for implementing the `dump_tree` function. Write a Python function `def dump_tree(node, file=sys.stdout, prefix='')` to solve the following problem:
Render a tree as a tree. Args: node: An instance of Node. file: A file object to write to. prefix: A prefix string for each of the lines of the children.
Here is the function:
def dump_tree(node, file=sys.stdout, prefix=''):
    """Write an indented text rendering of a node and its subtree.

    Args:
      node: An instance of Node (has a .name and iterates its children).
      file: A writable file object to render to.
      prefix: A string prepended to each emitted line; it grows by '... '
        for every nesting level.
    """
    file.write(prefix + node.name + '\n')
    child_prefix = prefix + '... '
    for child in node:
        dump_tree(child, file, child_prefix)
3,178 | import argparse
import collections
import re
import sys
# Default regular expression for the hierarchical names to treeify: a
# top-level account type followed by colon-separated capitalized components.
DEFAULT_PATTERN = (r"(Assets|Liabilities|Equity|Income|Expenses)"
r"(:[A-Z][A-Za-z0-9-_']*)*")
# Default pattern that marks the end of the column text (a run of blanks).
DEFAULT_DELIMITER = "[ \t]+"
# Default pattern used to split a matched name into its components.
DEFAULT_SPLITTER = ":"
# Looser variants accepting arbitrary colon-separated words (used by the
# --loose-accounts option).
LOOSE_PATTERN = r"\b([A-Za-z0-9-_']+)(:[A-Za-z0-9-_']+)+\b"
LOOSE_SPLITTER = r":"
# Variants suitable for slash-separated filenames (used by the --filenames
# option).
FILENAME_PATTERN = r"([^ \t]*)(/[^ \t]*)+"
FILENAME_SPLITTER = r"/"
def find_column(lines, pattern, delimiter):
    """Find a valid column with hierarchical data in the text lines.

    Args:
      lines: A list of strings, the contents of the input.
      pattern: A regular expression for the hierarchical entries.
      delimiter: A regular expression that dictates how we detect the end of
        a column. Normally this is a single space. If the patterns contain
        spaces, you will need to increase this.
    Returns:
      A tuple of
        matches: A list of (line-number, name) tuples where 'name' is the
          hierarchical string to treeify and line-number is an integer, the
          line number where this applies.
        left: An integer, the leftmost column.
        right: An integer, the rightmost column.
      or None if no valid column could be found. Not all line numbers may be
      present, but they are guaranteed to be in sorted order.
    """
    # Group candidate matches by the column index at which they begin.
    candidates = collections.defaultdict(list)
    column_regexp = "({})(?P<ws>{}.|$)".format(pattern, delimiter)
    for lineno, line in enumerate(lines):
        for match in re.finditer(column_regexp, line):
            candidates[match.start()].append((lineno, line, match))

    # A column is only valid if the rightmost extent of its matched text does
    # not overlap any of the text that follows it on any line. For example,
    # this is invalid because "10,990.74" starts before "Insurance" ends:
    #
    #   Expenses:Food:Restaurant 10,990.74 USD
    #   Expenses:Health:Dental:Insurance 208.80 USD
    #
    for left, matches in sorted(candidates.items()):
        # Rightmost end of the matched column text.
        right = max(m.end(1) for _, _, m in matches)
        # Leftmost start of any content past the column and its whitespace;
        # an end-of-line match (empty 'ws' group) imposes no constraint.
        following = min(m.end() if m.group('ws') else 10000
                        for _, _, m in matches)
        if right < following:
            # Return only the very first valid column.
            found = [(lineno, m.group(1).rstrip())
                     for lineno, _, m in matches]
            return found, left, right
def create_tree(column_matches, regexp_split):
    """Build up a tree of Node objects from a list of matches.

    Args:
      column_matches: A list of (line-number, name) pairs.
      regexp_split: A regular expression string, to use for splitting the
        names into components.
    Returns:
      An instance of Node, the root node of the created tree.
    """
    root = Node('')
    for lineno, name in column_matches:
        # Walk down from the root, reusing the most recently appended child
        # when its name matches the component, otherwise creating a new one.
        current = root
        for component in re.split(regexp_split, name):
            tail = current[-1] if current else None
            if tail is None or tail.name != component:
                tail = Node(component)
                current.append(tail)
            current = tail
        # Record the input line number on the terminal node.
        current.nos.append(lineno)
    return root
def render_tree(root):
    """Render a tree of nodes into prefixed text lines.

    Args:
      root: The root Node of the tree to render.
    Returns:
      A tuple of (lines, width) where 'lines' is a list of tuples of
      (first_line, continuation_line, node):
        first_line: A string, the first line to render, which includes the
          account name.
        continuation_line: A string, further line to render if necessary.
        node: The Node instance which corresponds to this line.
      and 'width' is an integer, the common width the lines are padded to.
      An empty tree renders as ([], 0).
    """
    # Compute all the lines ahead of time in order to calculate the width.
    lines = []

    # Depth-first traversal via an explicit stack of
    # (prefix, name, node, is-last-child) tuples, starting at the root.
    stack = [('', root.name, root, True)]
    while stack:
        prefix, name, node, is_last = stack.pop(-1)
        if node is root:
            # For the root node, we don't want to render any prefix.
            first = cont = ''
        else:
            # Compute the string that precedes the name directly and the one
            # below that for the continuation lines.
            #   |
            #   @@@ Bank1    <----------------
            #   @@@ |
            #   |   |-- Checking
            if is_last:
                first = prefix + PREFIX_LEAF_1
                cont = prefix + PREFIX_LEAF_C
            else:
                first = prefix + PREFIX_CHILD_1
                cont = prefix + PREFIX_CHILD_C

        # Compute the name to render for continuation lines.
        #   |
        #   |-- Bank1
        #   |   @@@      <----------------
        #   |   |-- Checking
        cont_name = PREFIX_CHILD_C if len(node) > 0 else PREFIX_LEAF_C

        # Add a line for this node, except for a nameless root.
        if not (node is root and not name):
            lines.append((first + name,
                          cont + cont_name,
                          node))

        # Push the children onto the stack in reverse so they pop in order;
        # the one pushed first (the last child) is marked as such.
        if node:
            child_iter = iter(reversed(node))
            child_node = next(child_iter)
            stack.append((cont, child_node.name, child_node, True))
            for child_node in child_iter:
                stack.append((cont, child_node.name, child_node, False))

    if not lines:
        # Bug fix: the original returned the bare empty list here, which is
        # inconsistent with the (lines, width) tuple returned below and would
        # break callers that unpack two values.
        return [], 0

    # Compute the maximum width of the lines and convert all of them to the
    # same maximal width. This makes it easy on the client.
    max_width = max(len(first_line) for first_line, _, __ in lines)
    line_format = '{{:{width}}}'.format(width=max_width)
    return [(line_format.format(first_line),
             line_format.format(cont_line),
             node)
            for (first_line, cont_line, node) in lines], max_width
def enum_tree_by_input_line_num(tree_lines):
    """Accumulate the lines of a tree until a line number is found.

    Args:
      tree_lines: A list of (first_line, cont_line, node) tuples, as returned
        by render_tree.
    Yields:
      Pairs of (line number, list of (line, node)). The final pair uses None
      as the line number if any lines remain unattached.
    """
    buffered = []
    for first_line, cont_line, node in tree_lines:
        if not node.nos:
            # No input line attached yet; hold this line back.
            buffered.append((first_line, node))
            continue
        # Emit once per recorded input line number; only the first emission
        # uses the primary rendering, the rest use the continuation.
        rendering = first_line
        for lineno in node.nos:
            buffered.append((rendering, node))
            rendering = cont_line
            yield (lineno, buffered)
            buffered = []
    if buffered:
        yield (None, buffered)
def _main():
"""Command-line driver: read the input, detect a hierarchical column,
and rewrite it as a rendered tree on the output.

Returns:
  -1 when no valid column is found (input is forwarded unchanged);
  None otherwise.
"""
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('input', nargs='?', action='store',
help='Name of the file to process (default: stdin)')
parser.add_argument('-o', '--output', action='store',
help='Name of the file to write (default: stdout)')
parser.add_argument('-r', '--pattern', action='store',
default=None,
help=("Pattern for repeatable components "
"(default: \"{}\")".format(DEFAULT_PATTERN)))
parser.add_argument('-d', '--delimiter', action='store',
default=DEFAULT_DELIMITER,
help=("Delimiter pattern to detect the end of a column text. "
"If your pattens contain strings, you may want to set this "
"to a longer string, like ' {{2,}}' "
"(default: \"{}\")").format(DEFAULT_DELIMITER))
parser.add_argument('-s', '--split', action='store',
default=DEFAULT_SPLITTER,
help="Pattern splitting into components (default: \"{}\")".format(
DEFAULT_SPLITTER))
parser.add_argument('-F', '--filenames', action='store_true',
help="Use pattern and split suitable for filenames")
parser.add_argument('-A', '--loose-accounts', action='store_true',
help="Use pattern and split suitable for loose account names")
parser.add_argument('--filler', action='store',
default=' ',
help="Filler string for new lines inserted for formatting")
args = parser.parse_args()
# The three pattern-selecting options are mutually exclusive.
if sum(1 if expr else 0 for expr in (args.filenames,
args.loose_accounts,
args.pattern)) > 1:
parser.error("Conflicted pattern options")
if args.pattern is None:
args.pattern = DEFAULT_PATTERN
if args.filenames:
# Note: you could spit out an error if the other options are set: "you may
# not use --filenames and specify a pattern or split" or somesuch.
args.pattern = FILENAME_PATTERN
args.split = FILENAME_SPLITTER
elif args.loose_accounts:
args.pattern = LOOSE_PATTERN
args.split = LOOSE_SPLITTER
# Open input and output files.
input_file = open(args.input, 'r') if args.input else sys.stdin
output_file = open(args.output, 'w') if args.output else sys.stdout
lines = list(input_file)
# Find a column in the file. If not found, this will return None.
result = find_column(lines, args.pattern, args.delimiter)
if result is None:
# No treeifiable column: forward the input verbatim and signal failure.
print("WARNING: Could not find any valid column in input",
file=sys.stderr)
for line in lines:
output_file.write(line)
output_file.close()
return -1
column_matches, left, right = result
# Process the input lines again, this time with the column.
root = create_tree(column_matches, args.split)
# Render the tree we just inferred from the list of names.
tree_lines, new_column_width = render_tree(root)
# Compute minimum width of the resulting tree. It should not be less than
# the original width of the column.
old_column_width = right - left
line_format = "{{:{}}}".format(max(old_column_width, new_column_width))
# Iterate the tree by input line number. This is done so that we can
# render the new hierarchy lines as closely as possible to real input
# lines... we delay rendering those until we have a match with a real line
# to render.
tree_iter = enum_tree_by_input_line_num(tree_lines)
no, next_tree_lines = next(tree_iter)
# Iterate over the input lines, rendering the tree lines only when
# necessary.
input_lines_iter = iter(enumerate(lines))
for input_no, input_line in input_lines_iter:
if input_no < no:
# Catch up to the next line we need to render for the tree by
# forwarding just the input line.
output_file.write(input_line)
else:
assert input_no == no, (input_no, no)
for line, node in next_tree_lines:
if not node.nos:
# Render new lines, inserted just for the hierarchy.
prefix_string = args.filler * (left//len(args.filler)+1)
output_file.write(prefix_string[:left])
output_file.write(line.rstrip())
output_file.write('\n')
else:
# Render lines that replace previous lines, with prefix and
# suffix.
prefix = input_line[:left]
suffix = input_line[right:].rstrip('\r\n')
out_line = prefix + line_format.format(line) + suffix
output_file.write(out_line.rstrip())
output_file.write('\n')
try:
no, next_tree_lines = next(tree_iter)
except StopIteration:
break
# Render the rest of the input lines after the tree is done rendering.
for _, input_line in input_lines_iter:
output_file.write(input_line)
3,179 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `deprecated` function. Write a Python function `def deprecated(message)` to solve the following problem:
A decorator generator to mark functions as deprecated and log a warning.
Here is the function:
def deprecated(message):
    """Decorator factory that marks a function as deprecated.

    Each call to the wrapped function emits a DeprecationWarning.

    Args:
      message: A string appended to the warning text.
    Returns:
      A decorator.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn("Call to deprecated function {}: {}".format(func.__name__,
                                                                      message),
                          category=DeprecationWarning,
                          stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator
3,180 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `box` function. Write a Python function `def box(name=None, file=None)` to solve the following problem:
A context manager that prints out a box around a block. This is useful for printing out stuff from tests in a way that is readable. Args: name: A string, the name of the box to use. file: The file object to print to. Yields: None.
Here is the function:
def box(name=None, file=None):
    """A context manager body that prints out a box around a block.

    This is useful for printing out stuff from tests in a way that is
    readable.

    Args:
      name: A string, the name of the box to use.
      file: The file object to print to (default: stdout).
    Yields:
      None.
    """
    out = file or sys.stdout
    out.write('\n')
    if name:
        top = ',--------({})--------\n'.format(name)
        bottom = '`{}\n'.format('-' * (len(top)-2))
    else:
        top = ',----------------\n'
        bottom = '`----------------\n'
    out.write(top)
    yield
    out.write(bottom)
    out.flush()
3,181 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `longest` function. Write a Python function `def longest(seq)` to solve the following problem:
Return the longest of the given subsequences. Args: seq: An iterable sequence of lists. Returns: The longest list from the sequence.
Here is the function:
def longest(seq):
    """Return the longest of the given subsequences.

    Ties go to the earliest element; an empty sequence yields None.

    Args:
      seq: An iterable sequence of lists.
    Returns:
      The longest list from the sequence, or None if the sequence is empty.
    """
    return max(seq, key=len, default=None)
3,182 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `get_tuple_values` function. Write a Python function `def get_tuple_values(ntuple, predicate, memo=None)` to solve the following problem:
Return all members referred to by this namedtuple instance that satisfy the given predicate. This function also works recursively on its members which are lists or tuples, and so it can be used for Transaction instances. Args: ntuple: A tuple or namedtuple. predicate: A predicate function that returns true if an attribute is to be output. memo: An optional memoizing dictionary. If a tuple has already been seen, the recursion will be avoided. Yields: Attributes of the tuple and its sub-elements if the predicate is true.
Here is the function:
def get_tuple_values(ntuple, predicate, memo=None):
    """Return all members referred to by this namedtuple instance that satisfy
    the given predicate. This function also works recursively on its members
    which are lists or tuples, and so it can be used for Transaction
    instances.

    Args:
      ntuple: A tuple or namedtuple.
      predicate: A predicate function that returns true if an attribute is to
        be output.
      memo: An optional memoizing set of ids. If a tuple has already been
        seen, the recursion will be avoided.
    Yields:
      Attributes of the tuple and its sub-elements if the predicate is true.
    """
    if memo is None:
        memo = set()
    # Avoid revisiting (and potentially cycling on) already-seen containers.
    id_ntuple = id(ntuple)
    if id_ntuple in memo:
        return
    memo.add(id_ntuple)

    # Bug fix: the original had a bare 'yield' here, which produced None
    # instead of the matching container itself, contradicting the docstring.
    if predicate(ntuple):
        yield ntuple
    for attribute in ntuple:
        if predicate(attribute):
            yield attribute
        if isinstance(attribute, (list, tuple)):
            for value in get_tuple_values(attribute, predicate, memo):
                yield value
3,183 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `replace_namedtuple_values` function. Write a Python function `def replace_namedtuple_values(ntuple, predicate, mapper, memo=None)` to solve the following problem:
Recurse through all the members of namedtuples and lists, and for members that match the given predicate, run them through the given mapper. Args: ntuple: A namedtuple instance. predicate: A predicate function that returns true if an attribute is to be output. mapper: A callable, that will accept a single argument and return its replacement value. memo: An optional memoizing dictionary. If a tuple has already been seen, the recursion will be avoided. Yields: Attributes of the tuple and its sub-elements if the predicate is true.
Here is the function:
def replace_namedtuple_values(ntuple, predicate, mapper, memo=None):
    """Recurse through all the members of namedtuples and lists, and for
    members that match the given predicate, run them through the given
    mapper.

    Args:
      ntuple: A namedtuple instance.
      predicate: A predicate function that returns true if an attribute is to
        be replaced.
      mapper: A callable, that will accept a single argument and return its
        replacement value.
      memo: An optional memoizing set of ids. If a tuple has already been
        seen, the recursion will be avoided.
    Returns:
      A copy of the namedtuple with matching members replaced (None for an
      already-seen container; non-namedtuple inputs are returned unchanged).
    """
    if memo is None:
        memo = set()
    key = id(ntuple)
    if key in memo:
        return None
    memo.add(key)

    # Only recurse into namedtuples (tuple subclasses); anything else,
    # including a plain tuple, is returned as-is.
    # pylint: disable=unidiomatic-typecheck
    if type(ntuple) is tuple or not isinstance(ntuple, tuple):
        return ntuple

    replacements = {}
    for field, value in zip(ntuple._fields, ntuple):
        if predicate(value):
            replacements[field] = mapper(value)
        elif type(value) is not tuple and isinstance(value, tuple):
            # A nested namedtuple: recurse into it.
            replacements[field] = replace_namedtuple_values(
                value, predicate, mapper, memo)
        elif type(value) in (list, tuple):
            # A plain list or tuple: map each member (result is a list).
            replacements[field] = [
                replace_namedtuple_values(member, predicate, mapper, memo)
                for member in value]
    return ntuple._replace(**replacements)
3,184 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `compute_unique_clean_ids` function. Write a Python function `def compute_unique_clean_ids(strings)` to solve the following problem:
Given a sequence of strings, reduce them to corresponding ids without any funny characters and ensure that the list of ids is unique. Yields pairs of (id, string) for the result. Args: strings: A list of strings. Returns: A list of (id, string) pairs.
Here is the function:
def compute_unique_clean_ids(strings):
    """Given a sequence of strings, reduce them to corresponding ids without
    any funny characters, ensuring that the set of ids is unique.

    Args:
      strings: A list of strings.
    Returns:
      A dict mapping each generated id to its original string, or None if no
      collision-free mapping could be found.
    """
    unique_strings = set(strings)
    # Try multiple cleaning methods until one produces no collisions.
    for regexp, replacement in [(r'[^A-Za-z0-9.-]', '_'),
                                (r'[^A-Za-z0-9_]', '')]:
        pattern = re.compile(regexp)
        idmap = {}
        collided = False
        for string in unique_strings:
            id_ = pattern.sub(replacement, string)
            if id_ in idmap:
                collided = True
                break
            idmap[id_] = string
        if not collided:
            return idmap
    # Could not find a unique mapping.
    return None
3,185 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `escape_string` function. Write a Python function `def escape_string(string)` to solve the following problem:
Escape quotes and backslashes in payee and narration. Args: string: Any string. Returns. The input string, with offending characters replaced.
Here is the function:
def escape_string(string):
    """Escape quotes and backslashes in payee and narration.

    Backslashes are escaped first so that the quote-escaping backslashes are
    not themselves doubled.

    Args:
      string: Any string.
    Returns:
      The input string, with offending characters replaced.
    """
    escaped = string.replace('\\', r'\\')
    return escaped.replace('"', r'\"')
3,186 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `idify` function. Write a Python function `def idify(string)` to solve the following problem:
Replace characters objectionable for a filename with underscores. Args: string: Any string. Returns: The input string, with offending characters replaced.
Here is the function:
def idify(string):
    """Replace characters objectionable for a filename with underscores.

    Args:
      string: Any string.
    Returns:
      The input string, with offending characters replaced and leading and
      trailing underscores stripped.
    """
    # Collapse spaces and parentheses into single underscores, then strip
    # underscores adjacent to dots.
    string = re.sub(r'[ \(\)]+', '_', string)
    string = re.sub(r'_*\._*', '.', string)
    return string.strip('_')
3,187 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `dictmap` function. Write a Python function `def dictmap(mdict, keyfun=None, valfun=None)` to solve the following problem:
Map a dictionary's value. Args: mdict: A dict. key: A callable to apply to the keys. value: A callable to apply to the values.
Here is the function:
def dictmap(mdict, keyfun=None, valfun=None):
    """Build a new dict with keys and values transformed.

    Args:
      mdict: A dict.
      keyfun: A callable to apply to the keys (default: identity).
      valfun: A callable to apply to the values (default: identity).
    Returns:
      A new dict {keyfun(key): valfun(value)}.
    """
    def identity(x):
        return x
    kfun = identity if keyfun is None else keyfun
    vfun = identity if valfun is None else valfun
    return {kfun(key): vfun(value) for key, value in mdict.items()}
3,188 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `map_namedtuple_attributes` function. Write a Python function `def map_namedtuple_attributes(attributes, mapper, object_)` to solve the following problem:
Map the value of the named attributes of object by mapper. Args: attributes: A sequence of string, the attribute names to map. mapper: A callable that accepts the value of a field and returns the new value. object_: Some namedtuple object with attributes on it. Returns: A new instance of the same namedtuple with the named fields mapped by mapper.
Here is the function:
def map_namedtuple_attributes(attributes, mapper, object_):
    """Map the value of the named attributes of object by mapper.

    Args:
      attributes: A sequence of strings, the attribute names to map.
      mapper: A callable that accepts the value of a field and returns the
        new value.
      object_: Some namedtuple object with attributes on it.
    Returns:
      A new instance of the same namedtuple with the named fields mapped by
      mapper.
    """
    updated = {name: mapper(getattr(object_, name)) for name in attributes}
    return object_._replace(**updated)
3,189 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `staticvar` function. Write a Python function `def staticvar(varname, initial_value)` to solve the following problem:
Returns a decorator that defines a Python function attribute. This is used to simulate a static function variable in Python. Args: varname: A string, the name of the variable to define. initial_value: The value to initialize the variable to. Returns: A function decorator.
Here is the function:
def staticvar(varname, initial_value):
    """Returns a decorator that defines a Python function attribute.

    This is used to simulate a static function variable in Python.

    Args:
      varname: A string, the name of the attribute to define.
      initial_value: The value to initialize the attribute to.
    Returns:
      A function decorator that sets the attribute and returns the function
      unchanged.
    """
    def decorator(fun):
        setattr(fun, varname, initial_value)
        return fun
    return decorator
3,190 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `first_paragraph` function. Write a Python function `def first_paragraph(docstring)` to solve the following problem:
Return the first sentence of a docstring. The sentence has to be delimited by an empty line. Args: docstring: A doc string. Returns: A string with just the first sentence on a single line.
Here is the function:
def first_paragraph(docstring):
    """Return the first paragraph of a docstring as a single line.

    The paragraph ends at the first empty line.

    Args:
      docstring: A doc string.
    Returns:
      A string with the first paragraph joined on a single line.
    """
    paragraph = []
    for line in docstring.strip().splitlines():
        if not line:
            # Blank line: end of the first paragraph.
            break
        paragraph.append(line.rstrip())
    return ' '.join(paragraph)
3,191 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
def _get_screen_value(attrname, default=0):
    """Query a numeric terminfo capability for the controlling terminal.

    Args:
      attrname: A string, the terminfo capability name (e.g. 'cols', 'lines').
      default: Value returned when curses is unavailable or the terminal
        cannot be initialized.
    Returns:
      The integer capability value, or the default.
    """
    try:
        curses = import_curses()
    except ImportError:
        return default
    try:
        curses.setupterm()
        return curses.tigetnum(attrname)
    except (io.UnsupportedOperation, curses.error):
        return default
The provided code snippet includes necessary dependencies for implementing the `get_screen_width` function. Write a Python function `def get_screen_width()` to solve the following problem:
Return the width of the terminal that runs this program. Returns: An integer, the number of characters the screen is wide. Return 0 if the terminal cannot be initialized.
Here is the function:
def get_screen_width():
    """Return the character width of the controlling terminal.

    Returns:
      An integer number of columns, or 0 when the terminal cannot be
      initialized.
    """
    return _get_screen_value('cols', 0)
3,192 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
def _get_screen_value(attrname, default=0):
"""Return the width or height of the terminal that runs this program."""
try:
curses = import_curses()
except ImportError:
value = default
else:
try:
curses.setupterm()
value = curses.tigetnum(attrname)
except (io.UnsupportedOperation, curses.error):
value = default
return value
The provided code snippet includes necessary dependencies for implementing the `get_screen_height` function. Write a Python function `def get_screen_height()` to solve the following problem:
Return the height of the terminal that runs this program. Returns: An integer, the number of characters the screen is high. Return 0 if the terminal cannot be initialized.
Here is the function:
def get_screen_height():
    """Return the character height of the controlling terminal.

    Returns:
      An integer number of lines, or 0 when the terminal cannot be
      initialized.
    """
    return _get_screen_value('lines', 0)
3,193 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
class TypeComparable:
    """Mixin that makes equality require an exact type match.

    Instances compare equal only when the other object is an instance
    of the same class and the superclass comparison also succeeds.
    """

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return super().__eq__(other)
The provided code snippet includes necessary dependencies for implementing the `cmptuple` function. Write a Python function `def cmptuple(name, attributes)` to solve the following problem:
Manufacture a comparable namedtuple class, similar to collections.namedtuple. A comparable named tuple is a tuple which compares to False if contents are equal but the data types are different. We define this to supplement collections.namedtuple because by default a namedtuple disregards the type and we want to make precise comparisons for tests. Args: name: The given name of the class. attributes: A string or tuple of strings, with the names of the attributes. Returns: A new namedtuple-derived type that compares False with other tuples with same contents.
Here is the function:
def cmptuple(name, attributes):
    """Manufacture a type-strict namedtuple class.

    Unlike a plain collections.namedtuple, the returned class compares
    unequal to tuples of a different type even when the contents match.
    This is useful for precise comparisons in tests.

    Args:
      name: The given name of the class.
      attributes: A string or tuple of strings, the attribute names.
    Returns:
      A new namedtuple-derived type that compares False with other
      tuples with same contents.
    """
    hidden_base = collections.namedtuple('_{}'.format(name), attributes)
    return type(name, (TypeComparable, hidden_base), {})
3,194 | from collections import defaultdict
from time import time
import collections
import contextlib
import functools
import io
import re
import sys
import warnings
The provided code snippet includes necessary dependencies for implementing the `is_sorted` function. Write a Python function `def is_sorted(iterable, key=lambda x: x, cmp=lambda x, y: x <= y)` to solve the following problem:
Return true if the sequence is sorted. Args: iterable: An iterable sequence. key: A function to extract the quantity by which to sort. cmp: A function that compares two elements of a sequence. Returns: A boolean, true if the sequence is sorted.
Here is the function:
def is_sorted(iterable, key=lambda x: x, cmp=lambda x, y: x <= y):
    """Return true if the sequence is sorted.

    Args:
      iterable: An iterable sequence.
      key: A function to extract the quantity by which to sort.
      cmp: A function that compares two consecutive extracted values
        and returns true when they are in order.
    Returns:
      A boolean, true if the sequence is sorted. An empty or
      single-element sequence is considered sorted.
    """
    iterator = map(key, iterable)
    # Use a sentinel so an empty iterable returns True instead of
    # leaking StopIteration out of next().
    sentinel = object()
    prev = next(iterator, sentinel)
    if prev is sentinel:
        return True
    for element in iterator:
        if not cmp(prev, element):
            return False
        prev = element
    return True
3,195 | import shelve
import threading
import hashlib
import datetime
import functools
import io
def now():
    """Return the current local datetime (wrapped so tests can stub it)."""
    return datetime.datetime.now()
The provided code snippet includes necessary dependencies for implementing the `memoize_recent_fileobj` function. Write a Python function `def memoize_recent_fileobj(function, cache_filename, expiration=None)` to solve the following problem:
Memoize recent calls to the given function which returns a file object. The results of the cache expire after some time. Args: function: A callable object. cache_filename: A string, the path to the database file to cache to. expiration: The time during which the results will be kept valid. Use 'None' to never expire the cache (this is the default). Returns: A memoized version of the function.
Here is the function:
def memoize_recent_fileobj(function, cache_filename, expiration=None):
    """Memoize recent calls to a function which returns a file object.

    Cached results may be given a finite lifetime, after which the
    underlying function is invoked again.

    Args:
      function: A callable object returning a file object (or None).
      cache_filename: A string, the path to the cache database file.
      expiration: The time during which the results will be kept valid.
        Use 'None' to never expire the cache (this is the default).
    Returns:
      A memoized version of the function.
    """
    urlcache = shelve.open(cache_filename, 'c')
    urlcache.lock = threading.Lock()  # Note: 'shelve' is not thread-safe.

    @functools.wraps(function)
    def memoized(*args, **kw):
        # Key the cache on a digest of the full call signature; kwargs
        # are sorted so their ordering does not matter.
        digest = hashlib.md5()
        digest.update(str(args).encode('utf-8'))
        digest.update(str(sorted(kw.items())).encode('utf-8'))
        cache_key = digest.hexdigest()

        time_now = now()
        try:
            with urlcache.lock:
                time_orig, contents = urlcache[cache_key]
                if expiration is not None and (time_now - time_orig) > expiration:
                    # Entry is too old: behave as if it was never cached.
                    raise KeyError
        except KeyError:
            fileobj = function(*args, **kw)
            if fileobj:
                contents = fileobj.read()
                with urlcache.lock:
                    urlcache[cache_key] = (time_now, contents)
            else:
                contents = None

        return io.BytesIO(contents) if contents else None

    return memoized
3,196 | import re
import subprocess
from os import path
The provided code snippet includes necessary dependencies for implementing the `is_gpg_installed` function. Write a Python function `def is_gpg_installed()` to solve the following problem:
Return true if GPG 1.4.x or 2.x are installed, which is what we use and support.
Here is the function:
def is_gpg_installed():
    """Return true if GPG 1.4.x or 2.x are installed, which is what we use and support."""
    try:
        proc = subprocess.Popen(['gpg', '--version'], shell=0,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, _stderr = proc.communicate()
        version_text = stdout.decode('utf8')
        # Truthy match object doubles as the boolean result.
        return proc.returncode == 0 and re.match(
            r'gpg \(GnuPG\) (1\.4|2)\.', version_text)
    except OSError:
        # 'gpg' binary not present at all.
        return False
3,197 | import collections
import functools
import threading
The provided code snippet includes necessary dependencies for implementing the `snoopify` function. Write a Python function `def snoopify(function)` to solve the following problem:
Decorate a function as snoopable. This is meant to reassign existing functions to a snoopable version of them. For example, if you wanted 're.match' to be automatically snoopable, just decorate it like this: re.match = snoopify(re.match) and then you can just call 're.match' in a conditional and then access 're.match.value' to get to the last returned value.
Here is the function:
def snoopify(function):
    """Decorate a function so its last return value can be inspected.

    This is meant to reassign existing functions to a snoopable version
    of them. For example, to make 're.match' snoopable:

      re.match = snoopify(re.match)

    then call 're.match' normally and read 're.match.value' to get the
    last returned value.
    """
    @functools.wraps(function)
    def wrapper(*args, **kw):
        result = function(*args, **kw)
        wrapper.value = result
        return result
    wrapper.value = None
    return wrapper
3,198 | import importlib
The provided code snippet includes necessary dependencies for implementing the `import_symbol` function. Write a Python function `def import_symbol(dotted_name)` to solve the following problem:
Import a symbol in an arbitrary module. Args: dotted_name: A dotted path to a symbol. Returns: The object referenced by the given name. Raises: ImportError: If the module could not be imported. AttributeError: If the symbol could not be found in the module.
Here is the function:
def import_symbol(dotted_name):
    """Import a symbol from an arbitrary module by its dotted path.

    Args:
      dotted_name: A dotted path to a symbol, e.g. 'os.path.join'.
    Returns:
      The object referenced by the given name.
    Raises:
      ImportError: If the module could not be imported.
      AttributeError: If the symbol could not be found in the module.
    """
    module_name, _, symbol_name = dotted_name.rpartition('.')
    module = importlib.import_module(module_name)
    return getattr(module, symbol_name)
3,199 | import contextlib
import datetime
import os
import time
The provided code snippet includes necessary dependencies for implementing the `iter_dates` function. Write a Python function `def iter_dates(start_date, end_date)` to solve the following problem:
Yield all the dates between 'start_date' and 'end_date'. Args: start_date: An instance of datetime.date. end_date: An instance of datetime.date. Yields: Instances of datetime.date.
Here is the function:
def iter_dates(start_date, end_date):
    """Yield every date in [start_date, end_date), in order.

    Args:
      start_date: An instance of datetime.date (inclusive).
      end_date: An instance of datetime.date (exclusive).
    Yields:
      Instances of datetime.date.
    """
    one_day = datetime.timedelta(days=1)
    current = start_date
    while current < end_date:
        yield current
        current += one_day
3,200 | import contextlib
import datetime
import os
import time
The provided code snippet includes necessary dependencies for implementing the `render_ofx_date` function. Write a Python function `def render_ofx_date(dtime)` to solve the following problem:
Render a datetime to the OFX format. Args: dtime: A datetime.datetime instance. Returns: A string, rendered to milliseconds.
Here is the function:
def render_ofx_date(dtime):
    """Render a datetime to the OFX format.

    Args:
      dtime: A datetime.datetime instance.
    Returns:
      A string, rendered to millisecond precision.
    """
    millis = dtime.microsecond // 1000
    return '{}.{:03d}'.format(dtime.strftime('%Y%m%d%H%M%S'), millis)
3,201 | import contextlib
import datetime
import os
import time
The provided code snippet includes necessary dependencies for implementing the `next_month` function. Write a Python function `def next_month(date)` to solve the following problem:
Compute the date at the beginning of the following month from the given date. Args: date: A datetime.date instance. Returns: A datetime.date instance, the first day of the month following 'date'.
Here is the function:
def next_month(date):
    """Compute the first day of the month following the given date.

    Args:
      date: A datetime.date instance.
    Returns:
      A datetime.date instance, the first day of the month following
      'date'.
    """
    # December rolls over into January of the next year.
    if date.month == 12:
        return datetime.date(date.year + 1, 1, 1)
    return datetime.date(date.year, date.month + 1, 1)
3,202 | import contextlib
import datetime
import os
import time
The provided code snippet includes necessary dependencies for implementing the `intimezone` function. Write a Python function `def intimezone(tz_value: str)` to solve the following problem:
Temporarily reset the value of TZ. This is used for testing. Args: tz_value: The value of TZ to set for the duration of this context. Returns: A contextmanager in the given timezone locale.
Here is the function:
@contextlib.contextmanager
def intimezone(tz_value: str):
    """Temporarily reset the value of TZ.

    This is used for testing. The docstring promises a context manager,
    but the bare generator was missing the @contextlib.contextmanager
    decorator; it is restored here so 'with intimezone(...)' works.

    Args:
      tz_value: The value of TZ to set for the duration of this context.
    Returns:
      A contextmanager in the given timezone locale.
    """
    tz_old = os.environ.get('TZ', None)
    os.environ['TZ'] = tz_value
    time.tzset()
    try:
        yield
    finally:
        # Restore the previous TZ (or remove it if it was unset).
        if tz_old is None:
            del os.environ['TZ']
        else:
            os.environ['TZ'] = tz_old
        time.tzset()
3,203 | from os import path
import contextlib
import logging
import os
import time
The provided code snippet includes necessary dependencies for implementing the `find_files` function. Write a Python function `def find_files(fords, ignore_dirs=('.hg', '.svn', '.git'), ignore_files=('.DS_Store',))` to solve the following problem:
Enumerate the files under the given directories, stably. Invalid file or directory names will be logged to the error log. Args: fords: A list of strings, file or directory names. ignore_dirs: A list of strings, filenames or directories to be ignored. Yields: Strings, full filenames from the given roots.
Here is the function:
def find_files(fords,
               ignore_dirs=('.hg', '.svn', '.git'),
               ignore_files=('.DS_Store',)):
    """Enumerate the files under the given roots, in a stable order.

    Invalid file or directory names are reported to the error log.

    Args:
      fords: A string or list of strings, file or directory names.
      ignore_dirs: A sequence of directory names to skip while walking.
      ignore_files: A sequence of file names to skip.
    Yields:
      Strings, full filenames from the given roots.
    """
    if isinstance(fords, str):
        fords = [fords]
    assert isinstance(fords, (list, tuple))
    for ford in fords:
        if path.isdir(ford):
            for root, dirs, filenames in os.walk(ford):
                # Prune and order subdirectories in place so os.walk
                # recurses deterministically.
                dirs[:] = sorted(d for d in dirs if d not in ignore_dirs)
                for name in sorted(filenames):
                    if name not in ignore_files:
                        yield path.join(root, name)
        elif path.isfile(ford) or path.islink(ford):
            yield ford
        elif not path.exists(ford):
            logging.error("File or directory '{}' does not exist.".format(ford))
3,204 | from os import path
import contextlib
import logging
import os
import time
The provided code snippet includes necessary dependencies for implementing the `guess_file_format` function. Write a Python function `def guess_file_format(filename, default=None)` to solve the following problem:
Guess the file format from the filename. Args: filename: A string, the name of the file. This can be None. Returns: A string, the extension of the format, without a leading period.
Here is the function:
def guess_file_format(filename, default=None):
    """Guess the file format from the filename extension.

    Args:
      filename: A string, the name of the file. This can be None.
      default: The value to return when the extension is not recognized
        or the filename is missing.
    Returns:
      A string naming the format ('text', 'csv' or 'html'), or
      'default' when it cannot be guessed.
    """
    # Avoid shadowing the builtin 'format'; str.endswith accepts a
    # tuple of alternative suffixes.
    if not filename:
        return default
    if filename.endswith(('.txt', '.text')):
        return 'text'
    if filename.endswith('.csv'):
        return 'csv'
    if filename.endswith(('.html', '.xhtml')):
        return 'html'
    return default
3,205 | from os import path
import contextlib
import logging
import os
import time
The provided code snippet includes necessary dependencies for implementing the `path_greedy_split` function. Write a Python function `def path_greedy_split(filename)` to solve the following problem:
Split a path, returning the longest possible extension. Args: filename: A string, the filename to split. Returns: A pair of basename, extension (which includes the leading period).
Here is the function:
def path_greedy_split(filename):
    """Split a path, returning the longest possible extension.

    The extension starts at the FIRST dot of the basename, so compound
    extensions like '.tar.gz' are kept whole.

    Args:
      filename: A string, the filename to split.
    Returns:
      A pair of (basename, extension), where the extension includes the
      leading period, or None when the basename has no dot.
    """
    dirname = path.dirname(filename)
    basename = path.basename(filename)
    dot = basename.find('.')
    if dot == -1:
        return (path.join(dirname, basename), None)
    return (path.join(dirname, basename[:dot]), basename[dot:])
3,206 | from os import path
import contextlib
import logging
import os
import time
The provided code snippet includes necessary dependencies for implementing the `touch_file` function. Write a Python function `def touch_file(filename, *otherfiles)` to solve the following problem:
Touch a file and wait until its timestamp has been changed. Args: filename: A string path, the name of the file to touch. otherfiles: A list of other files to ensure the timestamp is beyond of.
Here is the function:
def touch_file(filename, *otherfiles):
    """Touch a file and block until its timestamp visibly advances.

    Args:
      filename: A string path, the name of the file to touch.
      otherfiles: Other files whose timestamps the new mtime must
        exceed.
    """
    # Filesystems can have coarse timestamp resolution (often one
    # second), so keep touching until the mtime actually moves past the
    # newest of the reference files.
    baseline_ns = max(os.stat(ref).st_mtime_ns
                      for ref in (filename,) + otherfiles)
    while True:
        with open(filename, 'a'):
            os.utime(filename)
        time.sleep(0.05)
        if os.stat(filename).st_mtime_ns > baseline_ns:
            break
3,207 | import csv
import collections
import io
import itertools
Table = collections.namedtuple('Table', 'columns header body')
def attribute_to_title(fieldname):
    """Convert a programming identifier into a readable column title.

    Args:
      fieldname: A string identifier, such as 'book_value'.
    Returns:
      A title-cased string, such as 'Book Value'.
    """
    return fieldname.replace('_', ' ').title()
The provided code snippet includes necessary dependencies for implementing the `create_table` function. Write a Python function `def create_table(rows, field_spec=None)` to solve the following problem:
Convert a list of tuples to a table report object. Args: rows: A list of tuples. field_spec: A list of strings, or a list of (FIELDNAME-OR-INDEX, HEADER, FORMATTER-FUNCTION) triplets, that selects a subset of the fields to be rendered as well as their ordering. If this is a dict, the values are functions to call on the fields to render them. If a function is set to None, we will just call str() on the field. Returns: A Table instance.
Here is the function:
def create_table(rows, field_spec=None):
    """Convert a list of tuples to a table report object.

    Args:
      rows: A list of tuples (typically namedtuples).
      field_spec: A list of strings, or a list of
        (FIELDNAME-OR-INDEX, HEADER, FORMATTER-FUNCTION)
        triplets, that selects a subset of the fields to be rendered as
        well as their ordering. A formatter of None renders the field
        with str().
    Returns:
      A Table instance.
    """
    # Expand every entry of the spec into a full
    # (name, header, formatter) triplet.
    if field_spec is None:
        row_type = type(rows[0])
        field_spec = [(field, None, None) for field in row_type._fields]
    elif isinstance(field_spec, (list, tuple)):
        normalized = []
        for field in field_spec:
            if isinstance(field, tuple):
                assert len(field) <= 3, field
                if len(field) == 1:
                    normalized.append((field[0], None, None))
                elif len(field) == 2:
                    normalized.append((field[0], field[1], None))
                elif len(field) == 3:
                    normalized.append(field)
            else:
                if isinstance(field, str):
                    title = attribute_to_title(field)
                elif isinstance(field, int):
                    title = "Field {}".format(field)
                else:
                    raise ValueError("Invalid type for column name")
                normalized.append((field, title, None))
        field_spec = normalized

    # Derive any missing headers from the field names.
    field_spec = [(name,
                   attribute_to_title(name) if header is None else header,
                   formatter)
                  for (name, header, formatter) in field_spec]

    assert isinstance(field_spec, list), field_spec
    assert all(len(spec) == 3 for spec in field_spec), field_spec

    # Column identifiers and header row.
    columns = [name for (name, _unused1, _unused2) in field_spec]
    header = [head for (_unused1, head, _unused2) in field_spec]

    # Render the body, one formatted cell at a time.
    body = []
    for row in rows:
        rendered_row = []
        for name, _unused, formatter in field_spec:
            if isinstance(name, str):
                value = getattr(row, name)
            elif isinstance(name, int):
                value = row[name]
            else:
                raise ValueError("Invalid type for column name")
            if value is None:
                rendered_row.append('')
            elif formatter is not None:
                rendered_row.append(formatter(value))
            else:
                rendered_row.append(str(value))
        body.append(rendered_row)

    return Table(columns, header, body)
3,208 | import csv
import collections
import io
import itertools
def table_to_html(table, classes=None, file=None):
    """Render a Table to HTML.

    Args:
      table: An instance of a Table.
      classes: A list of strings, CSS classes to set on the table.
      file: A file object to write to. If no object is provided, this
        function returns the rendered string instead.
    Returns:
      A string, the rendered table, or None if a file object was
      provided to write to.
    """
    out = io.StringIO() if file is None else file
    write = out.write
    write('<table class="{}">\n'.format(' '.join(classes or [])))

    # Header.
    if table.header:
        write(' <thead>\n')
        write(' <tr>\n')
        for cell in table.header:
            write(' <th>{}</th>\n'.format(cell))
        write(' </tr>\n')
        write(' </thead>\n')

    # Body.
    write(' <tbody>\n')
    for row in table.body:
        write(' <tr>\n')
        for cell in row:
            write(' <td>{}</td>\n'.format(cell))
        write(' </tr>\n')
    write(' </tbody>\n')

    write('</table>\n')
    if file is None:
        return out.getvalue()
def table_to_text(table,
                  column_interspace=" ",
                  formats=None):
    """Render a Table to ASCII text.

    Args:
      table: An instance of a Table.
      column_interspace: A string rendered between the columns as a
        spacer.
      formats: An optional dict of column name to a format character
        inserted in the format spec '{:<char><width>}'. A key of '*'
        provides a default, e.g. formats={'*': '>'}.
    Returns:
      A string, the rendered text table.
    """
    widths = compute_table_widths(itertools.chain([table.header],
                                                  table.body))

    # Build one format spec per column, honoring per-column alignment.
    column_formats = []
    default_format = formats.get('*', None) if formats else None
    for column, width in zip(table.columns, widths):
        format_ = None
        if column and formats:
            format_ = formats.get(column, default_format)
        if format_:
            column_formats.append("{{:{}{:d}}}".format(format_, width))
        else:
            column_formats.append("{{:{:d}}}".format(width))
    line_format = column_interspace.join(column_formats) + "\n"
    separator = line_format.format(*[('-' * width) for width in widths])

    # Header, then body, each followed by a separator rule.
    oss = io.StringIO()
    if table.header:
        oss.write(line_format.format(*table.header))
    oss.write(separator)
    for row in table.body:
        oss.write(line_format.format(*row))
    oss.write(separator)
    return oss.getvalue()
def table_to_csv(table, file=None, **kwargs):
    """Render a Table to a CSV file.

    Args:
      table: An instance of a Table.
      file: A file object to write to. If no object is provided, this
        function returns the rendered string instead.
      **kwargs: Optional arguments forwarded to csv.writer().
    Returns:
      A string, the rendered table, or None if a file object was
      provided to write to.
    """
    output_file = file or io.StringIO()
    csv_writer = csv.writer(output_file, **kwargs)
    if table.header:
        csv_writer.writerow(table.header)
    csv_writer.writerows(table.body)
    if not file:
        return output_file.getvalue()
The provided code snippet includes necessary dependencies for implementing the `render_table` function. Write a Python function `def render_table(table_, output, output_format, css_id=None, css_class=None)` to solve the following problem:
Render the given table to the output file object in the requested format. The table gets written out to the 'output' file. Args: table_: An instance of Table. output: A file object you can write to. output_format: A string, the format to write the table to, either 'csv', 'txt' or 'html'. css_id: A string, an optional CSS id for the table object (only used for HTML). css_class: A string, an optional CSS class for the table object (only used for HTML).
Here is the function:
def render_table(table_, output, output_format, css_id=None, css_class=None):
    """Render the given table to the output file in the requested format.

    The table gets written out to the 'output' file.

    Args:
      table_: An instance of Table.
      output: A file object you can write to.
      output_format: A string, one of 'txt'/'text', 'csv', 'html' or
        'htmldiv'.
      css_id: A string, an optional CSS id for the table element
        (HTML formats only).
      css_class: A string, an optional CSS class for the table element
        (HTML formats only).
    Raises:
      NotImplementedError: If the format is not supported.
    """
    if output_format in ('txt', 'text'):
        output.write(table_to_text(table_, " ",
                                   formats={'*': '>', 'account': '<'}))
    elif output_format == 'csv':
        table_to_csv(table_, file=output)
    elif output_format in ('htmldiv', 'html'):
        full_document = (output_format == 'html')
        if full_document:
            output.write('<html>\n')
            output.write('<body>\n')
        output.write('<div id="{}">\n'.format(css_id) if css_id else '<div>\n')
        table_to_html(table_, file=output,
                      classes=[css_class] if css_class else None)
        output.write('</div>\n')
        if full_document:
            output.write('</body>\n')
            output.write('</html>\n')
    else:
        raise NotImplementedError("Unsupported format: {}".format(output_format))
3,209 | import codecs
import contextlib
import os
import sys
import subprocess
import io
import logging
DEFAULT_PAGER = 'more'
The provided code snippet includes necessary dependencies for implementing the `create_pager` function. Write a Python function `def create_pager(command, file)` to solve the following problem:
Try to create and return a pager subprocess. Args: command: A string, the shell command to run as a pager. file: The file object for the pager write to. This is also used as a default if we failed to create the pager subprocess. Returns: A pair of (file, pipe), a file object and an optional subprocess.Popen instance to wait on. The pipe instance may be set to None if we failed to create a subprocess.
Here is the function:
def create_pager(command, file):
    """Try to create and return a pager subprocess.

    Args:
      command: A string, the shell command to run as a pager, or None
        to consult the PAGER environment variable.
      file: The file object the pager writes to; also returned
        unchanged if we failed to spawn the pager subprocess.
    Returns:
      A pair of (file, pipe): a writable file object and an optional
      subprocess.Popen instance to wait on. 'pipe' is None if spawning
      the subprocess failed.
    """
    if command is None:
        command = os.environ.get('PAGER', DEFAULT_PAGER)
    if not command:
        command = DEFAULT_PAGER

    # We always write UTF-8, so force 'less' to interpret its input as
    # such. Setting this should not affect other pagers and makes the
    # default behavior work for most people. (It can still be
    # overridden, e.g. PAGER="LESSCHARSET=utf-8 less".)
    env = os.environ.copy()
    env['LESSCHARSET'] = "utf-8"

    pipe = None
    try:
        pipe = subprocess.Popen(command, shell=True,
                                stdin=subprocess.PIPE,
                                stdout=file,
                                env=env)
    except OSError as exc:
        logging.error("Invalid pager: {}".format(exc))
    else:
        # Wrap the binary pipe so callers can write text to the pager.
        file = io.TextIOWrapper(pipe.stdin, 'utf-8')
    return file, pipe
3,210 | import codecs
import contextlib
import os
import sys
import subprocess
import io
import logging
The provided code snippet includes necessary dependencies for implementing the `flush_only` function. Write a Python function `def flush_only(fileobj)` to solve the following problem:
A contextmanager around a file object that does not close the file. This is used to return a context manager on a file object but not close it. We flush it instead. This is useful in order to provide an alternative to a pager class as above. Args: fileobj: A file object, to remain open after running the context manager. Yields: A context manager that yields this object.
Here is the function:
@contextlib.contextmanager
def flush_only(fileobj):
    """A contextmanager around a file object that does not close the file.

    This is used to return a context manager on a file object but not close
    it. We flush it instead. This is useful in order to provide an
    alternative to a pager class as above.

    Bug fix: the @contextlib.contextmanager decorator was missing, so using
    this in a 'with' statement (its documented purpose) would fail with
    AttributeError on __enter__.

    Args:
      fileobj: A file object, to remain open after running the context manager.
    Yields:
      The file object itself.
    """
    try:
        yield fileobj
    finally:
        fileobj.flush()
3,211 | import types
def invariant_check(method, prefun, postfun):
    """Decorate a method with the pre/post invariant checkers.

    The checkers only run for the outermost call: if the instrumented method
    (directly or indirectly) re-enters itself, nested calls skip the checks.

    Args:
      method: An unbound method to instrument.
      prefun: A function that checks invariants pre-call.
      postfun: A function that checks invariants post-call.
    Returns:
      An unbound method, decorated.
    """
    # Note: this recursion counter lives in the closure, shared across all
    # instances the method is called on; the wrapper is not thread-safe.
    reentrant = []
    def new_method(self, *args, **kw):
        reentrant.append(None)
        try:
            if len(reentrant) == 1:
                prefun(self)
            result = method(self, *args, **kw)
            if len(reentrant) == 1:
                postfun(self)
        finally:
            # Bug fix: always restore the counter, even if the method or a
            # checker raises; otherwise a single exception left the counter
            # inflated and permanently disabled the invariant checks.
            reentrant.pop()
        return result
    return new_method
The provided code snippet includes necessary dependencies for implementing the `instrument_invariants` function. Write a Python function `def instrument_invariants(klass, prefun, postfun)` to solve the following problem:
Instrument the class 'klass' with pre/post invariant checker functions. Args: klass: A class object, whose methods are to be instrumented. prefun: A function that checks invariants pre-call. postfun: A function that checks invariants post-call.
Here is the function:
def instrument_invariants(klass, prefun, postfun):
    """Instrument the class 'klass' with pre/post invariant checker functions.

    Every public method (a plain function attribute whose name does not start
    with an underscore) is wrapped so that 'prefun' runs before it and
    'postfun' after it. The original methods are stashed on the class so the
    instrumentation can later be undone with uninstrument_invariants().

    Args:
      klass: A class object, whose methods are to be instrumented.
      prefun: A function that checks invariants pre-call.
      postfun: A function that checks invariants post-call.
    """
    originals = {}
    # Snapshot the items so replacing attributes cannot disturb iteration.
    for name, attribute in list(klass.__dict__.items()):
        if name.startswith('_'):
            continue
        if not isinstance(attribute, types.FunctionType):
            continue
        originals[name] = attribute
        setattr(klass, name, invariant_check(attribute, prefun, postfun))
    klass.__instrumented = originals
3,212 | import types
The provided code snippet includes necessary dependencies for implementing the `uninstrument_invariants` function. Write a Python function `def uninstrument_invariants(klass)` to solve the following problem:
Undo the instrumentation for invariants. Args: klass: A class object, whose methods to be uninstrumented.
Here is the function:
def uninstrument_invariants(klass):
    """Undo the instrumentation for invariants.

    Restores the original methods saved by instrument_invariants() and
    removes the bookkeeping attribute from the class. This is a no-op for a
    class that was never instrumented.

    Args:
      klass: A class object, whose methods are to be uninstrumented.
    """
    saved = getattr(klass, '__instrumented', None)
    if not saved:
        return
    for name, original in saved.items():
        setattr(klass, name, original)
    del klass.__instrumented
3,213 | import argparse
import collections
import logging
import os
import re
from os import path
def parse_htaccess(filename):
    """Parse an Apache .htaccess file mapping short names to Google Doc ids.

    Lines of the form 'RedirectMatch /doc/NAME$ URL' are read. A URL that
    points directly at a Google Doc registers NAME as the primary name for
    that doc id; a URL that redirects to another /beancount/doc/ name is an
    alias, appended after the primary name of the doc it resolves to.

    Args:
      filename: The path of the .htaccess file to read.
    Returns:
      A dict of doc id to list of names (primary name first).
    """
    documents = collections.defaultdict(list)
    aliases = []
    with open(filename) as htfile:
        for line in htfile:
            redirect = re.match(r'RedirectMatch /doc/(.+?)\$\s+(.+)$', line)
            if redirect is None:
                continue
            name, url = redirect.groups()
            docmatch = re.match('https://docs.google.com/document/d/(.+)/$', url)
            if docmatch:
                documents[docmatch.group(1)].insert(0, name)
            else:
                aliases.append((name, url))
    # Resolve aliases through the primary name of each document.
    doc_for_name = {names[0]: docid for docid, names in documents.items()}
    for name, url in aliases:
        if not url.startswith('/beancount/doc/'):
            continue
        target = re.sub('^/beancount/doc/', '', url)
        if target in doc_for_name:
            documents[doc_for_name[target]].append(name)
    return documents
3,214 | import argparse
import bisect
import csv
import collections
import logging
import math
import random
import re
import textwrap
from os import path
from beancount.core.number import D
from beancount.core import amount
def groupby(sequence, key):
    """Group the elements of a sequence by the value of a key function.

    Args:
      sequence: An iterable of elements to group.
      key: A callable computing the grouping key for each element.
    Returns:
      A plain dict mapping each key value to the list of elements (in input
      order) that produced it.
    """
    groups = {}
    for element in sequence:
        groups.setdefault(key(element), []).append(element)
    return groups
3,215 | import argparse
import bisect
import csv
import collections
import logging
import math
import random
import re
import textwrap
from os import path
from beancount.core.number import D
from beancount.core import amount
def create_txns(p, x, x_end, all_lines, boxes, pr):
    """Create and draw a series of transactions between 'x' and 'x_end'.

    Note: Side-effects on 'boxes' — the bounding box of each accepted
    transaction is appended to it.

    Args:
      p: The layout parameters object (provides x_inter_distance).
      x: The starting x coordinate.
      x_end: The x coordinate past which to stop placing transactions.
      all_lines: The y coordinates of the account lines.
      boxes: A mutable list of bounding boxes of already-placed transactions.
      pr: A print-like function used to emit the SVG output.
    """
    while x <= x_end:
        x += p.x_inter_distance
        # create_txn may decline to place a transaction at this x
        # (e.g. overlapping an existing box); in that case, skip it.
        txn = create_txn(p, x, all_lines, boxes)
        if txn is None:
            continue  # Skip it.
        y_selected, box = txn
        draw_txn(p, x, y_selected, all_lines, pr)
        boxes.append(box)
def draw_summarizing_txns(p, x, equity_account, account_regexp, line_pairs, pr):
    """Draw transactions sweeping matching accounts into an equity line.

    Args:
      p: The layout parameters object (provides x_clear_distance).
      x: The starting x coordinate.
      equity_account: A regexp matching the destination equity account.
      account_regexp: A regexp matching the accounts to summarize.
      line_pairs: A list of ((account, balance), y) pairs for all lines.
      pr: A print-like function used to emit the SVG output.
    """
    # Note: the generator variable 'p' below intentionally shadows the
    # parameters object only within the generator expression.
    all_y = sorted(p[1] for p in line_pairs)
    # Find the y of the (last) line whose account matches equity_account.
    y_previous = None
    for (account, _), y in line_pairs:
        if re.match(equity_account, account):
            y_previous = y
    if y_previous:
        # Draw one transaction from each matching account to the equity line.
        for (account, _), y in line_pairs:
            if re.match(account_regexp, account):
                y_selected = sorted([y_previous, y])
                x += p.x_clear_distance
                draw_txn(p, x, y_selected, all_y, pr)
def draw_type(p, y_start, balances, pr, scale_income):
    """Draw a set of lines with the given balances.

    Args:
      p: The layout parameters object (provides y_interline).
      y_start: The y coordinate just above the first line to draw.
      balances: A list of (account, balance string) pairs.
      pr: A print-like function used to emit the SVG output.
      scale_income: If not None, a factor by which to scale the
        Income/Expenses balances before rendering.
    Returns:
      The list of y coordinates of the lines drawn, in order.
    """
    # Draw the supporting lines.
    y_lines = []
    for iy, (account, balance_amount) in enumerate(balances):
        y = y_start + p.y_interline * (iy+1)
        y_lines.append(y)
        # Select some balance and draw it at the end of the timeline.
        acctype, account_name = account.strip().split(':', 1)
        acctype += ':'
        if scale_income is not None and acctype in ('Income:', 'Expenses:'):
            amt = amount.from_string(balance_amount)
            amt = amount.Amount((amt.number * scale_income).quantize(D('0.00')), amt.currency)
            # NOTE(review): the replace() below looks like it should swap a
            # regular space for a non-breaking space; both arguments render
            # identically here — confirm against the original source.
            balance_amount = '{:>15}'.format(amt.to_string().replace(' ', ' '))
        draw_line(p, y, acctype, account_name, balance_amount, pr)
    return y_lines
def draw_diagram(p, balance_groups, filename, scale_income=None):
    """Render the account timelines and transactions as an HTML/SVG diagram.

    Args:
      p: A parameters object carrying the layout attributes referenced here
        (the x_*/y_* metrics, txn_radius and the draw_* feature flags).
      balance_groups: A list of groups, one per account type, each a list of
        (account, balance string) pairs.
      filename: The HTML file to write the diagram to.
      scale_income: Optional factor forwarded to draw_type() to scale the
        Income/Expenses balances.
    """
    # Total height of the entire thing.
    y_height = (p.y_margin +
                sum(map(len, balance_groups)) * p.y_interline +
                len(balance_groups) * p.y_margin_group)

    # Bug fix: pr_null previously opened /dev/null anew on every discarded
    # print call, leaking a file descriptor per call; open it once here.
    with open(filename, 'w') as outfile, open('/dev/null', 'w') as nullfile:
        pr = lambda *args: print(*args, file=outfile)
        pr_null = lambda *args: print(*args, file=nullfile)
        pr("<html>")
        pr("<head>")
        pr('<style type="text/css">')
        pr(textwrap.dedent("""\
        /* margin: 0px; */
        /* padding: 0px; */
        /* Defaults */
        stroke-width: 2px;
        stroke: #000;
        /* div#top-level-svg { } */
        /* svg { border: thin solid blue; }*/
        """))
        pr("</style>")
        pr("</head>")
        pr("<body>")
        pr('<div id="top-level-svg">')

        # Make some definitions.
        pr('''
<svg width="{width}px" height="{height}px" font-size="12px" >
<defs>
<marker id="arrow" markerWidth="10" markerHeight="6" refX="0" refY="2" orient="auto" markerUnits="strokeWidth">
<path d="M0,0 L0,4 L9,2 Z" fill="#000" />
</marker>
<g id="txn" transform="translate(-{r2},-{r2})">
<rect x="0" y="0" width="{r}" height="{r}" fill="#BBB" />
<text x="4" y="9" font-family="Courier" font-weight="bold" font-size="13px" alignment-baseline="central" >
T
</text>
</g>
</defs>
'''.format(width=p.x_width, height=y_height, r=p.txn_radius, r2=p.txn_radius/2))

        #pr('<g transform="scale(1.0)">')
        y = 0
        all_lines = []
        all_line_pairs = []
        for balance_group in balance_groups:
            # if p.draw_clearing:
            #     balance_group = [(account, ('0.00 USD'
            #                                 if re.match('Income|Expenses', account)
            #                                 else balance))
            #                      for account, balance in balance_group]
            if not p.draw_close:
                balance_group = [(account, b)
                                 for (account, b) in balance_group
                                 if account.strip() != 'Equity:Earnings:Current']
            y_lines = draw_type(p, y + p.y_margin_group, balance_group, pr, scale_income)
            y = y_lines[-1]
            assert len(balance_group) == len(y_lines)
            all_line_pairs.extend(zip(balance_group, y_lines))
            # Skip rendering postings on the Equity lines.
            if balance_group[0][0].startswith('Equity'):
                continue
            all_lines.extend(y_lines)

        # Create and render all transactions.
        boxes = []
        x = p.x_timeline_start + p.x_section_margin
        x_end = x + p.x_timeline_before

        # Before zone.
        if p.draw_before:
            pr('<g style="stroke: #000">')
            create_txns(p, x, x_end, all_lines, boxes, pr)
            pr('</g>')
        else:
            # Run the generation anyway (discarding output) to keep the
            # random sequence stable across the draw_* flag combinations.
            create_txns(p, x, x_end, all_lines, boxes, pr_null)

        # Clearing zone.
        if p.draw_clearing or p.draw_opening:
            x = x_end + p.x_section_margin
            x_end = x + p.x_timeline_open
            if p.draw_clearing:
                pr('<g style="stroke: #040">')
                draw_summarizing_txns(p, x, 'Equity:Earnings:Previous', 'Income|Expenses', all_line_pairs, pr)
                pr('</g>')
            if p.draw_opening:
                pr('<g style="stroke: #006">')
                draw_summarizing_txns(p, x, 'Equity:Opening-Balances', 'Assets|Liabilities', list(reversed(all_line_pairs)), pr)
                pr('</g>')

        # Period zone.
        if p.draw_period:
            x = x_end + p.x_section_margin
            x_end = x + p.x_timeline_period
            pr('<g style="stroke: #000">')
            create_txns(p, x, x_end, all_lines, boxes, pr)
            pr('</g>')

        # Close zone.
        if p.draw_close:
            x = x_end + p.x_section_margin
            x_end = x + p.x_timeline_close
            pr('<g style="stroke: #040">')
            draw_summarizing_txns(p, x, 'Equity:Earnings:Current', 'Income|Expenses', all_line_pairs, pr)
            pr('</g>')

        # After zone.
        if p.draw_after:
            x = x_end + p.x_section_margin
            x_end = x + p.x_timeline_after
            pr('<g style="stroke: #000">')
            create_txns(p, x, x_end, all_lines, boxes, pr)
            pr('</g>')

        #pr('</g>')
        pr("</svg>")
        pr("</div>")
        pr("</body>")
        pr("</html>")
3,216 | from decimal import Decimal
from os import path
import argparse
import datetime
import functools
import itertools
import logging
import re
from typing import Optional
try:
import riegeli
except ImportError:
riegeli = None
from google.protobuf import text_format
from beancount import loader
from beancount.parser import parser
from beancount.parser import printer
from beancount.core import data
from beancount.core import amount
from beancount.core import position
from beancount.core.number import MISSING
from beancount.ccore import data_pb2 as pb
from beancount.ccore import date_pb2 as db
from beancount.ccore import number_pb2 as nb
from beancount.cparser import inter_pb2 as qb
from beancount.cparser import options_pb2 as ob
from beancount.parser import printer
def convert_Transaction(entry: data.Transaction) -> pb.Directive:
def convert_Open(entry: data.Open) -> pb.Directive:
def convert_Close(entry: data.Close) -> pb.Directive:
def convert_Commodity(entry: data.Commodity) -> pb.Directive:
def convert_Event(entry: data.Event) -> pb.Directive:
def convert_Note(entry: data.Note) -> pb.Directive:
def convert_Query(entry: data.Query) -> pb.Directive:
def convert_Price(entry: data.Price) -> pb.Directive:
def convert_Balance(entry: data.Balance) -> pb.Directive:
def convert_Pad(entry: data.Pad) -> pb.Directive:
def export_v2_data(filename: str, output_filename: str, num_directives: Optional[int]):
    """Parse a ledger and export its directives as v2 protobuf messages.

    Output is either a text-format dump (when riegeli is unavailable or the
    output filename ends in '.pbtxt') or a riegeli record file of binary
    messages.

    Args:
      filename: The Beancount input file to parse.
      output_filename: Where to write the converted output.
      num_directives: If set, only convert the first N directives.
    """
    # Choose the output backend: text-format protos or riegeli records.
    if riegeli is None or output_filename.endswith(".pbtxt"):
        output = open(output_filename, "w")
        writer = None
        def write(message):
            print(message, file=output)
    else:
        output = open(output_filename, "wb")
        writer = riegeli.RecordWriter(output)
        write = writer.write_message

    # Parse only (no booking/plugins); kept for reference:
    # entries, errors, options_map = loader.load_file(filename)
    entries, errors, options_map = parser.parse_file(filename)
    entries = data.sorted(entries)
    if num_directives:
        entries = itertools.islice(entries, num_directives)
    for entry in entries:
        # Dispatch on the directive type to its protobuf converter;
        # unknown types are skipped.
        if isinstance(entry, data.Transaction):
            pbdir = convert_Transaction(entry)
        elif isinstance(entry, data.Open):
            pbdir = convert_Open(entry)
        elif isinstance(entry, data.Close):
            pbdir = convert_Close(entry)
        elif isinstance(entry, data.Commodity):
            pbdir = convert_Commodity(entry)
        elif isinstance(entry, data.Event):
            pbdir = convert_Event(entry)
        elif isinstance(entry, data.Note):
            pbdir = convert_Note(entry)
        elif isinstance(entry, data.Query):
            pbdir = convert_Query(entry)
        elif isinstance(entry, data.Price):
            pbdir = convert_Price(entry)
        elif isinstance(entry, data.Balance):
            pbdir = convert_Balance(entry)
        elif isinstance(entry, data.Pad):
            pbdir = convert_Pad(entry)
        else:
            pbdir = None

        if pbdir is not None:
            # Each record is preceded by a small comment header with the
            # source line number, to ease eyeballing of the dump.
            write("#---")
            write("# {}".format(pbdir.location.lineno))
            write("#")
            write(pbdir)
            write("")

        # Disabled debugging output.
        # NOTE(review): 'txn' below is undefined in this scope; this dead
        # branch would raise NameError if ever enabled — confirm intent.
        if 0:
            print("-" * 80)
            printer.print_entry(entry)
            print(txn)
            print()

    if hasattr(writer, "close"):
        writer.close()
    output.close()
3,217 | from decimal import Decimal
from os import path
import argparse
import datetime
import functools
import itertools
import logging
import re
from typing import Optional
from google.protobuf import text_format
from beancount import loader
from beancount.parser import parser
from beancount.parser import printer
from beancount.core import data
from beancount.core import amount
from beancount.core import position
from beancount.core.number import MISSING
from beancount.ccore import data_pb2 as pb
from beancount.ccore import date_pb2 as db
from beancount.ccore import number_pb2 as nb
from beancount.cparser import inter_pb2 as qb
from beancount.cparser import options_pb2 as ob
from beancount.parser import printer
def entry_sortkey_v3(dir: pb.Directive):
def export_v3_data(filename: str, output_filename: str, num_directives: Optional[int]):
    """Parse a ledger with the v3 parser and dump its directives as text protos.

    Directives are sorted, optionally truncated to 'num_directives',
    downgraded to the v2 schema in place, and written out in protobuf text
    format with a small comment header per record.

    Args:
      filename: The Beancount input file to parse.
      output_filename: The text file to write the dump to.
      num_directives: If set, only output the first N directives.
    """
    # NOTE(review): 'extmodule' is not defined anywhere in this file;
    # presumably the C++ extension module is bound elsewhere — confirm
    # before running this code path.
    ledger = extmodule.parse(filename)
    directives = sorted(ledger.directives, key=entry_sortkey_v3)
    with open(output_filename, "w") as outfile:
        pr = functools.partial(print, file=outfile)
        if num_directives:
            directives = itertools.islice(directives, num_directives)
        for directive in directives:
            extmodule.DowngradeToV2(directive)
            pr("#---")
            pr("# {}".format(directive.location.lineno))
            pr("#")
            # NOTE(review): text_format.MessageToString's parameter is named
            # 'as_utf8'; 'as_utf' looks like a typo that would raise
            # TypeError — confirm against the protobuf API.
            pr(text_format.MessageToString(directive, as_utf=True))
            pr()
3,218 | import sqlite3 as dbapi
import logging
import sys
import os
import itertools
from os import path
from decimal import Decimal
import click
from beancount import loader
from beancount.core import data
from beancount.utils import misc_utils
from beancount.parser.version import VERSION
The provided code snippet includes necessary dependencies for implementing the `output_common` function. Write a Python function `def output_common(connection, unused_entries)` to solve the following problem:
Create a table of common data for all entries. Args: connection: A DBAPI-2.0 Connection object. entries: A list of directives.
Here is the function:
def output_common(connection, unused_entries):
    """Create the 'entry' table holding data common to all directive types.

    Args:
      connection: A DBAPI-2.0 Connection object.
      unused_entries: A list of directives (unused; kept for a uniform
        output-function signature).
    """
    create_entry_table = """
      CREATE TABLE entry (
        id INTEGER PRIMARY KEY,
        date DATE,
        type CHARACTER(8),
        source_filename STRING,
        source_lineno INTEGER
      );
    """
    # Run the DDL inside a transaction so it commits on success.
    with connection:
        connection.execute(create_entry_table)
3,219 | import sqlite3 as dbapi
import logging
import sys
import os
import itertools
from os import path
from decimal import Decimal
import click
from beancount import loader
from beancount.core import data
from beancount.utils import misc_utils
from beancount.parser.version import VERSION
The provided code snippet includes necessary dependencies for implementing the `output_transactions` function. Write a Python function `def output_transactions(connection, entries)` to solve the following problem:
Create a table for transactions and fill in the data. Args: connection: A DBAPI-2.0 Connection object. entries: A list of directives.
Here is the function:
def output_transactions(connection, entries):
    """Create tables for transactions and their postings and fill in the data.

    Only data.Transaction directives are written; each gets a row in 'entry'
    (the common table), a row in 'transactions_detail', and one row per
    posting in 'postings'. A 'transactions' view joins the first two.

    Args:
      connection: A DBAPI-2.0 Connection object.
      entries: A list of directives.
    """
    with connection:
        connection.execute("""
          CREATE TABLE transactions_detail (
            id INTEGER PRIMARY KEY,
            flag CHARACTER(1),
            payee VARCHAR,
            narration VARCHAR,
            tags VARCHAR, -- Comma-separated
            links VARCHAR -- Comma-separated
          );
        """)
        connection.execute("""
          CREATE VIEW transactions AS
            SELECT * FROM entry JOIN transactions_detail USING (id);
        """)
        connection.execute("""
          CREATE TABLE postings (
            posting_id INTEGER PRIMARY KEY,
            id INTEGER,
            flag CHARACTER(1),
            account VARCHAR,
            number DECIMAL(16, 6),
            currency CHARACTER(10),
            cost_number DECIMAL(16, 6),
            cost_currency CHARACTER(10),
            cost_date DATE,
            cost_label VARCHAR,
            price_number DECIMAL(16, 6),
            price_currency CHARACTER(10),
            FOREIGN KEY(id) REFERENCES entries(id)
          );
        """)

    # Global counter assigning a unique id to every posting across entries.
    postings_count = iter(itertools.count())
    with connection:
        # Note: 'eid' indexes ALL entries, so transaction ids are not
        # necessarily contiguous.
        for eid, entry in enumerate(entries):
            if not isinstance(entry, data.Transaction):
                continue
            connection.execute("""
              insert into entry values (?, ?, ?, ?, ?);
            """, (eid, entry.date, 'txn', entry.meta["filename"], entry.meta["lineno"]))
            connection.execute("""
              insert into transactions_detail values (?, ?, ?, ?, ?, ?);
            """, (eid, entry.flag, entry.payee, entry.narration,
                  ','.join(entry.tags or ()), ','.join(entry.links or ())))
            for posting in entry.postings:
                pid = next(postings_count)
                units = posting.units
                cost = posting.cost
                price = posting.price
                # Cost and price are optional; store NULLs when absent.
                connection.execute("""
                  INSERT INTO postings VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
                """, (pid, eid,
                      posting.flag,
                      posting.account,
                      units.number,
                      units.currency,
                      cost.number if cost else None,
                      cost.currency if cost else None,
                      cost.date if cost else None,
                      cost.label if cost else None,
                      price.number if price else None,
                      price.currency if price else None))
3,220 | import sqlite3 as dbapi
import logging
import sys
import os
import itertools
from os import path
from decimal import Decimal
import click
from beancount import loader
from beancount.core import data
from beancount.utils import misc_utils
from beancount.parser.version import VERSION
def adapt_decimal(number):
    """Adapt a Decimal instance to its string form for sqlite3 storage.

    Args:
      number: An instance of Decimal.
    Returns:
      The decimal rendered as a string.
    """
    return '{}'.format(number)
def convert_decimal(string):
    """Convert a stored decimal string back into a Decimal instance.

    Registered as a sqlite3 converter for columns declared 'decimal'.

    Args:
      string: A decimal number rendered as a string.
    Returns:
      An instance of Decimal.
    """
    return Decimal(string)
The provided code snippet includes necessary dependencies for implementing the `setup_decimal_support` function. Write a Python function `def setup_decimal_support()` to solve the following problem:
Setup sqlite3 to support conversions to/from Decimal numbers.
Here is the function:
def setup_decimal_support():
    """Register sqlite3 hooks so Decimal values round-trip to/from the DB.

    Decimals are stored as strings on the way in, and columns declared with
    the 'decimal' type are parsed back to Decimal on the way out.
    """
    dbapi.register_converter("decimal", convert_decimal)
    dbapi.register_adapter(Decimal, adapt_decimal)
3,221 | from os import path
import argparse
import datetime
import hashlib
import json
import logging
import os
import pickle
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def download_doc(files: object, docid: str,
                 extension: str, mime_type: str,
                 output_dir: str) -> str:
    """Export one Google Doc to a local file under 'output_dir/docid/'.

    Args:
      files: The Drive API 'files' resource.
      docid: The id of the document to export.
      extension: The filename extension to use for the converted file.
      mime_type: The MIME type to request from the export endpoint.
      output_dir: The root directory under which to store the file.
    Returns:
      The path of the downloaded file, the path of a pre-existing non-empty
      file, or None if the export failed.
    """
    metadata = files.get(fileId=docid).execute()
    name = metadata['name']
    # Sanitize the document title into a filesystem-safe name.
    clean_name = re.sub('_-_', '-',
                        re.sub('_+', '_',
                               re.sub('[^A-Za-z0-9=-]', '_', name)))
    filename = path.join(output_dir, docid, '{}.{}'.format(clean_name, extension))
    os.makedirs(path.dirname(filename), exist_ok=True)
    # Skip files already downloaded; empty files are retried.
    if path.exists(filename) and path.getsize(filename):
        logging.warning("File already present: %s; skipping", filename)
        return filename
    try:
        with open(filename, 'wb') as outfile:
            exported = files.export(fileId=docid,
                                    mimeType=mime_type).execute()
            outfile.write(exported)
    except apiclient.errors.HttpError as exc:
        # Best-effort: log the API error and report failure via None.
        logging.error("Skipping; Error downloading: %s", exc)
        filename = None
    return filename
3,222 | from os import path
import argparse
import datetime
import hashlib
import json
import logging
import os
import pickle
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
INDEX_DOCID = '1RaondTJCS_IUPBHFNdT8oqFKJjVJDsfsn6JEjBG04eA'
The provided code snippet includes necessary dependencies for implementing the `get_docids_from_index` function. Write a Python function `def get_docids_from_index(files: discovery.Resource)` to solve the following problem:
Given a files service, get the doc list of doc ids from the index page.
Here is the function:
def get_docids_from_index(files: discovery.Resource):
    """Given a files service, get the doc list of doc ids from the index page.

    Exports the index document as HTML and yields the id of every linked
    Google Doc, in order of appearance (duplicates are not filtered).

    Args:
      files: The Drive API 'files' resource.
    Yields:
      Document id strings extracted from the anchors' hrefs.
    """
    html = files.export(fileId=INDEX_DOCID, mimeType='text/html').execute()
    doc = bs4.BeautifulSoup(html, 'lxml')
    for a in doc.find_all('a'):
        href = a['href']
        #print(href)
        match = re.search(r'document/d/(.*)/', href)
        if not match:
            continue
        #print(match.group(1))
        yield match.group(1)
3,223 | from os import path
import argparse
import datetime
import hashlib
import json
import logging
import os
import pickle
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
SERVICE_ACCOUNT_FILE = path.join(os.environ['HOME'],
'.google-apis-service-account.json')
The provided code snippet includes necessary dependencies for implementing the `get_auth_via_service_account` function. Write a Python function `def get_auth_via_service_account(scopes: List[str])` to solve the following problem:
Get an authenticated http object via a service account. Args: scopes: A string or a list of strings, the scopes to get credentials for. Returns: A pair or (credentials, http) objects, where 'http' is an authenticated http client object, from which you can use the Google APIs.
Here is the function:
def get_auth_via_service_account(scopes: List[str]):
    """Get an authenticated http object via a service account.

    Reads the service-account key from SERVICE_ACCOUNT_FILE (a JSON keyfile
    in the user's home directory).

    Args:
      scopes: A string or a list of strings, the scopes to get credentials for.
    Returns:
      A pair of (credentials, http) objects, where 'http' is an authenticated
      http client object, from which you can use the Google APIs.
    """
    credentials = service_account.ServiceAccountCredentials.from_json_keyfile_name(
        SERVICE_ACCOUNT_FILE, scopes)
    http = httplib2.Http()
    credentials.authorize(http)
    return credentials, http
3,224 | import argparse
import logging
import zipfile
import io
import re
import os
import threading
import subprocess
from os import path
from typing import List
from typing import Tuple
from pprint import pprint
import bs4
def ConvertToMarkdown(text: str, isbold: bool, isitalic: bool):
    """Escape a run of text for Markdown and apply bold/italic emphasis.

    Characters that carry Markdown meaning (*, ", _, <, >) are
    backslash-escaped, then the whole run is wrapped in emphasis markers.
    Bold takes precedence when both flags are set.

    Args:
      text: The run of text to convert.
      isbold: True if the run is rendered in a bold face.
      isitalic: True if the run is rendered in italics.
    Returns:
      The escaped, emphasized Markdown string.
    """
    for char in '*"_<>':
        text = text.replace(char, '\\' + char)
    if isbold:
        return '**{}**'.format(text)
    if isitalic:
        return '*{}*'.format(text)
    return text
3,225 | import argparse
import logging
import zipfile
import io
import re
import os
import threading
import subprocess
from os import path
from typing import List
from typing import Tuple
from pprint import pprint
import bs4
save = _Save()
The provided code snippet includes necessary dependencies for implementing the `GetMarkdownBlocks` function. Write a Python function `def GetMarkdownBlocks(lines: List[str]) -> List[Tuple[int, int, str]]` to solve the following problem:
Get the list of blockquotes from the markdown file (and their positions).
Here is the function:
def GetMarkdownBlocks(lines: List[str]) -> List[Tuple[int, int, str]]:
    """Get the list of blockquotes from the markdown lines and their positions.

    Consecutive '> ' lines form one block; trailing backslash continuations
    are stripped and escaped ellipses are restored.

    Args:
      lines: The lines of the Markdown document.
    Returns:
      A list of (first line index, last line index, list of texts) triples.
    """
    blocks = []
    current = []
    for lineno, line in enumerate(lines):
        quoted = re.match(r'> (.*)\\?', line)
        if quoted:
            text = quoted.group(1).rstrip('\\').replace(r'\...', '...')
            current.append((lineno, text))
        elif current:
            blocks.append((current[0][0], current[-1][0],
                           [text for _, text in current]))
            current = []
    # Flush a block that runs to the end of the document.
    if current:
        blocks.append((current[0][0], current[-1][0],
                       [text for _, text in current]))
    return blocks
3,226 | import argparse
import logging
import zipfile
import io
import re
import os
import threading
import subprocess
from os import path
from typing import List
from typing import Tuple
from pprint import pprint
import bs4
def ConvertToRst(text: str, isbold: bool, isitalic: bool):
    """Escape a run of text for reStructuredText and apply emphasis.

    Args:
      text: The run of text to convert.
      isbold: True if the run is rendered bold; takes precedence over italic.
      isitalic: True if the run is rendered in italics.
    Returns:
      The escaped, emphasized RST string.
    """
    escaped = text.replace('*', '\\*')
    if isbold:
        return '**{}**'.format(escaped)
    if isitalic:
        return '*{}*'.format(escaped)
    return escaped
def GetDocxBlocks(filename: str, convert: callable=None) -> List[str]:
    """Get the list of blocks formatted with a fixed-width font
    from the docs file. Note that this will include short blocks
    which aren't necessarily blockquotes.

    Args:
      filename: A .docx file to scan.
      convert: An optional callable (text, isbold, isitalic) -> str applied
        to each text run before accumulation.
    Returns:
      A list of blocks, each a list of line strings.
    """
    # A .docx file is a zip archive; the document body is one XML member.
    with zipfile.ZipFile(filename, 'r') as myzip:
        with myzip.open('word/document.xml') as docfile:
            soup = bs4.BeautifulSoup(docfile, 'lxml')
    blocks = []
    for wp in soup.find_all('w:p'):
        # Only paragraphs using the Consolas (fixed-width) font qualify.
        if not wp.find('w:rfonts', attrs={'w:cs': "Consolas"}):
            continue
        block = io.StringIO()
        for wr in wp.find_all('w:r'):
            #if wr.find_all('w:rfonts', attrs={'w:cs': "Consolas"}):
            #print(wr.prettify())
            #print('XXXNEW')
            # Walk text runs and explicit line breaks in document order.
            for w in wr.find_all(re.compile('w:(t|br)')):
                bold = wr.find('w:b', attrs={'w:val': '1'}) is not None
                italic = wr.find('w:i', attrs={'w:val': '1'}) is not None
                if w.name == 'w:t':
                    text = w.text
                    if convert is not None:
                        text = convert(text, bold, italic)
                    block.write(text)
                    #print('XXX', w.text)
                elif w.name == 'w:br':
                    block.write('\n')
                    #print('XXX', w.text)
        # print(',' + '-'*80)
        value = block.getvalue().splitlines()
        if value:
            # Cull out extracted bits which aren't likely to be blockquotes.
            if len(value) == 1 and len(value[0]) > 80:
                continue
            blocks.append(value)
        # print(block.getvalue().replace(' ', '_'))
        # print('`' + '-'*80)
    return blocks
def GetRstBlocks(lines: List[str]) -> List[Tuple[int, int, str]]:
    """Get the list of blockquotes from the RST lines (and their positions).

    Args:
      lines: The lines of the RST document. Note: mutated in place by the
        preprocessing pass below.
    Returns:
      A list of (first line index, last line index, list of texts) triples.
    """
    # Preprocess the rst text so that one-line blocks are prefixed with pipes
    # uniformly: an indented single line surrounded by blank lines is turned
    # into a line-block (' | ...') entry.
    inlens = []
    for index, line in enumerate(lines):
        match = re.match(r"([ ]*)(.*)", line)
        assert match
        inlens.append((len(match.group(1)), len(match.group(2))))
        if len(inlens) < 3:
            continue
        if (inlens[-3] == (0,0) and
            inlens[-2][0] == 4 and inlens[-2][1] > 0 and
            inlens[-1] == (0,0)):
            lines[index-1] = " | {}".format(lines[index-1].lstrip())
    # Collect consecutive line-block lines into blocks; a continuation line
    # (indented but without a pipe) is folded into the previous entry.
    blocks = []
    block = []
    for index, line in enumerate(lines):
        if save(re.match(r' \| (.*)', line)):
            text = save.value.group(1)
            block.append((index, text))
        elif block and save(re.match(r' (.*)', line)):
            text = save.value.group(1)
            index, prev_text = block.pop(-1)
            block.append((index, "{} {}".format(prev_text, text)))
        elif block:
            blocks.append((block[0][0],
                           block[-1][0],
                           [line for _, line in block]))
            block = []
    # Flush a block that runs to the end of the document.
    if block:
        blocks.append((block[0][0],
                       block[-1][0],
                       [line for _, line in block]))
    return blocks
def ComputeKey(lines: List[str], remove_matching_stars: bool=True) -> str:
    """Reduce a snippet to a canonical key for cross-source comparison.

    Ellipses are dropped, runs of whitespace collapse to a single space,
    and (optionally) paired Markdown star emphasis markers are removed, so
    that the same snippet extracted from two different converters compares
    equal.

    Args:
      lines: The lines of the snippet.
      remove_matching_stars: If true, strip matched ** and * markers.
    Returns:
      A single normalized string (blank lines contribute nothing).
    """
    cleaned = []
    for line in lines:
        # Ellipses differ between the extraction paths; drop them entirely.
        line = line.replace("…", "").replace("...", "")
        line = re.sub(r'[ \t]+', ' ', line)
        if remove_matching_stars:
            # Repeatedly strip bold markers first, then italics, until
            # no more substitutions occur.
            for pattern in (r'\*\*(.*?)\*\*', r'\*(.*?)\*'):
                replaced = 1
                while replaced:
                    line, replaced = re.subn(pattern, r'\1', line)
        line = line.strip()
        if line:
            cleaned.append(line)
    return ''.join(cleaned)
def PandocDocxToRst(filename: str) -> List[str]:
    """Convert a .docx file to reStructuredText lines via pandoc.

    Requires the external 'pandoc' binary to be installed and on PATH.

    Args:
      filename: The .docx file to convert.
    Returns:
      The RST output as a list of right-stripped lines.
    """
    rst_string = subprocess.check_output(
        ['pandoc', '-f', 'docx', '-t', 'rst', filename],
        shell=False, encoding='utf8')
    return [s.rstrip() for s in rst_string.splitlines()]
def ConvertDocx(filename: str) -> str:
    """Convert a .docx file to RST, restoring fixed-width blockquotes.

    Pandoc's RST output loses the inline bold/italic formatting inside
    fixed-width blocks. This extracts the blocks from the raw .docx (with
    formatting), matches them to the corresponding pandoc blockquotes via a
    normalized key, and substitutes the formatted versions into the RST.

    Args:
      filename: The .docx file to convert.
    Returns:
      The converted RST document as a single string.
    """
    blocks_docx = GetDocxBlocks(filename, ConvertToRst)
    lines_txt = PandocDocxToRst(filename)
    blocks_txt = GetRstBlocks(lines_txt)
    # Index both sides by a normalized comparison key.
    docx = [(ComputeKey(block, True), block)
            for block in blocks_docx]
    map_docx = dict(docx)
    map_txt = {ComputeKey(block, True): (minline, maxline, block)
               for minline, maxline, block in blocks_txt}

    # print('BLOCKS_DOCX')
    # for block in blocks_docx:
    #     print('*' * 80)
    #     for line in block:
    #         print(repr(line))
    #     print()
    # print('BLOCKS_TXT')
    # for block in blocks_txt:
    #     print('*' * 80)
    #     for line in block:
    #         print(repr(line))
    #     print()

    # Pair up blocks whose keys match; pop them so leftovers can be reported.
    matches = []
    for key, block in docx:
        # print('-' * 80)
        # print('KEY', key)
        try:
            minline, maxline, block_txt = map_txt.pop(key)
            block_docx = map_docx.pop(key)
            matches.append((key, (minline, maxline), block_txt, block_docx))
        except KeyError:
            pass

    # Trace matching lines.
    if 0:
        print('MATCHING')
        for key, (minline, maxline), block_txt, block_docx in matches:
            print('-' * 120)
            print('DOCX')
            for line in block_docx:
                print(repr(line))
            print('TXT')
            for line in block_txt:
                print(repr(line))
            #print('=' * 120)

    # Report blocks that could not be paired (diagnostics only).
    if map_docx:
        print('NOTFOUND DOCX:')
        for key, block in map_docx.items():
            print('-' * 120)
            for line in block:
                print(repr(line))
    if map_txt:
        print('NOTFOUND TXT:')
        for key, (minline, maxline, block) in map_txt.items():
            print('-' * 120)
            for line in block:
                print(repr(line))

    # Replace blocks in the original md file. 'offset' tracks how much the
    # earlier replacements shifted subsequent line numbers.
    offset = 0
    for key, (minline, maxline), block_txt, block_docx in matches:
        minline += offset
        maxline += offset
        del lines_txt[minline:maxline+1]
        new_lines = [' | {}'.format(line) for line in block_docx]
        lines_txt[minline:minline] = new_lines
        offset += len(new_lines) - (maxline + 1 - minline)

    return os.linesep.join(lines_txt)
3,227 | import argparse
import logging
import zipfile
import io
import re
import os
import threading
import subprocess
from os import path
from typing import List
from typing import Tuple
from pprint import pprint
import bs4
def FindDocxFiles(root: str):
    """Yield the paths of all .docx files under 'root'.

    Args:
      root: A directory to search recursively, or a single filename.
    Yields:
      Paths of files whose names end in '.docx'.
    """
    # Bug fix: the previous pattern '.docx$' left the dot unescaped, so any
    # name merely *ending* in 'docx' (e.g. 'xdocx') matched as well.
    docx_pattern = re.compile(r'\.docx$')
    if path.isdir(root):
        for dirpath, _, files in os.walk(root):
            for filename in files:
                if docx_pattern.search(filename):
                    yield path.join(dirpath, filename)
    elif docx_pattern.search(root):
        yield root
3,228 | import logging
import os
import subprocess
import re
import pickle
import hashlib
import shelve
from os import path
import httplib2
from oauth2client import service_account
The provided code snippet includes necessary dependencies for implementing the `find_index_document` function. Write a Python function `def find_index_document(files)` to solve the following problem:
Find the document of Beancount index. Args: files: A Cached API client object with Google Drive scope. Returns: A string, the document id.
Here is the function:
def find_index_document(files):
    """Find the document id of the Beancount index document.

    Args:
      files: A Cached API client object with Google Drive scope.
    Returns:
      A string, the document id.
    Raises:
      ValueError: If the query does not match exactly one file.
    """
    query = "name = 'Beancount - Index'"
    listing = files.list(q=query).execute()
    # Note: 'files' is rebound here from the API resource to the result list.
    files = listing['files']
    if len(files) != 1:
        raise ValueError("Could not find the index file: "
                         "{} files matched".format(len(files)))
    # Exactly one match at this point; return its id.
    for file in files:
        return file['id']
3,229 | import logging
import os
import subprocess
import re
import pickle
import hashlib
import shelve
from os import path
import httplib2
from oauth2client import service_account
The provided code snippet includes necessary dependencies for implementing the `enumerate_linked_documents` function. Write a Python function `def enumerate_linked_documents(files, indexid)` to solve the following problem:
Given a document id, enumerate the links within it. Args: files: A Cached API client object with Google Drive scope. indexid: A string, a document id. Returns: A list of link strings.
Here is the function:
def enumerate_linked_documents(files, indexid):
    """Given a document id, enumerate the links within it.

    Args:
      files: A Cached API client object with Google Drive scope.
      indexid: A string, a document id.
    Returns:
      A list of document id strings, starting with `indexid` itself.
    """
    exported = files.export(fileId=indexid, mimeType='text/html').execute()
    html = exported.decode('utf8')
    # Seed with the index document; append each new docid found in a link.
    seen = [indexid]
    pattern = 'https?://docs.google.com/document/d/([^/";&]+)'
    for match in re.finditer(pattern, html):
        candidate = match.group(1)
        if candidate not in seen:
            seen.append(candidate)
    return seen
3,230 | import logging
import os
import subprocess
import re
import pickle
import hashlib
import shelve
from os import path
import httplib2
from oauth2client import service_account
CONVERSION_MAP = {
'html': ('text/html', None),
'txt': ('text/plain', None),
'rtf': ('application/rtf', None),
'pdf': ('application/pdf', convert_pdf),
'odt': ('application/vnd.oasis.opendocument.text', None),
'docx': ('application/vnd.openxmlformats-officedocument.wordprocessingml.document',
None),
}
The provided code snippet includes necessary dependencies for implementing the `download_docs` function. Write a Python function `def download_docs(files, docids, outdir, extension)` to solve the following problem:
Download all the Beancount documents to a temporary directory. Args: files: A Cached API client object with Google Drive scope. docids: A list of string, the document ids to download. outdir: A string, the name of the directory where to store the files. extension: A string, the extension of the requested documents. Returns: A list of string, the names of the downloaded files.
Here is the function:
def download_docs(files, docids, outdir, extension):
    """Download all the Beancount documents to a temporary directory.

    Args:
      files: A Cached API client object with Google Drive scope.
      docids: A list of string, the document ids to download.
      outdir: A string, the name of the directory where to store the files.
      extension: A string, the extension of the requested documents.
    Returns:
      A list of string, the names of the downloaded files.
    """
    mime_type, _ = CONVERSION_MAP[extension]
    downloaded = []
    for index, docid in enumerate(docids, 1):
        # Fetch the document metadata to derive a filesystem-friendly name.
        metadata = files.get(fileId=docid).execute()
        name = metadata['name']
        # Sanitize: non-alphanumerics -> '_', collapse runs, tidy '_-_'.
        clean_name = re.sub('_-_', '-',
                            re.sub('_+', '_',
                                   re.sub('[^A-Za-z0-9=-]', '_', name)))
        filename = path.join(outdir, '{}.{}'.format(clean_name, extension))
        logging.info('Exporting "{}" ({}) to {}'.format(name, docid, filename))
        with open(filename, 'wb') as outfile:
            exported = files.export(fileId=docid, mimeType=mime_type).execute()
            outfile.write(exported)
        # Skip (but keep on disk) empty downloads, which indicate a failure.
        if path.getsize(filename) == 0:
            logging.error("Invalid download, skipping file for '{}'.".format(docid))
            continue
        downloaded.append(filename)
    return downloaded
3,231 | import logging
import os
import subprocess
import re
import pickle
import hashlib
import shelve
from os import path
import httplib2
from oauth2client import service_account
def collate_pdf_filenames(filenames, output_filename):
    """Combine the list of PDF filenames together into a single file.

    Args:
      filenames: A list of filename strings.
      output_filename: A string, the name of the output file.
    Raises:
      SystemExit: If the 'pdftk' tool could not be invoked.
      IOError: If we could not produce the merged filename.
    """
    command = ['pdftk'] + filenames + ['cat', 'output', output_filename]
    try:
        # subprocess.run replaces the manual Popen/communicate pair and
        # exposes the return code directly.
        completed = subprocess.run(command, shell=False, check=False)
    except (FileNotFoundError, PermissionError) as exc:
        raise SystemExit('pdftk is probably not installed: {}'.format(exc)) from exc
    if completed.returncode != 0:
        raise IOError("Could not produce output '{}'".format(output_filename))
The provided code snippet includes necessary dependencies for implementing the `convert_pdf` function. Write a Python function `def convert_pdf(filenames, output)` to solve the following problem:
Process downloaded PDF files. Args: filenames: A list of filename strings. output: A string, the name of the output file.
Here is the function:
def convert_pdf(filenames, output):
    """Collate downloaded PDF files into a single output PDF.

    Thin wrapper over collate_pdf_filenames.

    Args:
      filenames: A list of filename strings.
      output: A string, the name of the output file.
    """
    collate_pdf_filenames(filenames, output)
3,232 | import logging
import os
import subprocess
import re
import pickle
import hashlib
import shelve
from os import path
import httplib2
from oauth2client import service_account
SERVICE_ACCOUNT_FILE = path.join(os.environ['HOME'],
'.google-apis-service-account.json')
The provided code snippet includes necessary dependencies for implementing the `get_auth_via_service_account` function. Write a Python function `def get_auth_via_service_account(scopes)` to solve the following problem:
Get an authenticated http object via a service account. Args: scopes: A string or a list of strings, the scopes to get credentials for. Returns: A pair of (credentials, http) objects, where 'http' is an authenticated http client object, from which you can use the Google APIs.
Here is the function:
def get_auth_via_service_account(scopes):
    """Get an authenticated http object via a service account.

    Args:
      scopes: A string or a list of strings, the scopes to get credentials for.
    Returns:
      A pair of (credentials, http) objects, where 'http' is an authenticated
      http client object, from which you can use the Google APIs.
    """
    creds = service_account.ServiceAccountCredentials.from_json_keyfile_name(
        SERVICE_ACCOUNT_FILE, scopes)
    http_client = httplib2.Http()
    creds.authorize(http_client)
    return creds, http_client
3,233 | import argparse
import logging
import os
import tempfile
import subprocess
import tempfile
from os import path
from apiclient import discovery
import docs
def pandoc(filename, informat):
    """Run pandoc on a file, converting `informat` to markdown via the docx filter.

    Args:
      filename: Path of the input document.
      informat: The pandoc input-format name (e.g. 'docx').
    Returns:
      The converted markdown, as a string.
    """
    # The filter script lives next to this module.
    filter_path = path.join(path.dirname(path.abspath(__file__)),
                            'convert_filter_docx.py')
    command = ['pandoc', '-f', informat, '-t', 'markdown',
               '--filter', filter_path,
               filename]
    print(' '.join(command))
    return subprocess.check_output(command).decode('utf8')
3,234 | import sys
import pandocfilters
def caps(key, value, format, meta):
    """Pandoc filter callback: dump BlockQuote node values to stderr.

    Always returns None, so the document passes through unchanged.
    """
    if key != 'BlockQuote':
        return
    print(value, file=sys.stderr)
3,235 | import os
import re
from os import path
BEANCOUNT_DIR = os.environ.get("BEANCOUNT_DIR",
path.expanduser("~/.beancount"))
The provided code snippet includes necessary dependencies for implementing the `get_file` function. Write a Python function `def get_file(filename)` to solve the following problem:
Return a filename ensuring its parent directory. Args: filename: A string, the filename. If the filename is relative, it is created relative to BEANCOUNT_DIR.
Here is the function:
def get_file(filename):
    """Return a filename ensuring its parent directory.

    Args:
      filename: A string, the filename. If the filename is relative, it is
        created relative to BEANCOUNT_DIR.
    Returns:
      The (possibly rebased) absolute filename, with its parent directory
      guaranteed to exist.
    """
    target = filename if path.isabs(filename) else path.join(BEANCOUNT_DIR, filename)
    os.makedirs(path.dirname(target), exist_ok=True)
    return target
3,236 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def _get(obj, path):
for comp in path.split('/'):
if comp not in obj:
return
obj = obj[comp]
return obj
TextRun = collections.namedtuple('TextRun', 'text family')
def parse_TextRun(contents):
family = _get(contents, 'textStyle/weightedFontFamily/fontFamily')
return TextRun(contents['content'], family) | null |
3,237 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_AutoText(contents):
    """Handler for an AutoText element; not supported by this converter."""
    raise NotImplementedError
3,238 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_PageBreak(contents):
    """Handler for a PageBreak element; intentionally ignored (returns None)."""
    pass
3,239 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_ColumnBreak(contents):
    """Handler for a ColumnBreak element; not supported by this converter."""
    raise NotImplementedError
3,240 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_FootnoteReference(contents):
    """Handler for a FootnoteReference; intentionally ignored (returns None)."""
    pass
    #raise NotImplementedError(pprint.pformat(contents))
3,241 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_HorizontalRule(contents):
    """Handler for a HorizontalRule element; intentionally ignored (returns None)."""
    pass
3,242 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_Equation(contents):
    """Handler for an Equation element; not supported by this converter."""
    raise NotImplementedError
3,243 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_InlineObjectElement(contents):
    """Handler for an InlineObjectElement; intentionally ignored (returns None)."""
    pass
    #raise NotImplementedError
3,244 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_SectionBreak(econtents):
    """Handler for a SectionBreak: validate its shape, produce no output."""
    assert econtents.keys() == {'sectionStyle'}, econtents
3,245 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_Table(econtents):
    """Handler for a Table element; intentionally ignored for now (returns None)."""
    pass
    #raise NotImplementedError
3,246 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
# A run of text together with its font family.
TextRun = collections.namedtuple('TextRun', 'text family')
def parse_Element(elem):
    """Dispatch a paragraph element to its parse_* handler via _dispatch."""
    return _dispatch(_dispatch_Element, elem)
def parse_Paragraph(paragraph):
    """Parse a paragraph into (style-name, list of parsed elements).

    Consecutive TextRuns with the same family are fused into a single run,
    and hard newlines (\\x0b) are converted to soft ones.
    """
    style = paragraph['paragraphStyle']['namedStyleType']
    # Compress runs of text together.
    parelems = []
    for element in paragraph['elements']:
        pelem = parse_Element(element)
        if isinstance(pelem, TextRun):
            # Merge with the previous element when it is a TextRun of the
            # same family.
            last = parelems[-1] if parelems else None
            if last and isinstance(last, TextRun) and last.family == pelem.family:
                parelems.pop(-1)
                pelem = TextRun(last.text + pelem.text, last.family)
            parelems.append(pelem)
        else:
            # Non-text elements are expected to parse to None (ignored).
            assert pelem is None
    # Convert all the hard newlines to soft ones.
    parelems = [elem._replace(text=elem.text.replace('\x0b', '\n'))
                if isinstance(elem, TextRun)
                else elem
                for elem in parelems]
    return (style, parelems)
3,247 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_TableOfContents(econtents):
    """Handler for a TableOfContents: validate its shape, produce no output."""
    assert econtents.keys() == {'content'}, econtents.keys()
3,248 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def parse_Body(body):
    """Parse a document body: the non-None parsed structural elements, in order."""
    assert set(body.keys()) == {'content'}
    parsed = (parse_StructuralElement(selem) for selem in body['content'])
    # Drop the elements whose handlers returned None (ignored element types).
    return [item for item in parsed if item]
def parse_Document(document):
    """Return a (title, parsed-body) pair for a parsed document."""
    return (document['title'], parse_Body(document['body']))
3,249 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
The provided code snippet includes necessary dependencies for implementing the `remove_default_fonts` function. Write a Python function `def remove_default_fonts(body, default_font='Cambria')` to solve the following problem:
Remove text runs with the default font.
Here is the function:
def remove_default_fonts(body, default_font='Cambria'):
    """Remove text runs with the default font.

    Runs whose family equals `default_font` get family=None; everything else
    is left untouched.  Returns a new body (the input is not mutated).
    """
    stripped = []
    for etype, runs in body:
        cleaned = [run._replace(family=None) if run.family == default_font else run
                   for run in runs]
        stripped.append((etype, cleaned))
    return stripped
3,250 | from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
The provided code snippet includes necessary dependencies for implementing the `merge_runs` function. Write a Python function `def merge_runs(body)` to solve the following problem:
Merge consecutive text runs with the same font.
Here is the function:
def merge_runs(body):
    """Merge consecutive text runs with the same font.

    Args:
      body: A list of (element-type, runs) pairs, where each run has `.text`
        and `.family` attributes (e.g. a TextRun namedtuple).
    Returns:
      A new body where adjacent runs sharing a family are fused into one run.
    """
    new_body = []
    for etype, runs in body:
        new_runs = []
        last_run = None
        for run in runs:
            if last_run is not None and run.family == last_run.family:
                # Fuse with the immediately preceding run of the same family.
                run = last_run._replace(text=(last_run.text + run.text))
                new_runs.pop(-1)
            # BUGFIX: always track the run just emitted.  The original only
            # updated last_run on the first run and on merges, so a later run
            # could merge with a stale, non-adjacent run and silently drop
            # the runs in between.
            last_run = run
            new_runs.append(run)
        new_body.append((etype, new_runs))
    return new_body
3,251 | from os import path
import argparse
import re
import textwrap
def get_deps(filename):
    """Extract the beancount module names imported by a Python source file.

    Args:
      filename: Path of the Python source file to scan.
    Returns:
      A set of dotted module-name strings.
    """
    deps = set()
    # BUGFIX: use a context manager; the original left the file handle open.
    with open(filename) as srcfile:
        for line in srcfile:
            if 'beancount' not in line:
                continue
            # Strip "as <alias>" so only the module path remains.
            line = re.sub(r" as (.*)$", "", line)
            dep = None
            match = re.match(r"from (.*) import (.*)$", line)
            if match:
                # A lowercase imported name is assumed to be a submodule;
                # otherwise the dependency is just the package.
                if re.match(r"[a-z]", match.group(2)):
                    dep = ".".join(match.group(1, 2))
                else:
                    dep = match.group(1)
            else:
                match = re.match(r"import (.*)$", line)
                if match:
                    dep = match.group(1)
            if dep is not None:
                deps.add(dep)
    return deps
3,252 | from os import path
import argparse
import re
import textwrap
def render(filename, deps):
    """Print a Bazel py_library/py_test rule for a source file and its deps.

    Args:
      filename: Path to the Python source file.
      deps: An iterable of dotted beancount module names (as from get_deps).
    """
    basename = path.basename(filename)
    name = basename.replace(".py", "")
    # Test files get a py_test rule; everything else a py_library.
    target = "py_test" if re.search(r"_test.py$", basename) else "py_library"
    print(textwrap.dedent(f"""\
        {target}(
            name = "{name}",
            srcs = ["{basename}"],
            deps = ["""))
    for dep in sorted(deps):
        # Map "beancount.core.data" to the Bazel label "//beancount/core:data"
        # by replacing the last '/' with ':'.
        libdep = dep.replace(".", "/")
        sdep = list(libdep)
        sdep[libdep.rindex("/")] = ":"
        libdep = "".join(sdep)
        print(r'        "//{}",'.format(libdep))
    print(textwrap.dedent("""\
        ],
    )
    """))
3,253 | from typing import List, Optional, Dict, Any, Mapping, Iterator, Callable, Tuple
import argparse
import json
import functools
import re
from googleapiclient import discovery
from beancount.tools import gapis
def iter_links(document: Json) -> Iterator[Tuple[str, str, Json]]:
    """Find all the links in the document and yield them.

    Yields:
      (url, content, item) triples: the link target, the visible text of the
      run, and the raw item (carrying startIndex/endIndex) it came from.
    """
    for jpath in find_links(document, 'link'):
        for item in jpath:
            if 'textRun' in item:
                content = item['textRun']['content']
                link = item['textRun']['textStyle']['link']
                # Some link styles carry no 'url' key; skip those.
                # NOTE(review): presumably these are internal bookmark links
                # -- confirm against the Docs API schema.
                if 'url' not in link:
                    continue
                url = link['url']
                yield (url, content, item)
def process_links(document: Json,
                  func: Callable[[str, str], Optional[str]]) -> List[Json]:
    """Find all the links and prepare updates.

    Outputs a list of batchUpdate requests to apply: one updateTextStyle
    request for every link for which `func` proposes a replacement URL.
    """
    updates = []
    for url, content, item in iter_links(document):
        replacement = func(url, content)
        if not replacement:
            continue
        updates.append({
            'updateTextStyle': {
                'range': {'startIndex': item['startIndex'],
                          'endIndex': item['endIndex']},
                'textStyle': {'link': {'url': replacement}},
                'fields': 'link'
            }})
    return updates
def propose_url(mapping: Dict[str, str], url: str, unused_content: str) -> Optional[str]:
    """Process a URL, and optionally propose a replacement.

    Returns the mapped replacement URL, or None when no replacement is
    registered (equivalent to the original try/except KeyError form).
    """
    return mapping.get(url)
The provided code snippet includes necessary dependencies for implementing the `transform_links` function. Write a Python function `def transform_links(service, docid: str, mapping: Dict[str, str], dry_run: bool)` to solve the following problem:
Run the link transformation.
Here is the function:
def transform_links(service, docid: str, mapping: Dict[str, str], dry_run: bool):
    """Run the link transformation.

    Args:
      service: An authenticated Google Docs API service object.
      docid: The id of the document to process.
      mapping: Mapping from existing URL to replacement URL.
      dry_run: If true, only print the links and an identity-mapping JSON
        template; do not write anything back.
    """
    # Get the document.
    document = service.documents().get(documentId=docid).execute()
    links = list(iter_links(document))
    # BUGFIX: the original computed max() over an empty sequence when the
    # document contained no links, raising ValueError.
    if links:
        width = max(len(url) for url, _, __ in links)
        for url, content, _ in links:
            print(f"# {url:{width}} {content}")
        print()
    if dry_run:
        # Print the links only, as a JSON identity-mapping template.
        string = json.dumps({url: url for url, _, __ in links}, indent=4, sort_keys=True)
        string = re.sub(r'",', '",\n', re.sub(r": ", "\n ", string))
        print(string)
    # Create replacement requests.
    requests = process_links(document, functools.partial(propose_url, mapping))
    if dry_run:
        return
    # Put together a batch update.
    if requests:
        print("Sending {} requests".format(len(requests)))
        # Apply in reverse so earlier edits do not shift the indices of
        # later ones.
        service.documents().batchUpdate(
            documentId=docid,
            body={'requests': list(reversed(requests))}).execute()
    else:
        print("No changes.")
3,254 | import argparse
import logging
import re
import os
from os import path
from apiclient import discovery
import httplib2
from apiclient.http import MediaInMemoryUpload
from oauth2client import service_account
from beancount.parser import options
from beancount.utils import test_utils
The provided code snippet includes necessary dependencies for implementing the `replace_gdocs_document` function. Write a Python function `def replace_gdocs_document(http, docid, title, contents)` to solve the following problem:
Upload new contents for a Google Doc for a plain/text file. Args: http: An http connection object with drive credentials. docid: A string, the ID of the document. title: A string, the title of the document. contents: A string, the body of the document.
Here is the function:
def replace_gdocs_document(http, docid, title, contents):
    """Upload new contents for a Google Doc for a plain/text file.

    Args:
      http: An http connection object with drive credentials.
      docid: A string, the ID of the document.
      title: A string, the title of the document.
      contents: A string, the body of the document.
    Returns:
      The API response of the update call.
    """
    drive = discovery.build('drive', 'v3', http=http)
    media = MediaInMemoryUpload(contents.encode('utf8'),
                                mimetype='text/plain',
                                resumable=True)
    request = drive.files().update(fileId=docid,
                                   body={'name': title},
                                   media_body=media)
    return request.execute()
3,255 | import argparse
import logging
import re
import os
from os import path
from apiclient import discovery
import httplib2
from apiclient.http import MediaInMemoryUpload
from oauth2client import service_account
from beancount.parser import options
from beancount.utils import test_utils
The provided code snippet includes necessary dependencies for implementing the `get_options_docid` function. Write a Python function `def get_options_docid()` to solve the following problem:
Find the options doc id from the redirect file. Returns: The id of the doc to fix up.
Here is the function:
def get_options_docid():
    """Find the options doc id from the redirect file.

    Returns:
      The id of the doc to fix up.
    """
    conf_filename = path.join(test_utils.find_repository_root(__file__), '.nginx.conf')
    pattern = re.compile(r'.*/doc/options.*(https?://docs.google.com/.*);')
    with open(conf_filename) as conf_file:
        matches = [m for m in map(pattern.match, conf_file.readlines()) if m]
    assert len(matches) == 1
    # The docid is the last non-empty path component of the matched URL.
    return [part for part in matches[0].group(1).split('/') if part][-1]
3,256 | import argparse
import logging
import re
import os
from os import path
from apiclient import discovery
import httplib2
from apiclient.http import MediaInMemoryUpload
from oauth2client import service_account
from beancount.parser import options
from beancount.utils import test_utils
SERVICE_ACCOUNT_FILE = os.path.expanduser('~/.google-apis-service-account.json')
The provided code snippet includes necessary dependencies for implementing the `get_auth_via_service_account` function. Write a Python function `def get_auth_via_service_account(scopes)` to solve the following problem:
Get an authenticated http object via a service account. Args: scopes: A string or a list of strings, the scopes to get credentials for. Returns: A pair of (credentials, http) objects, where 'http' is an authenticated http client object, from which you can use the Google APIs.
Here is the function:
def get_auth_via_service_account(scopes):
    """Get an authenticated http object via a service account.

    Args:
      scopes: A string or a list of strings, the scopes to get credentials for.
    Returns:
      A pair of (credentials, http) objects, where 'http' is an authenticated
      http client object, from which you can use the Google APIs.
    """
    http = httplib2.Http()
    credentials = service_account.ServiceAccountCredentials.from_json_keyfile_name(
        SERVICE_ACCOUNT_FILE, scopes)
    credentials.authorize(http)
    return credentials, http
3,257 | import ast
import os
import argparse
import logging
import sys
from os import path
def find_files(rootdir):
    """Yield rootdir itself if it is a file, plus every .py file under it.

    Note: os.walk on a plain file yields nothing, so the walk below is a
    no-op in the single-file case.
    """
    if path.isfile(rootdir):
        yield rootdir
    for dirpath, _dirs, filenames in os.walk(rootdir):
        for name in filenames:
            if name.endswith('.py'):
                yield path.join(dirpath, name)
3,258 | import ast
import os
import argparse
import logging
import sys
from os import path
def get_name(node):
    """Return the identifier of a Name or Attribute AST node, else None."""
    if isinstance(node, ast.Attribute):
        return node.attr
    if isinstance(node, ast.Name):
        return node.id
    return None
3,259 | import sys
import argparse
import logging
def gen_inputs(template, args):
    """Write `template` formatted once per subset of `args` blanked out.

    Each bit of the mask selects one argument to replace with the empty
    string, producing 2**len(args) formatted copies on stdout.
    """
    num_args = len(args)
    for mask in range(1 << num_args):
        subset = ['' if mask & (1 << i) else arg
                  for i, arg in enumerate(args)]
        sys.stdout.write(template.format(*subset))
3,260 | import argparse
import logging
import os
import shutil
import tempfile
import subprocess
from os import path
OVERLAY_DOCS = {
'Beancount-Motivation' : 'cookbook/cl_cookbook.rst',
'Beancount-Trading_with_Beancount' : 'cookbook/trading.rst',
'Beancount-Cookbook-Vesting' : 'cookbook/stock_vesting.rst',
'Beancount-Cookbook-Sharing_Expenses' : 'cookbook/sharing_expenses.rst',
}
The provided code snippet includes necessary dependencies for implementing the `convert` function. Write a Python function `def convert(inputdir, sphinxdir)` to solve the following problem:
Process downloaded OpenOffice files to reStructuredText. Args: inputdir: The name of an input directory with ODT files. sphinxdir: A string, the name of the beancount-docs directory.
Here is the function:
def convert(inputdir, sphinxdir):
    """Process downloaded OpenOffice files to reStructuredText.

    Converts each document listed in OVERLAY_DOCS from ODT to RST with
    pandoc, copies it over the corresponding file in the Sphinx tree, and
    rebuilds the HTML docs.

    Args:
      inputdir: The name of an input directory with ODT files.
      sphinxdir: A string, the name of the beancount-docs directory.
    """
    # NOTE(review): 'filenames' is computed but never used below; its only
    # effect is to fail early if 'inputdir' does not exist -- confirm intent.
    filenames = [path.join(inputdir, x) for x in os.listdir(inputdir)]
    with tempfile.TemporaryDirectory(suffix=None, prefix=None, dir=None) as tmpdir:
        # NOTE(review): 'destname' is unused; the destination is re-derived
        # below via the OVERLAY_DOCS lookup on 'basename'.
        for basename, destname in sorted(OVERLAY_DOCS.items()):
            filename_odt = path.join(inputdir, basename + '.odt')
            logging.info("Processing %s", filename_odt)
            basename = path.splitext(path.basename(filename_odt))[0]
            filename_rst = path.join(tmpdir, basename + '.rst')
            # Convert one ODT document to reStructuredText.
            subprocess.check_call(
                ['pandoc', '-f', 'odt', '-t', 'rst', '-o', filename_rst, filename_odt],
                shell=False)
            try:
                overlay = OVERLAY_DOCS[basename]
                filename_overlay = path.join(sphinxdir, overlay)
                logging.info("Copying %s to %s", filename_rst, filename_overlay)
                shutil.copyfile(filename_rst, filename_overlay)
            except KeyError:
                # No overlay destination registered for this document.
                pass
    # Rebuild the Sphinx HTML output with the overlaid files in place.
    subprocess.check_call(['make', 'html'], cwd=sphinxdir)
3,261 | import sys
import unicodedata
import argparse
from itertools import count, groupby
from collections import defaultdict
The provided code snippet includes necessary dependencies for implementing the `categorize_unicode` function. Write a Python function `def categorize_unicode()` to solve the following problem:
Return a dictionary mapping Unicode general category names to the characters that belong to them.
Here is the function:
def categorize_unicode(max_codepoint=sys.maxunicode):
    """
    Return a dictionary mapping Unicode general category names to the
    characters that belong to them.

    Args:
      max_codepoint: Highest code point (inclusive) to categorize; defaults
        to sys.maxunicode, preserving the original behavior.  Exposed as a
        parameter so smaller ranges can be categorized cheaply.
    Returns:
      A dict mapping category name (e.g. 'Lu') to a list of characters.
    """
    bycat = defaultdict(list)
    for c in map(chr, range(max_codepoint + 1)):
        bycat[unicodedata.category(c)].append(c)
    return bycat
3,262 | import sys
import unicodedata
import argparse
from itertools import count, groupby
from collections import defaultdict
def groupby_sequences(iterable, keyfunc):
    """
    Group items of iterable in groups such that the result of applying keyfunc
    to them is consecutive.
    """
    key = (lambda x: x) if keyfunc is None else keyfunc
    counter = count()
    # A run of consecutive keys minus a running counter is constant, so
    # groupby cuts the iterable exactly at the gaps.
    return groupby(iterable, lambda item: key(item) - next(counter))
def py_unicode_range_literal(beg, end):
    r"""
    Convert a range of characters into a unicode range literal
    (e.g. \u0064-\u006f) for a Python regular expression's bracket
    expression.  A single-character range collapses to one literal.
    """
    if beg == end:
        return py_unicode_literal(beg)
    return "{}-{}".format(py_unicode_literal(beg),
                          py_unicode_literal(end))
The provided code snippet includes necessary dependencies for implementing the `py_unicode_ranges` function. Write a Python function `def py_unicode_ranges(chars)` to solve the following problem:
Convert a set of characters into an string to be used in a Python regular expression's bracket expression (e.g. \u0062\u0064-\u006f).
Here is the function:
def py_unicode_ranges(chars):
    r"""
    Convert a set of characters into a string to be used in a Python
    regular expression's bracket expression (e.g. \u0062\u0064-\u006f).
    """
    pieces = []
    # Each run of consecutive code points compresses to one range literal.
    for _, run in groupby_sequences(chars, ord):
        run = list(run)
        pieces.append(py_unicode_range_literal(run[0], run[-1]))
    return "r'" + "".join(pieces) + "'"
3,263 | import sys
import unicodedata
import argparse
from itertools import count, groupby
from collections import defaultdict
def list_chunks(l, n):
    """Split list in chunks of size n (the last chunk may be shorter)."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def groupby_sequences(iterable, keyfunc):
    """
    Group items of iterable in groups such that the result of applying keyfunc
    to them is consecutive.
    """
    if keyfunc is None:
        def keyfunc(x):
            return x
    def gap_key(item, _counter=count()):
        # Consecutive keys map to a constant offset against the counter,
        # so groupby splits the iterable at every gap.
        return keyfunc(item) - next(_counter)
    return groupby(iterable, gap_key)
def bytes_prefix(bs):
"""
Return all but the last byte from a byte sequence.
"""
return bs[0:-1]
def bytes_last(bs):
    """Return the final byte of the byte sequence bs (as an int for bytes)."""
    # Negative indexing selects the last element directly.
    return bs[-1]
def lex_byte_range_literal(prefix, beg, end):
    """Render a lex pattern matching the byte sequences prefix+b for every
    byte b in [beg, end]: the prefix bytes as literals, followed either by a
    single byte literal (beg == end) or a [x-y] bracket range.
    """
    if beg == end:
        suffix = lex_byte_literal(beg)
    else:
        suffix = "[{}-{}]".format(lex_byte_literal(beg), lex_byte_literal(end))
    return lex_byte_literals(prefix) + suffix
The provided code snippet includes necessary dependencies for implementing the `lex_unicode_ranges` function. Write a Python function `def lex_unicode_ranges(name, chars)` to solve the following problem:
Convert a set of characters into a string to be used in a lex regular expression
Here is the function:
def lex_unicode_ranges(name, chars):
    """Render a lex definition named `name` matching any character in `chars`.

    Each character is UTF-8 encoded; sequences sharing all-but-last byte and
    having consecutive final bytes collapse into one byte-range pattern. When
    more than MAXPATS alternatives result, they are split into numbered
    sub-definitions ("name-0", "name-1", ...) which the main definition then
    references, to keep each lex line manageable.

    Args:
      name: The lex definition name to emit.
      chars: Characters to match; assumed ordered by code point so that
        groupby produces maximal runs — TODO confirm against callers.
    Returns:
      One or more lex definition lines as a single string.
    """
    MAXPATS = 50
    bss = [c.encode("utf-8") for c in chars]
    pats = []
    for prefix, byprefix in groupby(bss, bytes_prefix):
        for _, seq in groupby_sequences(byprefix, bytes_last):
            seq = list(seq)
            # First/last final bytes of the run bound one byte-range pattern.
            pats.append(lex_byte_range_literal(prefix, seq[0][-1], seq[-1][-1]))
    if len(pats) > MAXPATS:
        # Avoid the original's shadowing of `pats` by the chunk variable and
        # its manual counter; build the lines in a list and join once.
        lines = []
        partnames = []
        for i, chunk in enumerate(list_chunks(pats, MAXPATS)):
            partname = "{}-{}".format(name, i)
            partnames.append(partname)
            lines.append("{}\t{}\n".format(partname, "|".join(chunk)))
        lines.append("{}\t{{{}}}".format(name, "}|{".join(partnames)))
        return "".join(lines)
    return "{}\t{}".format(name, "|".join(pats))
3,264 | import argparse
import os
import subprocess
The provided code snippet includes necessary dependencies for implementing the `prevent_run_with_changes` function. Write a Python function `def prevent_run_with_changes()` to solve the following problem:
Fail if some local changes exist.
Here is the function:
def prevent_run_with_changes():
    """Raise RuntimeError if the git working tree has uncommitted changes.

    `git status --short` prints nothing when the tree is clean, so any
    output at all means local changes exist.
    """
    status = subprocess.check_output(["git", "status", "--short"])
    if status:
        raise RuntimeError("Local changes exist; exiting.")
3,265 | import argparse
import os
import subprocess
The provided code snippet includes necessary dependencies for implementing the `benchmark_revision` function. Write a Python function `def benchmark_revision(beancount_file: str, revision: str)` to solve the following problem:
Run the benchmark on a particular revision.
Here is the function:
def benchmark_revision(beancount_file: str, revision: str):
    """Build the given git revision and benchmark bean-check on a file.

    WARNING: destroys uncommitted local changes (`make clean` followed by
    `git reset --hard HEAD`) before checking out `revision`.

    Args:
      beancount_file: Path to the ledger file to run bean-check over.
      revision: Git revision (hash, branch, tag) to benchmark.
    Raises:
      subprocess.CalledProcessError: If any build/git/benchmark step fails.
    """
    # NOTE: the original used stdout=subprocess.PIPE with check_call(), which
    # can deadlock once the OS pipe buffer fills since nothing reads the pipe
    # (see subprocess docs). Discard the output explicitly instead.
    args = {'shell': False, 'stdout': subprocess.DEVNULL}
    # Clean up local files. WARNING.
    subprocess.check_call(["make", "clean"], **args)
    reset_command = ["git", "reset", "--hard", "HEAD"]
    subprocess.check_call(reset_command, **args)
    # Checkout the desired revision.
    checkout_command = ["git", "checkout", revision]
    subprocess.check_call(checkout_command, **args)
    # Build from scratch.
    subprocess.check_call(["make", "clean", "build"], **args)
    # Run a number of iterations under hyperfine; the filename is passed via
    # the environment ($L) so the benchmarked command string stays constant.
    meta_command = ["hyperfine", "--warmup=2", "--min-runs=30"]
    run_command = ["bean-check --no-cache $L"]
    env = os.environ.copy()
    env["L"] = beancount_file
    subprocess.check_call(meta_command + run_command,
                          shell=False, env=env)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.