hexsha (string, len 40) | size (int64, 4..1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4..209) | max_stars_repo_name (string, len 5..121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1..10) | max_stars_count (int64, 1..191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4..209) | max_issues_repo_name (string, len 5..121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1..10) | max_issues_count (int64, 1..67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4..209) | max_forks_repo_name (string, len 5..121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1..10) | max_forks_count (int64, 1..105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4..1.02M) | avg_line_length (float64, 1.07..66.1k) | max_line_length (int64, 4..266k) | alphanum_fraction (float64, 0.01..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c5aa1b6e2e30133c5c77e7e2e17ad7ff9d06114 | 22,699 | py | Python | chia/consensus/block_creation.py | GabrielFraga962/chia-blockchain | 313cf27eeefdf6709d3742b437995b7a9442d5a7 | ["Apache-2.0"] | null | null | null | chia/consensus/block_creation.py | GabrielFraga962/chia-blockchain | 313cf27eeefdf6709d3742b437995b7a9442d5a7 | ["Apache-2.0"] | null | null | null | chia/consensus/block_creation.py | GabrielFraga962/chia-blockchain | 313cf27eeefdf6709d3742b437995b7a9442d5a7 | ["Apache-2.0"] | null | null | null |
import logging
import random
from dataclasses import replace
from typing import Callable, Dict, List, Optional, Tuple
import blspy
from blspy import G1Element, G2Element
from chiabip158 import PyBIP158
from chia.consensus.block_record import BlockRecord
from chia.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from chia.consensus.blockchain_interface import BlockchainInterface
from chia.consensus.coinbase import create_farmer_coin, create_pool_coin
from chia.consensus.constants import ConsensusConstants
from chia.consensus.cost_calculator import NPCResult, calculate_cost_of_program
from chia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from chia.full_node.signage_point import SignagePoint
from chia.types.blockchain_format.coin import Coin, hash_coin_list
from chia.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.reward_chain_block import RewardChainBlock, RewardChainBlockUnfinished
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.vdf import VDFInfo, VDFProof
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.unfinished_block import UnfinishedBlock
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.merkle_set import MerkleSet
from chia.util.prev_transaction_block import get_prev_transaction_block
from chia.util.recursive_replace import recursive_replace
log = logging.getLogger(__name__)
# TODO: address hint error and remove ignore
# error: Incompatible default for argument "seed" (default has type "bytes", argument has type "bytes32")
# [assignment]
def create_foliage(
constants: ConsensusConstants,
reward_block_unfinished: RewardChainBlockUnfinished,
block_generator: Optional[BlockGenerator],
aggregate_sig: G2Element,
additions: List[Coin],
removals: List[Coin],
prev_block: Optional[BlockRecord],
blocks: BlockchainInterface,
total_iters_sp: uint128,
timestamp: uint64,
farmer_reward_puzzlehash: bytes32,
pool_target: PoolTarget,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
seed: bytes32 = b"", # type: ignore[assignment]
) -> Tuple[Foliage, Optional[FoliageTransactionBlock], Optional[TransactionsInfo]]:
"""
Creates a foliage for a given reward chain block. This may or may not be a tx block. In the case of a tx block,
the return values are not None. This is called at the signage point, so some of this information may be
tweaked at the infusion point.
Args:
constants: consensus constants being used for this chain
reward_block_unfinished: the reward block to look at, potentially at the signage point
block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transactions (or infinity element)
prev_block: the previous block at the signage point
blocks: dict from header hash to blocks, of all ancestor blocks
total_iters_sp: total iters at the signage point
timestamp: timestamp to put into the foliage block
farmer_reward_puzzlehash: where to pay out farming reward
pool_target: where to pay out pool reward
get_plot_signature: retrieve the signature corresponding to the plot public key
get_pool_signature: retrieve the signature corresponding to the pool public key
seed: seed to randomize block
"""
if prev_block is not None:
res = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
is_transaction_block: bool = res[0]
prev_transaction_block: Optional[BlockRecord] = res[1]
else:
# Genesis is a transaction block
prev_transaction_block = None
is_transaction_block = True
random.seed(seed)
# Use the extension data to create different blocks based on header hash
extension_data: bytes32 = bytes32(random.randint(0, 100000000).to_bytes(32, "big"))
if prev_block is None:
height: uint32 = uint32(0)
else:
height = uint32(prev_block.height + 1)
# Create filter
byte_array_tx: List[bytes32] = []
tx_additions: List[Coin] = []
tx_removals: List[bytes32] = []
pool_target_signature: Optional[G2Element] = get_pool_signature(
pool_target, reward_block_unfinished.proof_of_space.pool_public_key
)
foliage_data = FoliageBlockData(
reward_block_unfinished.get_hash(),
pool_target,
pool_target_signature,
farmer_reward_puzzlehash,
extension_data,
)
foliage_block_data_signature: G2Element = get_plot_signature(
foliage_data.get_hash(),
reward_block_unfinished.proof_of_space.plot_public_key,
)
prev_block_hash: bytes32 = constants.GENESIS_CHALLENGE
if height != 0:
assert prev_block is not None
prev_block_hash = prev_block.header_hash
generator_block_heights_list: List[uint32] = []
if is_transaction_block:
cost = uint64(0)
# Calculate the cost of transactions
if block_generator is not None:
generator_block_heights_list = block_generator.block_height_list()
result: NPCResult = get_name_puzzle_conditions(
block_generator,
constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=constants.COST_PER_BYTE,
safe_mode=True,
)
cost = calculate_cost_of_program(block_generator.program, result, constants.COST_PER_BYTE)
removal_amount = 0
addition_amount = 0
for coin in removals:
removal_amount += coin.amount
for coin in additions:
addition_amount += coin.amount
spend_bundle_fees = removal_amount - addition_amount
else:
spend_bundle_fees = 0
reward_claims_incorporated = []
if height > 0:
assert prev_transaction_block is not None
assert prev_block is not None
curr: BlockRecord = prev_block
while not curr.is_transaction_block:
curr = blocks.block_record(curr.prev_hash)
assert curr.fees is not None
pool_coin = create_pool_coin(
curr.height, curr.pool_puzzle_hash, calculate_pool_reward(curr.height), constants.GENESIS_CHALLENGE
)
farmer_coin = create_farmer_coin(
curr.height,
curr.farmer_puzzle_hash,
uint64(calculate_base_farmer_reward(curr.height) + curr.fees),
constants.GENESIS_CHALLENGE,
)
assert curr.header_hash == prev_transaction_block.header_hash
reward_claims_incorporated += [pool_coin, farmer_coin]
if curr.height > 0:
curr = blocks.block_record(curr.prev_hash)
# Prev block is not genesis
while not curr.is_transaction_block:
pool_coin = create_pool_coin(
curr.height,
curr.pool_puzzle_hash,
calculate_pool_reward(curr.height),
constants.GENESIS_CHALLENGE,
)
farmer_coin = create_farmer_coin(
curr.height,
curr.farmer_puzzle_hash,
calculate_base_farmer_reward(curr.height),
constants.GENESIS_CHALLENGE,
)
reward_claims_incorporated += [pool_coin, farmer_coin]
curr = blocks.block_record(curr.prev_hash)
additions.extend(reward_claims_incorporated.copy())
for coin in additions:
tx_additions.append(coin)
# TODO: address hint error and remove ignore
# error: Argument 1 to "append" of "list" has incompatible type "bytearray"; expected "bytes32"
# [arg-type]
byte_array_tx.append(bytearray(coin.puzzle_hash)) # type: ignore[arg-type]
for coin in removals:
tx_removals.append(coin.name())
# TODO: address hint error and remove ignore
# error: Argument 1 to "append" of "list" has incompatible type "bytearray"; expected "bytes32"
# [arg-type]
byte_array_tx.append(bytearray(coin.name())) # type: ignore[arg-type]
bip158: PyBIP158 = PyBIP158(byte_array_tx)
encoded = bytes(bip158.GetEncoded())
removal_merkle_set = MerkleSet()
addition_merkle_set = MerkleSet()
# Create removal Merkle set
for coin_name in tx_removals:
removal_merkle_set.add_already_hashed(coin_name)
# Create addition Merkle set
puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}
for coin in tx_additions:
if coin.puzzle_hash in puzzlehash_coin_map:
puzzlehash_coin_map[coin.puzzle_hash].append(coin)
else:
puzzlehash_coin_map[coin.puzzle_hash] = [coin]
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle, coins in puzzlehash_coin_map.items():
addition_merkle_set.add_already_hashed(puzzle)
addition_merkle_set.add_already_hashed(hash_coin_list(coins))
additions_root = addition_merkle_set.get_root()
removals_root = removal_merkle_set.get_root()
generator_hash = bytes32([0] * 32)
if block_generator is not None:
generator_hash = std_hash(block_generator.program)
generator_refs_hash = bytes32([1] * 32)
if generator_block_heights_list not in (None, []):
generator_ref_list_bytes = b"".join([bytes(i) for i in generator_block_heights_list])
generator_refs_hash = std_hash(generator_ref_list_bytes)
filter_hash: bytes32 = std_hash(encoded)
transactions_info: Optional[TransactionsInfo] = TransactionsInfo(
generator_hash,
generator_refs_hash,
aggregate_sig,
uint64(spend_bundle_fees),
cost,
reward_claims_incorporated,
)
if prev_transaction_block is None:
prev_transaction_block_hash: bytes32 = constants.GENESIS_CHALLENGE
else:
prev_transaction_block_hash = prev_transaction_block.header_hash
assert transactions_info is not None
foliage_transaction_block: Optional[FoliageTransactionBlock] = FoliageTransactionBlock(
prev_transaction_block_hash,
timestamp,
filter_hash,
additions_root,
removals_root,
transactions_info.get_hash(),
)
assert foliage_transaction_block is not None
foliage_transaction_block_hash: bytes32 = foliage_transaction_block.get_hash()
foliage_transaction_block_signature: Optional[G2Element] = get_plot_signature(
foliage_transaction_block_hash, reward_block_unfinished.proof_of_space.plot_public_key
)
assert foliage_transaction_block_signature is not None
else:
# TODO: address hint error and remove ignore
# error: Incompatible types in assignment (expression has type "None", variable has type "bytes32")
# [assignment]
foliage_transaction_block_hash = None # type: ignore[assignment]
foliage_transaction_block_signature = None
foliage_transaction_block = None
transactions_info = None
assert (foliage_transaction_block_hash is None) == (foliage_transaction_block_signature is None)
foliage = Foliage(
prev_block_hash,
reward_block_unfinished.get_hash(),
foliage_data,
foliage_block_data_signature,
foliage_transaction_block_hash,
foliage_transaction_block_signature,
)
return foliage, foliage_transaction_block, transactions_info
# TODO: address hint error and remove ignore
# error: Incompatible default for argument "seed" (default has type "bytes", argument has type "bytes32")
# [assignment]
def create_unfinished_block(
constants: ConsensusConstants,
sub_slot_start_total_iters: uint128,
sub_slot_iters: uint64,
signage_point_index: uint8,
sp_iters: uint64,
ip_iters: uint64,
proof_of_space: ProofOfSpace,
slot_cc_challenge: bytes32,
farmer_reward_puzzle_hash: bytes32,
pool_target: PoolTarget,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
signage_point: SignagePoint,
timestamp: uint64,
blocks: BlockchainInterface,
seed: bytes32 = b"", # type: ignore[assignment]
block_generator: Optional[BlockGenerator] = None,
aggregate_sig: G2Element = G2Element(),
additions: Optional[List[Coin]] = None,
removals: Optional[List[Coin]] = None,
prev_block: Optional[BlockRecord] = None,
    finished_sub_slots_input: Optional[List[EndOfSubSlotBundle]] = None,
) -> UnfinishedBlock:
"""
Creates a new unfinished block using all the information available at the signage point. This will have to be
modified using information from the infusion point.
Args:
constants: consensus constants being used for this chain
sub_slot_start_total_iters: the starting sub-slot iters at the signage point sub-slot
sub_slot_iters: sub-slot-iters at the infusion point epoch
signage_point_index: signage point index of the block to create
sp_iters: sp_iters of the block to create
ip_iters: ip_iters of the block to create
proof_of_space: proof of space of the block to create
slot_cc_challenge: challenge hash at the sp sub-slot
farmer_reward_puzzle_hash: where to pay out farmer rewards
pool_target: where to pay out pool rewards
get_plot_signature: function that returns signature corresponding to plot public key
get_pool_signature: function that returns signature corresponding to pool public key
signage_point: signage point information (VDFs)
timestamp: timestamp to add to the foliage block, if created
seed: seed to randomize chain
block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transactions (or infinity element)
additions: Coins added in spend_bundle
removals: Coins removed in spend_bundle
prev_block: previous block (already in chain) from the signage point
blocks: dictionary from header hash to SBR of all included SBR
finished_sub_slots_input: finished_sub_slots at the signage point
    Returns:
        A new UnfinishedBlock for the given signage point, to be completed later at the infusion point.
"""
if finished_sub_slots_input is None:
finished_sub_slots: List[EndOfSubSlotBundle] = []
else:
finished_sub_slots = finished_sub_slots_input.copy()
overflow: bool = sp_iters > ip_iters
total_iters_sp: uint128 = uint128(sub_slot_start_total_iters + sp_iters)
is_genesis: bool = prev_block is None
new_sub_slot: bool = len(finished_sub_slots) > 0
cc_sp_hash: bytes32 = slot_cc_challenge
# Only enters this if statement if we are in testing mode (making VDF proofs here)
if signage_point.cc_vdf is not None:
assert signage_point.rc_vdf is not None
cc_sp_hash = signage_point.cc_vdf.output.get_hash()
rc_sp_hash = signage_point.rc_vdf.output.get_hash()
else:
if new_sub_slot:
rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash()
else:
if is_genesis:
rc_sp_hash = constants.GENESIS_CHALLENGE
else:
assert prev_block is not None
assert blocks is not None
curr = prev_block
while not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
assert curr.finished_reward_slot_hashes is not None
rc_sp_hash = curr.finished_reward_slot_hashes[-1]
signage_point = SignagePoint(None, None, None, None)
cc_sp_signature: Optional[G2Element] = get_plot_signature(cc_sp_hash, proof_of_space.plot_public_key)
rc_sp_signature: Optional[G2Element] = get_plot_signature(rc_sp_hash, proof_of_space.plot_public_key)
assert cc_sp_signature is not None
assert rc_sp_signature is not None
assert blspy.AugSchemeMPL.verify(proof_of_space.plot_public_key, cc_sp_hash, cc_sp_signature)
total_iters = uint128(sub_slot_start_total_iters + ip_iters + (sub_slot_iters if overflow else 0))
rc_block = RewardChainBlockUnfinished(
total_iters,
signage_point_index,
slot_cc_challenge,
proof_of_space,
signage_point.cc_vdf,
cc_sp_signature,
signage_point.rc_vdf,
rc_sp_signature,
)
if additions is None:
additions = []
if removals is None:
removals = []
(foliage, foliage_transaction_block, transactions_info,) = create_foliage(
constants,
rc_block,
block_generator,
aggregate_sig,
additions,
removals,
prev_block,
blocks,
total_iters_sp,
timestamp,
farmer_reward_puzzle_hash,
pool_target,
get_plot_signature,
get_pool_signature,
seed,
)
return UnfinishedBlock(
finished_sub_slots,
rc_block,
signage_point.cc_proof,
signage_point.rc_proof,
foliage,
foliage_transaction_block,
transactions_info,
block_generator.program if block_generator else None,
block_generator.block_height_list() if block_generator else [],
)
def unfinished_block_to_full_block(
unfinished_block: UnfinishedBlock,
cc_ip_vdf: VDFInfo,
cc_ip_proof: VDFProof,
rc_ip_vdf: VDFInfo,
rc_ip_proof: VDFProof,
icc_ip_vdf: Optional[VDFInfo],
icc_ip_proof: Optional[VDFProof],
finished_sub_slots: List[EndOfSubSlotBundle],
prev_block: Optional[BlockRecord],
blocks: BlockchainInterface,
total_iters_sp: uint128,
difficulty: uint64,
) -> FullBlock:
"""
Converts an unfinished block to a finished block. Includes all the infusion point VDFs as well as tweaking
other properties (height, weight, sub-slots, etc)
Args:
unfinished_block: the unfinished block to finish
cc_ip_vdf: the challenge chain vdf info at the infusion point
cc_ip_proof: the challenge chain proof
rc_ip_vdf: the reward chain vdf info at the infusion point
rc_ip_proof: the reward chain proof
icc_ip_vdf: the infused challenge chain vdf info at the infusion point
icc_ip_proof: the infused challenge chain proof
finished_sub_slots: finished sub slots from the prev block to the infusion point
prev_block: prev block from the infusion point
blocks: dictionary from header hash to SBR of all included SBR
total_iters_sp: total iters at the signage point
difficulty: difficulty at the infusion point
"""
# Replace things that need to be replaced, since foliage blocks did not necessarily have the latest information
if prev_block is None:
is_transaction_block = True
new_weight = uint128(difficulty)
new_height = uint32(0)
new_foliage = unfinished_block.foliage
new_foliage_transaction_block = unfinished_block.foliage_transaction_block
new_tx_info = unfinished_block.transactions_info
new_generator = unfinished_block.transactions_generator
new_generator_ref_list = unfinished_block.transactions_generator_ref_list
else:
is_transaction_block, _ = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
new_weight = uint128(prev_block.weight + difficulty)
new_height = uint32(prev_block.height + 1)
if is_transaction_block:
new_fbh = unfinished_block.foliage.foliage_transaction_block_hash
new_fbs = unfinished_block.foliage.foliage_transaction_block_signature
new_foliage_transaction_block = unfinished_block.foliage_transaction_block
new_tx_info = unfinished_block.transactions_info
new_generator = unfinished_block.transactions_generator
new_generator_ref_list = unfinished_block.transactions_generator_ref_list
else:
new_fbh = None
new_fbs = None
new_foliage_transaction_block = None
new_tx_info = None
new_generator = None
new_generator_ref_list = []
assert (new_fbh is None) == (new_fbs is None)
new_foliage = replace(
unfinished_block.foliage,
prev_block_hash=prev_block.header_hash,
foliage_transaction_block_hash=new_fbh,
foliage_transaction_block_signature=new_fbs,
)
ret = FullBlock(
finished_sub_slots,
RewardChainBlock(
new_weight,
new_height,
unfinished_block.reward_chain_block.total_iters,
unfinished_block.reward_chain_block.signage_point_index,
unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash,
unfinished_block.reward_chain_block.proof_of_space,
unfinished_block.reward_chain_block.challenge_chain_sp_vdf,
unfinished_block.reward_chain_block.challenge_chain_sp_signature,
cc_ip_vdf,
unfinished_block.reward_chain_block.reward_chain_sp_vdf,
unfinished_block.reward_chain_block.reward_chain_sp_signature,
rc_ip_vdf,
icc_ip_vdf,
is_transaction_block,
),
unfinished_block.challenge_chain_sp_proof,
cc_ip_proof,
unfinished_block.reward_chain_sp_proof,
rc_ip_proof,
icc_ip_proof,
new_foliage,
new_foliage_transaction_block,
new_tx_info,
new_generator,
new_generator_ref_list,
)
return recursive_replace(
ret,
"foliage.reward_block_hash",
ret.reward_chain_block.get_hash(),
)
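# Illustrative sketch: a minimal, hypothetical example of how the transaction
# filter hash used in create_foliage above is derived. It reuses only calls
# that already appear in this module (PyBIP158, GetEncoded, std_hash); the
# 32-byte entries stand in for addition puzzle hashes and removal coin names.
def _example_transaction_filter_hash() -> bytes32:
    byte_array_tx = [bytearray([1] * 32), bytearray([2] * 32)]
    bip158 = PyBIP158(byte_array_tx)      # build the BIP-158 style filter
    encoded = bytes(bip158.GetEncoded())  # serialize the filter
    return std_hash(encoded)              # the filter_hash stored in the foliage transaction block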
| 42.19145 | 117 | 0.693731 |
c867681fcb955439d4c07216e2c6f69d4bfd01a5 | 1,422 | py | Python | examples/plugins/tasks/ipython_kernel/example_panes.py | anshsrtv/envisage | e50ab70146ed9927d684e2dd093cf2934b029691 | ["BSD-3-Clause"] | 51 | 2015-05-12T01:34:15.000Z | 2022-03-20T19:11:22.000Z | examples/plugins/tasks/ipython_kernel/example_panes.py | anshsrtv/envisage | e50ab70146ed9927d684e2dd093cf2934b029691 | ["BSD-3-Clause"] | 347 | 2015-02-27T19:51:09.000Z | 2022-03-21T16:03:01.000Z | examples/plugins/tasks/ipython_kernel/example_panes.py | anshsrtv/envisage | e50ab70146ed9927d684e2dd093cf2934b029691 | ["BSD-3-Clause"] | 11 | 2015-02-11T04:32:54.000Z | 2021-09-13T10:50:05.000Z |
# Standard library imports.
import os.path
# Enthought library imports.
from pyface.tasks.api import TraitsDockPane
from traits.api import Event, File, List, Str
from traitsui.api import View, Item, FileEditor
class FileBrowserPane(TraitsDockPane):
""" A simple file browser pane.
"""
#### TaskPane interface ###################################################
id = "example.file_browser_pane"
name = "File Browser"
#### FileBrowserPane interface ############################################
# Fired when a file is double-clicked.
activated = Event
# The list of wildcard filters for filenames.
filters = List(Str)
# The currently selected file.
selected_file = File(os.path.expanduser("~"))
# The view used to construct the dock pane's widget.
view = View(
Item(
"selected_file",
editor=FileEditor(dclick_name="activated", filter_name="filters"),
style="custom",
show_label=False,
),
resizable=True,
)
class PythonScriptBrowserPane(FileBrowserPane):
""" A file browser pane restricted to Python scripts.
"""
#### TaskPane interface ###################################################
id = "example.python_script_browser_pane"
name = "Script Browser"
#### FileBrowserPane interface ############################################
filters = ["*.py"]
| 26.333333 | 79 | 0.563291 |
004a018957da66713fdbaa1bc86260c61a68a7cf | 4,297 | py | Python | openmdao/test_suite/tests/test_general.py | ryanfarr01/blue | a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff | ["Apache-2.0"] | null | null | null | openmdao/test_suite/tests/test_general.py | ryanfarr01/blue | a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff | ["Apache-2.0"] | null | null | null | openmdao/test_suite/tests/test_general.py | ryanfarr01/blue | a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff | ["Apache-2.0"] | null | null | null |
"""General tests to demonstrate the parametric suite. Possible arguments are given below (defaults).
To test more than one option, pass in an Iterable of requested options.
All Parametric Groups
---------------------
'group_type': Controls which type of ParametricGroups to test. Will test all groups if not specified
'vector_class': One of ['default', 'petsc'], which vector class to use for the problem. ('default')
'assembled_jac': bool. If an assembled jacobian should be used. (True)
'jacobian_type': One of ['matvec', 'dense', 'sparse-coo', 'sparse-csr', 'sparse-csc']. How the Jacobians are used.
Controls the type of AssembledJacobian. ('matvec')
- 'matvec': Uses compute_jacvec_product.
- 'dense': Uses an ndarray.
- 'sparse-coo': Uses a COOrdinate format sparse matrix.
- 'sparse-csr': Uses a Compressed Sparse Row sparse format.
- 'sparse-csc': Uses a Compressed Sparse Col sparse format.
CycleGroup ('group_type': 'cycle')
----------------------------------
'component_class': One of ['explicit', 'deprecated']. Controls the class of Component to use to
build the group. ('explicit')
'connection_type': One of ['implicit', 'explicit']. If connections are done explicitly or through
promotions ('implicit').
'partial_type': One of ['array', 'sparse', 'aij']. How the component partial derivatives are
specified ('array').
- 'array': Uses an ndarray.
- 'sparse': Uses the Scipy CSR sparse format.
- 'aij': Uses the [values, rows, cols] format.
'finite_difference': bool. If derivatives should be approximated with finite differences.
'num_comp': int. Number of components to use. Must be at least 2. (2)
'num_var': int. Number of variables to use per component. Must be at least 1. (3)
'var_shape': tuple(int). Shape to use for each variable. (2, 3).
"""
import unittest
from six import iterkeys
from openmdao.test_suite.parametric_suite import parametric_suite
from openmdao.devtools.testutil import assert_rel_error
class ParameterizedTestCases(unittest.TestCase):
"""Demonstration of parametric testing using the full test suite."""
@parametric_suite('*')
def test_openmdao(self, test):
test.setup()
problem = test.problem
root = problem.model
expected_values = root.expected_values
if expected_values:
actual = {key: problem[key] for key in iterkeys(expected_values)}
assert_rel_error(self, actual, expected_values, 1e-8)
error_bound = 1e-4 if root.metadata['finite_difference'] else 1e-8
expected_totals = root.expected_totals
if expected_totals:
# Forward Derivatives Check
totals = test.compute_totals('fwd')
assert_rel_error(self, totals, expected_totals, error_bound)
# Reverse Derivatives Check
totals = test.compute_totals('rev')
assert_rel_error(self, totals, expected_totals, error_bound)
class ParameterizedTestCasesSubset(unittest.TestCase):
"""Duplicating some testing to demonstrate filters and default running."""
@parametric_suite(jacobian_type='*',
num_comp=[2, 5, 10],
partial_type='aij',
run_by_default=True,
component_class='*')
def test_subset(self, param_instance):
param_instance.setup()
problem = param_instance.problem
model = problem.model
expected_values = model.expected_values
if expected_values:
actual = {key: problem[key] for key in iterkeys(expected_values)}
assert_rel_error(self, actual, expected_values, 1e-8)
expected_totals = model.expected_totals
if expected_totals:
# Forward Derivatives Check
totals = param_instance.compute_totals('fwd')
assert_rel_error(self, totals, expected_totals, 1e-8)
# Reverse Derivatives Check
totals = param_instance.compute_totals('rev')
assert_rel_error(self, totals, expected_totals, 1e-8)
if __name__ == '__main__':
unittest.main()
| 42.97 | 114 | 0.64417 |
15a9c93f94c0d6127511bb554f4dfaa6586f2177 | 1,960 | py | Python | common/log_config.py | superxuu/fastapi_mock | f0c7553e16cf303c512e21a477dee8f595f44cfa | ["MIT"] | 7 | 2020-05-08T08:20:21.000Z | 2021-12-11T17:51:58.000Z | src/utils/log_config.py | superxuu/fastapi_pony | eb583a644ad4ebd5e966e892b55fd543f3a90df3 | ["MIT"] | null | null | null | src/utils/log_config.py | superxuu/fastapi_pony | eb583a644ad4ebd5e966e892b55fd543f3a90df3 | ["MIT"] | null | null | null |
import time
from pathlib import Path
LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"()": "uvicorn.logging.DefaultFormatter",
# "fmt": "%(levelprefix)s %(message)s",
"fmt": '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s:%(message)s',
"use_colors": True,
},
"access": {
"()": "uvicorn.logging.AccessFormatter",
# "fmt": '%(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s',
"fmt": '%(asctime)s %(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s',
},
},
"handlers": {
"default": {
"formatter": "default",
"class": "logging.StreamHandler",
"stream": "ext://sys.stderr",
# "stream": "ext://sys.stdout",
},
"access": {
"formatter": "access",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
},
"filelog_default": {
"formatter": "default",
"class": "logging.handlers.TimedRotatingFileHandler",
"filename": Path(f'logs/{time.strftime("%Y-%m-%d", time.localtime())}.log'),
"encoding": "utf8",
"backupCount":5,
"when":"midnight",
},
"filelog_access": {
"formatter": "access",
"class": "logging.handlers.TimedRotatingFileHandler",
"filename": Path(f'logs/{time.strftime("%Y-%m-%d", time.localtime())}.log'),
"encoding": "utf8",
"backupCount": 5,
"when": "midnight",
},
},
"loggers": {
"": {"handlers": ["default", "filelog_default"], "level": "INFO"},
"uvicorn.error": {"level": "INFO"},
"uvicorn.access": {"handlers": ["access", "filelog_access"], "level": "INFO", "propagate": False},
},
}
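# Illustrative sketch: one way this dict is commonly consumed. Wiring it into
# uvicorn via the `log_config` argument is an assumption, not something shown
# elsewhere in this file, and the "main:app" import string is made up. Note
# that the file handlers above write into a local `logs/` directory, which
# must exist before startup.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run("main:app", host="127.0.0.1", port=8000, log_config=LOGGING_CONFIG)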
| 35 | 106 | 0.492347 |
5906a3dd6b8bb02ea7e0224a9964aa2481966a5b | 3,847 | py | Python | gizmos/helpers.py | pramodsshinde/gizmos | f090aaaf80fed1caf5626a487a779722d47688b5 | ["BSD-3-Clause"] | null | null | null | gizmos/helpers.py | pramodsshinde/gizmos | f090aaaf80fed1caf5626a487a779722d47688b5 | ["BSD-3-Clause"] | null | null | null | gizmos/helpers.py | pramodsshinde/gizmos | f090aaaf80fed1caf5626a487a779722d47688b5 | ["BSD-3-Clause"] | null | null | null |
import logging
import psycopg2
import re
import sqlite3
from configparser import ConfigParser
def add_labels(cur):
"""Create a temporary labels table. If a term does not have a label, the label is the ID."""
# Create a tmp labels table
cur.execute("CREATE TABLE tmp_labels(term TEXT PRIMARY KEY, label TEXT)")
if isinstance(cur, sqlite3.Cursor):
# Add all terms with label
cur.execute(
"""INSERT OR IGNORE INTO tmp_labels SELECT subject, value
FROM statements WHERE predicate = 'rdfs:label'"""
)
# Update remaining with their ID as their label
cur.execute(
"INSERT OR IGNORE INTO tmp_labels SELECT DISTINCT subject, subject FROM statements"
)
cur.execute(
"INSERT OR IGNORE INTO tmp_labels SELECT DISTINCT predicate, predicate FROM statements"
)
else:
# Do the same for a psycopg2 Cursor
cur.execute(
"""INSERT INTO tmp_labels
SELECT subject, value FROM statements WHERE predicate = 'rdfs:label'
ON CONFLICT (term) DO NOTHING"""
)
cur.execute(
"""INSERT INTO tmp_labels
SELECT DISTINCT subject, subject FROM statements
ON CONFLICT (term) DO NOTHING"""
)
cur.execute(
"""INSERT INTO tmp_labels
SELECT DISTINCT predicate, predicate FROM statements
ON CONFLICT (term) DO NOTHING"""
)
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def get_connection(file):
"""Given a file ending in .db or .ini, create a database connection."""
if file.endswith(".db"):
# Always SQLite
logging.info("Initializing SQLite connection")
return sqlite3.connect(file)
elif file.endswith(".ini"):
# Always PostgreSQL (for now)
config_parser = ConfigParser()
config_parser.read(file)
if config_parser.has_section("postgresql"):
params = {}
for param in config_parser.items("postgresql"):
params[param[0]] = param[1]
else:
logging.error(
"Unable to create database connection; missing [postgresql] section from " + file
)
return None
logging.info("Initializing PostgreSQL connection")
return psycopg2.connect(**params)
logging.error(
"Either a database file or a config file must be specified with a .db or .ini extension"
)
return None
def get_ids(cur, id_or_labels):
"""Create a list of IDs from a list of IDs or labels."""
ids = []
for id_or_label in id_or_labels:
cur.execute(f"SELECT term FROM tmp_labels WHERE label = '{id_or_label}'")
res = cur.fetchone()
if res:
ids.append(res[0])
else:
# Make sure this exists as an ID
cur.execute(f"SELECT label FROM tmp_labels WHERE term = '{id_or_label}'")
res = cur.fetchone()
if res:
ids.append(id_or_label)
else:
logging.warning(f" '{id_or_label}' does not exist in database")
return ids
def get_terms(term_list, terms_file):
"""Get a list of terms from a list and/or a file from args."""
terms = term_list or []
if terms_file:
with open(terms_file, "r") as f:
for line in f:
if line.startswith("#"):
continue
if not line.strip():
continue
m = re.match(r"(.+)\s#.+", line)
if m:
terms.append(m.group(1).strip())
else:
terms.append(line.strip())
return terms
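# Illustrative sketch of how these helpers compose. The database filename and
# the example term label are made up, and the sketch assumes the database
# already contains the `statements` table that add_labels reads from.
if __name__ == "__main__":
    conn = get_connection("obo.db")  # .db extension, so a SQLite connection
    if conn:
        cur = conn.cursor()
        add_labels(cur)  # build the tmp_labels lookup table
        terms = get_terms(["nucleus"], None)  # combine a term list and/or a terms file
        ids = get_ids(cur, terms)  # resolve labels (or bare IDs) to term IDs
        print(ids)
        conn.close()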
| 33.745614 | 99 | 0.576033 |
c8eefdbf26b985a802ede20e1c50207962d8f5bc | 2,338 | py | Python | lib/db/db.py | JLpython-py/anti-ghostping-bot | 344209488870d1826160c4fcb1c2118b3f2c881f | ["MIT"] | 1 | 2021-02-07T05:01:59.000Z | 2021-02-07T05:01:59.000Z | lib/db/db.py | JLpython-py/anti-ghostping-bot | 344209488870d1826160c4fcb1c2118b3f2c881f | ["MIT"] | 4 | 2021-01-26T15:13:28.000Z | 2021-01-26T15:24:12.000Z | lib/db/db.py | JLpython-py/anti-ghostping-bot | 344209488870d1826160c4fcb1c2118b3f2c881f | ["MIT"] | 1 | 2021-05-31T19:45:34.000Z | 2021-05-31T19:45:34.000Z |
#! python3
# db.py
"""
Manages the SQLite database in data/db
===============================================================================
Copyright (c) 2021 Jacob Lee
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
===============================================================================
"""
import os
import sqlite3
class DBConnection:
""" Connect to data/db/db.sqlite
"""
def __init__(self):
self.connection = sqlite3.connect(
os.path.join('data', 'db', 'db.sqlite')
)
self.cursor = self.connection.cursor()
def close_connection(self):
""" Close database connection
"""
self.connection.close()
def execute_query(self, query, mode, *args):
""" Pass the given query to the cursor under the determined mode
"""
mode = mode.lower()
if mode == "w":
self.cursor.execute(query, tuple(args))
self.connection.commit()
values = []
elif mode == "r":
self.cursor.execute(query, tuple(args))
values = self.cursor.fetchall()
elif mode == "rr":
self.cursor.execute(query, tuple(args))
values = [
[d[0] for d in self.cursor.description],
self.cursor.fetchall()
]
else:
values = []
return values
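# Illustrative sketch of the three execute_query modes ("w" writes and commits,
# "r" returns rows, "rr" returns column names plus rows). The table and values
# are made up, and the sketch assumes the data/db directory already exists,
# since DBConnection does not create it.
if __name__ == "__main__":
    db = DBConnection()
    db.execute_query(
        "CREATE TABLE IF NOT EXISTS prefs (guild INTEGER, setting TEXT)", "w"
    )
    db.execute_query("INSERT INTO prefs VALUES (?, ?)", "w", 1234, "strict")
    rows = db.execute_query("SELECT setting FROM prefs WHERE guild = ?", "r", 1234)
    header_and_rows = db.execute_query("SELECT * FROM prefs", "rr")
    print(rows, header_and_rows)
    db.close_connection()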
| 34.895522 | 79 | 0.621899 |
3974d8435bb0330a65266785ee86e2879443e9f3 | 5,698 | py | Python | bpmn/migrations/0001_initial.py | marcelobbfonseca/SFDjango-BPMN | 50565763414f52d9e84004494cf550c6fe2358fa | ["MIT"] | 1 | 2021-09-22T02:04:07.000Z | 2021-09-22T02:04:07.000Z | bpmn/migrations/0001_initial.py | VSSantana/SFDjango-BPMN | e5a3fb8da9282fd88f72a85a4b34d89d38391e36 | ["MIT"] | 5 | 2021-09-22T13:54:06.000Z | 2021-09-22T14:05:56.000Z | bpmn/migrations/0001_initial.py | marcelobbfonseca/SFDjango-BPMN | 50565763414f52d9e84004494cf550c6fe2358fa | ["MIT"] | 1 | 2021-09-18T01:22:25.000Z | 2021-09-18T01:22:25.000Z |
# Generated by Django 3.1.2 on 2021-02-25 18:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name_plural': 'Activities',
},
),
migrations.CreateModel(
name='ActivityType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=255)),
],
options={
'verbose_name': 'Activity Type',
'verbose_name_plural': 'Activity Types',
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('Start process', 'Start process'), ('End process', 'End process')], default='', max_length=255)),
],
options={
'verbose_name': 'Event',
'verbose_name_plural': 'Events',
},
),
migrations.CreateModel(
name='Flow',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name': 'Flow',
'verbose_name_plural': 'Flows',
},
),
migrations.CreateModel(
name='Pool',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=255)),
],
options={
'verbose_name_plural': 'Pools',
},
),
migrations.CreateModel(
name='Sequence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('current_activity', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='current_act', to='bpmn.activitytype')),
('current_event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='current_evt', to='bpmn.event')),
('next_activity_options', models.ManyToManyField(blank=True, related_name='next_acts_opts', to='bpmn.ActivityType')),
('next_event_options', models.ManyToManyField(blank=True, related_name='next_event_opts', to='bpmn.Event')),
],
options={
'verbose_name_plural': 'Sequences',
},
),
migrations.CreateModel(
name='ProcessType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=255)),
('flow', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bpmn.flow')),
],
options={
'verbose_name': 'Process Type',
'verbose_name_plural': 'Process Types',
},
),
migrations.CreateModel(
name='Process',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=255)),
('status', models.CharField(default='INITIALIZING', max_length=255)),
('current', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='bpmn.activity')),
('type', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='bpmn.processtype')),
],
options={
'verbose_name_plural': 'Processes',
},
),
migrations.CreateModel(
name='Lane',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=255)),
('pool', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='bpmn.pool')),
('responsable', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='auth.group')),
],
options={
'verbose_name_plural': 'Lanes',
},
),
migrations.AddField(
model_name='flow',
name='sequences',
field=models.ManyToManyField(to='bpmn.Sequence'),
),
migrations.AddField(
model_name='activitytype',
name='lane',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='bpmn.lane'),
),
migrations.AddField(
model_name='activity',
name='type',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='bpmn.activitytype'),
),
]
| 42.842105 | 176 | 0.552826 |
ac08da6704cd3dc1333dd262dbfac0fa6bfda260 | 57,164 | py | Python | tensorflow/python/training/monitored_session.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | ["Apache-2.0"] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/python/training/monitored_session.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | ["Apache-2.0"] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/python/training/monitored_session.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | ["Apache-2.0"] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper of Session API which runs hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import sys
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import distribute_coordinator_context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as training_saver
from tensorflow.python.training import session_manager as sm
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import util as trackable_util
from tensorflow.python.util import function_utils
from tensorflow.python.util.tf_export import tf_export
# The list of exceptions that we should recover from. Exceptions not in this
# list may terminate the job.
_PREEMPTION_ERRORS = (errors.AbortedError, errors.UnavailableError)
# Value that indicates no value was provided.
USE_DEFAULT = object()
@tf_export(v1=['train.Scaffold'])
class Scaffold(object):
"""Structure to create or gather pieces commonly needed to train a model.
When you build a model for training you usually need ops to initialize
variables, a `Saver` to checkpoint them, an op to collect summaries for
the visualizer, and so on.
Various libraries built on top of the core TensorFlow library take care of
creating some or all of these pieces and storing them in well known
collections in the graph. The `Scaffold` class helps pick these pieces from
the graph collections, creating and adding them to the collections if needed.
If you call the scaffold constructor without any arguments, it will pick
pieces from the collections, creating default ones if needed when
`scaffold.finalize()` is called. You can pass arguments to the constructor to
provide your own pieces. Pieces that you pass to the constructor are not
added to the graph collections.
The following pieces are directly accessible as attributes of the `Scaffold`
object:
* `saver`: A `tf.compat.v1.train.Saver` object taking care of saving the
variables.
Picked from and stored into the `SAVERS` collection in the graph by default.
* `init_op`: An op to run to initialize the variables. Picked from and
stored into the `INIT_OP` collection in the graph by default.
* `ready_op`: An op to verify that the variables are initialized. Picked
from and stored into the `READY_OP` collection in the graph by default.
* `ready_for_local_init_op`: An op to verify that global state has been
initialized and it is alright to run `local_init_op`. Picked from and
stored into the `READY_FOR_LOCAL_INIT_OP` collection in the graph by
default. This is needed when the initialization of local variables depends
on the values of global variables.
* `local_init_op`: An op to initialize the local variables. Picked
from and stored into the `LOCAL_INIT_OP` collection in the graph by default.
* `summary_op`: An op to run and merge the summaries in the graph. Picked
from and stored into the `SUMMARY_OP` collection in the graph by default.
You can also pass the following additional pieces to the constructor:
* `init_feed_dict`: A session feed dictionary that should be used when
running the init op.
* `init_fn`: A callable to run after the init op to perform additional
initializations. The callable will be called as
`init_fn(scaffold, session)`.
"""
def __init__(self,
init_op=None,
init_feed_dict=None,
init_fn=None,
ready_op=None,
ready_for_local_init_op=None,
local_init_op=None,
summary_op=None,
saver=None,
copy_from_scaffold=None):
"""Create a scaffold.
Args:
init_op: Optional op for initializing variables.
init_feed_dict: Optional session feed dictionary to use when running the
init_op.
init_fn: Optional function to use to initialize the model after running
the init_op. Will be called as `init_fn(scaffold, session)`.
ready_op: Optional op to verify that the variables are initialized. Must
return an empty 1D string tensor when the variables are initialized, or
a non-empty 1D string tensor listing the names of the non-initialized
variables.
ready_for_local_init_op: Optional op to verify that the global variables
are initialized and `local_init_op` can be run. Must return an empty 1D
string tensor when the global variables are initialized, or a non-empty
1D string tensor listing the names of the non-initialized global
variables.
local_init_op: Optional op to initialize local variables.
summary_op: Optional op to gather all summaries. Must return a scalar
string tensor containing a serialized `Summary` proto.
saver: Optional `tf.compat.v1.train.Saver` object to use to save and
restore variables. May also be a `tf.train.Checkpoint` object, in which
case object-based checkpoints are saved. This will also load some
object-based checkpoints saved from elsewhere, but that loading may be
fragile since it uses fixed keys rather than performing a full
graph-based match. For example if a variable has two paths from the
`Checkpoint` object because two `Model` objects share the `Layer` object
that owns it, removing one `Model` may change the keys and break
checkpoint loading through this API, whereas a graph-based match would
match the variable through the other `Model`.
copy_from_scaffold: Optional scaffold object to copy fields from. Its
fields will be overwritten by the provided fields in this function.
"""
if copy_from_scaffold is not None:
if not isinstance(copy_from_scaffold, Scaffold):
raise TypeError('copy_from_scaffold is not a Scaffold instance.')
# We need _coalesce since Tensor is not converted to bool automatically,
# so the common idiom of (a or b) does not work.
coalesce = lambda a, b: a if a is not None else b
init_op = coalesce(init_op, copy_from_scaffold.init_op)
init_feed_dict = coalesce(init_feed_dict,
copy_from_scaffold.init_feed_dict)
# Use the original init_fn provided by the user to init the new Scaffold.
init_fn = coalesce(init_fn, copy_from_scaffold._user_init_fn) # pylint: disable=protected-access
ready_op = coalesce(ready_op, copy_from_scaffold.ready_op)
ready_for_local_init_op = coalesce(
ready_for_local_init_op, copy_from_scaffold.ready_for_local_init_op)
local_init_op = coalesce(local_init_op, copy_from_scaffold.local_init_op)
summary_op = coalesce(summary_op, copy_from_scaffold.summary_op)
saver = coalesce(saver, copy_from_scaffold.saver)
# NOTE(touts): modifying the init function to be passed the scaffold is a
# hack to make it easy to find the saver. Is there a better way?
self._user_init_fn = init_fn
if init_fn:
self._init_fn = lambda sess: init_fn(self, sess)
else:
self._init_fn = None
self._init_op = init_op
self._init_feed_dict = init_feed_dict
self._ready_op = ready_op
self._ready_for_local_init_op = ready_for_local_init_op
self._local_init_op = local_init_op
self._summary_op = summary_op
self._saver = saver
def finalize(self):
"""Creates operations if needed and finalizes the graph."""
if self._init_op is None:
def default_init_op():
return control_flow_ops.group(
variables.global_variables_initializer(),
resources.initialize_resources(resources.shared_resources()))
self._init_op = Scaffold.get_or_default('init_op', ops.GraphKeys.INIT_OP,
default_init_op)
if self._ready_op is None:
def default_ready_op():
return array_ops.concat([
variables.report_uninitialized_variables(),
resources.report_uninitialized_resources()
], 0)
self._ready_op = Scaffold.get_or_default('ready_op',
ops.GraphKeys.READY_OP,
default_ready_op)
if self._ready_for_local_init_op is None:
def default_ready_for_local_init_op():
return array_ops.concat([
variables.report_uninitialized_variables(
variables.global_variables()),
resources.report_uninitialized_resources(
resources.shared_resources())
], 0)
self._ready_for_local_init_op = Scaffold.get_or_default(
'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
default_ready_for_local_init_op)
if self._local_init_op is None:
self._local_init_op = Scaffold.get_or_default(
'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,
Scaffold.default_local_init_op)
if self._summary_op is None:
self._summary_op = Scaffold.get_or_default('summary_op',
ops.GraphKeys.SUMMARY_OP,
summary.merge_all)
# pylint: disable=g-long-lambda
if self._saver is None:
self._saver = training_saver._get_saver_or_default() # pylint: disable=protected-access
# pylint: enable=g-long-lambda
if isinstance(self._saver, trackable_util.Checkpoint):
self._saver = training_saver.Saver(
var_list=graph_view.ObjectGraphView(
self._saver).frozen_saveable_objects(),
sharded=True)
else:
self._saver.build()
ops.get_default_graph().finalize()
logging.info('Graph was finalized.')
return self
@property
def init_fn(self):
return self._init_fn
@property
def init_op(self):
return self._init_op
@property
def ready_op(self):
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def local_init_op(self):
return self._local_init_op
@property
def summary_op(self):
return self._summary_op
@property
def saver(self):
return self._saver
@property
def init_feed_dict(self):
return self._init_feed_dict
@staticmethod
def get_or_default(arg_name, collection_key, default_constructor):
"""Get from cache or create a default operation."""
elements = ops.get_collection(collection_key)
if elements:
if len(elements) > 1:
        raise RuntimeError(
            'More than one item in the collection "%s". '
            'Please indicate which one to use by passing it to '
            'the tf.Scaffold constructor as: '
            'tf.Scaffold(%s=item to use)' % (collection_key, arg_name))
return elements[0]
op = default_constructor()
if op is not None:
ops.add_to_collection(collection_key, op)
return op
@staticmethod
def default_local_init_op():
"""Returns an op that groups the default local init ops.
This op is used during session initialization when a Scaffold is
initialized without specifying the local_init_op arg. It includes
`tf.compat.v1.local_variables_initializer`,
`tf.compat.v1.tables_initializer`, and also
initializes local session resources.
Returns:
The default Scaffold local init op.
"""
return control_flow_ops.group(
variables.local_variables_initializer(),
lookup_ops.tables_initializer(),
resources.initialize_resources(resources.local_resources()))
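# Illustrative sketch (graph-mode, public v1 API): how a Scaffold is typically
# combined with MonitoredTrainingSession, both of which this module exports as
# tf.compat.v1.train symbols. The model, checkpoint directory, and step count
# below are made up.
#
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()
#
#   global_step = tf.train.get_or_create_global_step()
#   loss = tf.Variable(1.0, name="loss")
#   train_op = tf.group(loss.assign(loss * 0.9), global_step.assign_add(1))
#
#   scaffold = tf.train.Scaffold()  # default init/ready/local_init/saver ops
#   with tf.train.MonitoredTrainingSession(
#       checkpoint_dir="/tmp/example_ckpts",
#       scaffold=scaffold,
#       hooks=[tf.train.StopAtStepHook(last_step=100)],
#   ) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)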
def _create_monitored_session_with_worker_context(
worker_context, # pylint: disable=missing-docstring
scaffold,
checkpoint_dir=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=None,
save_summaries_steps=None,
save_summaries_secs=None,
config=None,
stop_grace_period_secs=120,
log_step_count_steps=100,
max_wait_secs=7200,
save_checkpoint_steps=None,
summary_dir=None):
all_hooks = []
if hooks:
all_hooks.extend(hooks)
if chief_only_hooks and worker_context.is_chief:
all_hooks.extend(chief_only_hooks)
# We need to call save or summary ops on all workers since these ops may
# contain collective ops, only running save ops on some workers would make
# collective ops hang. Therefore on those workers that don't need to actually
# write checkpoints or summaries, we let them write to a temp directory.
# pylint: disable=protected-access
if type(
worker_context._strategy).__name__ in ('CollectiveAllReduceStrategy',
'CollectiveAllReduceStrategyV1',
'MultiWorkerMirroredStrategy'):
if worker_context.task_type:
tmpdir = 'tmp_%s_%d' % (worker_context.task_type, worker_context.task_id)
else:
tmpdir = 'tmp'
if save_checkpoint_secs:
logging.warning('Collective ops may deadlock with '
'`save_checkpoints_secs` please use '
'`save_checkpoint_steps` instead. Clearing '
'`save_checkpoint_secs` and setting '
'`save_checkpoint_steps` to 1000 now.')
save_checkpoint_secs = None
save_checkpoint_steps = 1000
if save_summaries_secs:
      logging.warning('Collective ops may run out of sync with '
'`save_summaries_secs`, please use '
'`save_summaries_steps` instead.')
else:
tmpdir = None
summary_dir = summary_dir or checkpoint_dir
if summary_dir and log_step_count_steps and log_step_count_steps > 0:
if worker_context.should_save_summary:
all_hooks.append(
basic_session_run_hooks.StepCounterHook(
output_dir=summary_dir, every_n_steps=log_step_count_steps))
elif tmpdir:
all_hooks.append(
basic_session_run_hooks.StepCounterHook(
output_dir=os.path.join(summary_dir, tmpdir),
every_n_steps=log_step_count_steps))
if (((save_summaries_steps and save_summaries_steps > 0) or
(save_summaries_secs and save_summaries_secs > 0)) and summary_dir):
if worker_context.should_save_summary:
all_hooks.append(
basic_session_run_hooks.SummarySaverHook(
scaffold=scaffold,
save_steps=save_summaries_steps,
save_secs=save_summaries_secs,
output_dir=summary_dir))
elif tmpdir:
all_hooks.append(
basic_session_run_hooks.SummarySaverHook(
scaffold=scaffold,
save_steps=save_summaries_steps,
save_secs=save_summaries_secs,
output_dir=os.path.join(summary_dir, tmpdir)))
if (((save_checkpoint_secs and save_checkpoint_secs > 0) or
(save_checkpoint_steps and save_checkpoint_steps > 0)) and
checkpoint_dir):
if worker_context.should_checkpoint:
all_hooks.append(
basic_session_run_hooks.CheckpointSaverHook(
checkpoint_dir,
save_steps=save_checkpoint_steps,
save_secs=save_checkpoint_secs,
scaffold=scaffold))
elif tmpdir:
all_hooks.append(
basic_session_run_hooks.CheckpointSaverHook(
os.path.join(checkpoint_dir, tmpdir),
save_steps=save_checkpoint_steps,
save_secs=save_checkpoint_secs,
scaffold=scaffold))
logging.info('all_hooks %r', all_hooks)
session_creator = worker_context.session_creator(
scaffold,
config=config,
checkpoint_dir=checkpoint_dir,
max_wait_secs=max_wait_secs)
return MonitoredSession(
session_creator=session_creator,
hooks=all_hooks,
stop_grace_period_secs=stop_grace_period_secs)
@tf_export(v1=['train.MonitoredTrainingSession'])
def MonitoredTrainingSession(
master='', # pylint: disable=invalid-name
is_chief=True,
checkpoint_dir=None,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=USE_DEFAULT,
save_summaries_steps=USE_DEFAULT,
save_summaries_secs=USE_DEFAULT,
config=None,
stop_grace_period_secs=120,
log_step_count_steps=100,
max_wait_secs=7200,
save_checkpoint_steps=USE_DEFAULT,
summary_dir=None):
"""Creates a `MonitoredSession` for training.
For a chief, this utility sets proper session initializer/restorer. It also
creates hooks related to checkpoint and summary saving. For workers, this
utility sets proper session creator which waits for the chief to
initialize/restore. Please check `tf.compat.v1.train.MonitoredSession` for
more
information.
Args:
master: `String` the TensorFlow master to use.
is_chief: If `True`, it will take care of initialization and recovery the
underlying TensorFlow session. If `False`, it will wait on a chief to
initialize or recover the TensorFlow session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
scaffold: A `Scaffold` used for gathering or building supportive ops. If not
specified, a default one is created. It's used to finalize the graph.
hooks: Optional list of `SessionRunHook` objects.
chief_only_hooks: list of `SessionRunHook` objects. Activate these hooks if
`is_chief==True`, ignore otherwise.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If both `save_checkpoint_steps` and
`save_checkpoint_secs` are set to `None`, then the default checkpoint
saver isn't used. If both are provided, then only `save_checkpoint_secs`
is used. Default 600.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If both
`save_summaries_steps` and `save_summaries_secs` are set to `None`, then
the default summary saver isn't used. Default 100.
save_summaries_secs: The frequency, in secs, that the summaries are written
to disk using a default summary saver. If both `save_summaries_steps` and
`save_summaries_secs` are set to `None`, then the default summary saver
isn't used. Default not enabled.
config: an instance of `tf.compat.v1.ConfigProto` proto used to configure
the session. It's the `config` argument of constructor of
`tf.compat.v1.Session`.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec is logged.
max_wait_secs: Maximum time workers should wait for the session to become
available. This should be kept relatively short to help detect incorrect
code, but sometimes may need to be increased if the chief takes a while to
start up.
save_checkpoint_steps: The frequency, in number of global steps, that a
checkpoint is saved using a default checkpoint saver. If both
`save_checkpoint_steps` and `save_checkpoint_secs` are set to `None`, then
the default checkpoint saver isn't used. If both are provided, then only
`save_checkpoint_secs` is used. Default not enabled.
summary_dir: A string. Optional path to a directory where to save
summaries. If None, checkpoint_dir is used instead.
Returns:
A `MonitoredSession` object.
"""
if save_summaries_steps == USE_DEFAULT and save_summaries_secs == USE_DEFAULT:
save_summaries_steps = 100
save_summaries_secs = None
elif save_summaries_secs == USE_DEFAULT:
save_summaries_secs = None
elif save_summaries_steps == USE_DEFAULT:
save_summaries_steps = None
if (save_checkpoint_steps == USE_DEFAULT and
save_checkpoint_secs == USE_DEFAULT):
save_checkpoint_steps = None
save_checkpoint_secs = 600
elif save_checkpoint_secs == USE_DEFAULT:
save_checkpoint_secs = None
elif save_checkpoint_steps == USE_DEFAULT:
save_checkpoint_steps = None
scaffold = scaffold or Scaffold()
worker_context = distribute_coordinator_context.get_current_worker_context()
if worker_context:
return _create_monitored_session_with_worker_context(
worker_context,
scaffold,
checkpoint_dir=checkpoint_dir,
hooks=hooks,
chief_only_hooks=chief_only_hooks,
save_checkpoint_secs=save_checkpoint_secs,
save_summaries_steps=save_summaries_steps,
save_summaries_secs=save_summaries_secs,
config=config,
stop_grace_period_secs=stop_grace_period_secs,
log_step_count_steps=log_step_count_steps,
max_wait_secs=max_wait_secs,
save_checkpoint_steps=save_checkpoint_steps,
summary_dir=summary_dir)
if not is_chief:
session_creator = WorkerSessionCreator(
scaffold=scaffold,
master=master,
config=config,
max_wait_secs=max_wait_secs)
return MonitoredSession(
session_creator=session_creator,
hooks=hooks or [],
stop_grace_period_secs=stop_grace_period_secs)
all_hooks = []
if chief_only_hooks:
all_hooks.extend(chief_only_hooks)
session_creator = ChiefSessionCreator(
scaffold=scaffold,
checkpoint_dir=checkpoint_dir,
master=master,
config=config)
summary_dir = summary_dir or checkpoint_dir
if summary_dir:
if log_step_count_steps and log_step_count_steps > 0:
all_hooks.append(
basic_session_run_hooks.StepCounterHook(
output_dir=summary_dir, every_n_steps=log_step_count_steps))
if (save_summaries_steps and
save_summaries_steps > 0) or (save_summaries_secs and
save_summaries_secs > 0):
all_hooks.append(
basic_session_run_hooks.SummarySaverHook(
scaffold=scaffold,
save_steps=save_summaries_steps,
save_secs=save_summaries_secs,
output_dir=summary_dir))
if checkpoint_dir:
if (save_checkpoint_secs and
save_checkpoint_secs > 0) or (save_checkpoint_steps and
save_checkpoint_steps > 0):
all_hooks.append(
basic_session_run_hooks.CheckpointSaverHook(
checkpoint_dir,
save_steps=save_checkpoint_steps,
save_secs=save_checkpoint_secs,
scaffold=scaffold))
if hooks:
all_hooks.extend(hooks)
return MonitoredSession(
session_creator=session_creator,
hooks=all_hooks,
stop_grace_period_secs=stop_grace_period_secs)
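# Illustrative sketch (not part of the original module): a minimal chief
# training loop built with `MonitoredTrainingSession`. The `loss` and
# `optimizer` objects and the directory path below are placeholders, not
# names defined in this file.
#
#   global_step = tf.compat.v1.train.get_or_create_global_step()
#   train_op = optimizer.minimize(loss, global_step=global_step)
#   with tf.compat.v1.train.MonitoredTrainingSession(
#       is_chief=True,
#       checkpoint_dir='/tmp/train_dir',
#       save_checkpoint_secs=600,
#       save_summaries_steps=100) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)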
@tf_export(v1=['train.SessionCreator'])
@six.add_metaclass(abc.ABCMeta)
class SessionCreator(object):
"""A factory for tf.Session."""
@abc.abstractmethod
def create_session(self):
raise NotImplementedError(
'create_session is not implemented for {}.'.format(self))
@tf_export(v1=['train.ChiefSessionCreator'])
class ChiefSessionCreator(SessionCreator):
"""Creates a tf.compat.v1.Session for a chief."""
def __init__(self,
scaffold=None,
master='',
config=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None):
"""Initializes a chief session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
"""
self._checkpoint_dir = checkpoint_dir
self._checkpoint_filename_with_path = checkpoint_filename_with_path
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
ready_for_local_init_op=self._scaffold.ready_for_local_init_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().prepare_session(
self._master,
saver=self._scaffold.saver,
checkpoint_dir=self._checkpoint_dir,
checkpoint_filename_with_path=self._checkpoint_filename_with_path,
config=self._config,
init_op=self._scaffold.init_op,
init_feed_dict=self._scaffold.init_feed_dict,
init_fn=self._scaffold.init_fn)
@tf_export(v1=['train.WorkerSessionCreator'])
class WorkerSessionCreator(SessionCreator):
"""Creates a tf.compat.v1.Session for a worker."""
def __init__(self,
scaffold=None,
master='',
config=None,
max_wait_secs=30 * 60):
"""Initializes a worker session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
max_wait_secs: Maximum time to wait for the session to become available.
"""
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
self._max_wait_secs = max_wait_secs
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
ready_for_local_init_op=self._scaffold.ready_for_local_init_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().wait_for_session(
self._master, config=self._config, max_wait_secs=self._max_wait_secs)
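# Illustrative sketch (not part of the original module): wiring the two
# session creators into `MonitoredSession` by hand. `is_chief`, `scaffold`,
# `master_target`, `session_config` and `train_op` are placeholders supplied
# by the surrounding training program.
#
#   if is_chief:
#     creator = ChiefSessionCreator(
#         scaffold=scaffold, master=master_target, config=session_config,
#         checkpoint_dir='/tmp/train_dir')
#   else:
#     creator = WorkerSessionCreator(
#         scaffold=scaffold, master=master_target, config=session_config)
#   with MonitoredSession(session_creator=creator) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)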
class _MonitoredSession(object):
"""See `MonitoredSession` or `SingularMonitoredSession`."""
def __init__(self,
session_creator,
hooks,
should_recover,
stop_grace_period_secs=120):
"""Sets up a Monitored or Hooked Session.
Args:
session_creator: A factory object to create session. Typically a
`ChiefSessionCreator` or a `WorkerSessionCreator`.
hooks: An iterable of `SessionRunHook` objects.
should_recover: A bool. Indicates whether to recover from `AbortedError`
and `UnavailableError` or not.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
"""
self._graph_was_finalized = ops.get_default_graph().finalized
self._hooks = hooks or []
for h in self._hooks:
h.begin()
worker_context = distribute_coordinator_context.get_current_worker_context()
if not session_creator and worker_context:
session_creator = worker_context.session_creator()
# Create the session.
self._coordinated_creator = self._CoordinatedSessionCreator(
session_creator=session_creator or ChiefSessionCreator(),
hooks=self._hooks,
stop_grace_period_secs=stop_grace_period_secs)
if should_recover:
self._sess = _RecoverableSession(self._coordinated_creator)
else:
self._sess = self._coordinated_creator.create_session()
@property
def graph(self):
"""The graph that was launched in this session."""
if self._tf_sess() is None:
return None
return self._tf_sess().graph
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Run ops in the monitored session.
This method is completely compatible with the `tf.Session.run()` method.
Args:
fetches: Same as `tf.Session.run()`.
feed_dict: Same as `tf.Session.run()`.
options: Same as `tf.Session.run()`.
run_metadata: Same as `tf.Session.run()`.
Returns:
Same as `tf.Session.run()`.
"""
return self._sess.run(
fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
def run_step_fn(self, step_fn):
"""Run ops using a step function.
Args:
step_fn: A function or a method with a single argument of type
`StepContext`. The function may use methods of the argument to perform
computations with access to a raw session. The returned value of the
`step_fn` will be returned from `run_step_fn`, unless a stop is
requested. In that case, the next `should_stop` call will return True.
Example usage:
```python
with tf.Graph().as_default():
c = tf.compat.v1.placeholder(dtypes.float32)
v = tf.add(c, 4.0)
w = tf.add(c, 0.5)
def step_fn(step_context):
a = step_context.session.run(fetches=v, feed_dict={c: 0.5})
if a <= 4.5:
step_context.request_stop()
return step_context.run_with_hooks(fetches=w,
feed_dict={c: 0.1})
with tf.MonitoredSession() as session:
while not session.should_stop():
a = session.run_step_fn(step_fn)
```
Hooks interact with the `run_with_hooks()` call inside the
`step_fn` as they do with a `MonitoredSession.run` call.
Returns:
Returns the returned value of `step_fn`.
Raises:
StopIteration: if `step_fn` has called `request_stop()`. It may be
caught by `with tf.MonitoredSession()` to close the session.
ValueError: if `step_fn` doesn't have a single argument called
`step_context`. It may also optionally have `self` for cases when it
belongs to an object.
"""
step_fn_arguments = function_utils.fn_args(step_fn)
if step_fn_arguments != ('step_context',) and step_fn_arguments != (
'self',
'step_context',
):
raise ValueError(
'`step_fn` may either have one `step_context` argument, or'
' `self` and `step_context` arguments if it\'s an instance'
' method. Got {} instead.'.format(step_fn_arguments))
# `self._sess` is either `_RecoverableSession` or a `_CoordinatedSession`.
# Setting `run_with_hooks` to `None` will cause `run_with_hooks` to be
# `_CoordinatedSession.run` downstream in either case. This allows
# `_PREEMPTION_ERRORS` to propagate from within `step_fn` to
# `_RecoverableSession.run_step_fn`.
return self._sess.run_step_fn(step_fn, self._tf_sess(), run_with_hooks=None)
class StepContext(object):
"""Control flow instrument for the `step_fn` from `run_step_fn()`.
Users of `step_fn` may perform `run()` calls without running hooks
by accessing the `session`. A `run()` call with hooks may be performed
using `run_with_hooks()`. Computation flow can be interrupted using
`request_stop()`.
"""
def __init__(self, session, run_with_hooks_fn):
"""Initializes the `step_context` argument for a `step_fn` invocation.
Args:
session: An instance of `tf.compat.v1.Session`.
run_with_hooks_fn: A function for running fetches and hooks.
"""
self._session = session
self._run_with_hooks_fn = run_with_hooks_fn
@property
def session(self):
return self._session
def run_with_hooks(self, *args, **kwargs):
"""Same as `MonitoredSession.run`. Accepts the same arguments."""
return self._run_with_hooks_fn(*args, **kwargs)
def request_stop(self):
"""Exit the training loop by causing `should_stop()` to return `True`.
Causes `step_fn` to exit by raising an exception.
Raises:
StopIteration
"""
raise StopIteration('step_fn has requested the iterations to stop.')
def should_stop(self):
return self._sess is None or self._sess.should_stop()
def close(self):
self._close_internal()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if exception_type in [errors.OutOfRangeError, StopIteration]:
exception_type = None
self._close_internal(exception_type)
# __exit__ should return True to suppress an exception.
return exception_type is None
class _CoordinatedSessionCreator(SessionCreator):
"""Factory for _CoordinatedSession."""
def __init__(self, session_creator, hooks, stop_grace_period_secs):
self._session_creator = session_creator
self._hooks = hooks
self.coord = None
self.tf_sess = None
self._stop_grace_period_secs = stop_grace_period_secs
def create_session(self):
"""Creates a coordinated session."""
# Keep the tf_sess for unit testing.
self.tf_sess = self._session_creator.create_session()
# We don't want coordinator to suppress any exception.
self.coord = coordinator.Coordinator(clean_stop_exception_types=[])
if ops.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord)
# Inform the hooks that a new session has been created.
for hook in self._hooks:
hook.after_create_session(self.tf_sess, self.coord)
return _CoordinatedSession(
_HookedSession(self.tf_sess, self._hooks), self.coord,
self._stop_grace_period_secs)
def _close_internal(self, exception_type=None):
try:
if not exception_type:
for h in self._hooks:
h.end(self._coordinated_creator.tf_sess)
finally:
try:
if self._sess is None:
raise RuntimeError('Session is already closed.')
self._sess.close()
finally:
self._sess = None
self._coordinated_creator.tf_sess = None
self._coordinated_creator.coord = None
if not self._graph_was_finalized:
ops.get_default_graph()._unsafe_unfinalize() # pylint: disable=protected-access
def _is_closed(self):
"""Return True if the monitored session is closed.
For tests only.
Returns:
A boolean.
"""
return self._coordinated_creator.tf_sess is None
def _tf_sess(self):
"""Return underlying tf.compat.v1.Session object.
Warning: accessing the returned object in user code is likely to cause races
or "flaky tests".
Returns:
A tf.compat.v1.Session object.
"""
return self._coordinated_creator.tf_sess
@tf_export(v1=['train.MonitoredSession'])
class MonitoredSession(_MonitoredSession):
"""Session-like object that handles initialization, recovery and hooks.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummarySaverHook(...)
with MonitoredSession(session_creator=ChiefSessionCreator(...),
hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
Initialization: At creation time the monitored session does the following things
in given order:
* calls `hook.begin()` for each given hook
* finalizes the graph via `scaffold.finalize()`
* create session
* initializes the model via initialization ops provided by `Scaffold`
* restores variables if a checkpoint exists
* launches queue runners
* calls `hook.after_create_session()`
Run: When `run()` is called, the monitored session does the following things:
* calls `hook.before_run()`
* calls TensorFlow `session.run()` with merged fetches and feed_dict
* calls `hook.after_run()`
* returns result of `session.run()` asked by user
* if `AbortedError` or `UnavailableError` occurs, it recovers or
reinitializes the session before executing the run() call again
Exit: At the `close()`, the monitored session does the following things in order:
* calls `hook.end()`
* closes the queue runners and the session
* suppresses `OutOfRange` error which indicates that all inputs have been
processed if the monitored_session is used as a context
How to set `tf.compat.v1.Session` arguments:
* In most cases you can set session arguments as follows:
```python
MonitoredSession(
session_creator=ChiefSessionCreator(master=..., config=...))
```
* In distributed setting for a non-chief worker, you can use following:
```python
MonitoredSession(
session_creator=WorkerSessionCreator(master=..., config=...))
```
See `MonitoredTrainingSession` for an example usage based on chief or worker.
Note: This is not a `tf.compat.v1.Session`. For example, it cannot do
the following:
* it cannot be set as default session.
* it cannot be sent to saver.save.
* it cannot be sent to tf.train.start_queue_runners.
Args:
session_creator: A factory object to create session. Typically a
`ChiefSessionCreator` which is the default one.
hooks: An iterable of `SessionRunHook` objects.
Returns:
A MonitoredSession object.
"""
def __init__(self,
session_creator=None,
hooks=None,
stop_grace_period_secs=120):
super(MonitoredSession, self).__init__(
session_creator,
hooks,
should_recover=True,
stop_grace_period_secs=stop_grace_period_secs)
@tf_export(v1=['train.SingularMonitoredSession'])
class SingularMonitoredSession(_MonitoredSession):
"""Session-like object that handles initialization, restoring, and hooks.
Please note that this utility is not recommended for distributed settings.
For distributed settings, please use `tf.compat.v1.train.MonitoredSession`.
The differences between `MonitoredSession` and `SingularMonitoredSession` are:
* `MonitoredSession` handles `AbortedError` and `UnavailableError` for
distributed settings, but `SingularMonitoredSession` does not.
* `MonitoredSession` can be created in `chief` or `worker` modes.
`SingularMonitoredSession` is always created as `chief`.
* You can access the raw `tf.compat.v1.Session` object used by
`SingularMonitoredSession`, whereas in MonitoredSession the raw session is
private. This can be used:
- To `run` without hooks.
- To save and restore.
* All other functionality is identical.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummarySaverHook(...)
with SingularMonitoredSession(hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
Initialization: At creation time the hooked session does the following things
in given order:
* calls `hook.begin()` for each given hook
* finalizes the graph via `scaffold.finalize()`
* create session
* initializes the model via initialization ops provided by `Scaffold`
* restores variables if a checkpoint exists
* launches queue runners
Run: When `run()` is called, the hooked session does the following things:
* calls `hook.before_run()`
* calls TensorFlow `session.run()` with merged fetches and feed_dict
* calls `hook.after_run()`
* returns result of `session.run()` asked by user
Exit: At the `close()`, the hooked session does the following things in order:
* calls `hook.end()`
* closes the queue runners and the session
* suppresses `OutOfRange` error which indicates that all inputs have been
processed if the `SingularMonitoredSession` is used as a context.
"""
def __init__(self,
hooks=None,
scaffold=None,
master='',
config=None,
checkpoint_dir=None,
stop_grace_period_secs=120,
checkpoint_filename_with_path=None):
"""Creates a SingularMonitoredSession.
Args:
hooks: An iterable of `SessionRunHook` objects.
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
checkpoint_filename_with_path: A string. Optional path to a checkpoint
file from which to restore variables.
"""
session_creator = ChiefSessionCreator(
scaffold=scaffold,
master=master,
config=config,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
super(SingularMonitoredSession, self).__init__(
session_creator,
hooks,
should_recover=False,
stop_grace_period_secs=stop_grace_period_secs)
def raw_session(self):
"""Returns underlying `TensorFlow.Session` object."""
return self._tf_sess()
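# Illustrative sketch (not part of the original module): using `raw_session()`
# to perform a manual save, something the raw-session-hiding `MonitoredSession`
# deliberately does not allow. The `saver`, `train_op` and path below are
# placeholders.
#
#   with SingularMonitoredSession(checkpoint_dir='/tmp/train_dir') as sess:
#     while not sess.should_stop():
#       sess.run(train_op)
#     saver.save(sess.raw_session(), '/tmp/train_dir/final.ckpt')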
class _WrappedSession(object):
"""Wrapper around a `tf.compat.v1.Session`.
This wrapper is used as a base class for various session wrappers
that provide additional functionality such as monitoring, coordination,
and recovery.
In addition to the methods exported by `SessionInterface` the wrapper
provides a method to check for stop and never raises exceptions from
calls to `close()`.
"""
def __init__(self, sess):
"""Creates a `_WrappedSession`.
Args:
sess: A `tf.compat.v1.Session` or `_WrappedSession` object. The wrapped
session.
"""
self._sess = sess
self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)
@property
def graph(self):
return self._sess.graph
@property
def sess_str(self):
return self._sess.sess_str
def should_stop(self):
"""Return true if this session should not be used anymore.
Always return True if the session was closed.
Returns:
True if the session should stop, False otherwise.
"""
if self._check_stop():
return True
if self._sess:
return self._wrapped_is_stoppable and self._sess.should_stop()
return True
def _check_stop(self):
"""Hook for subclasses to provide their own stop condition.
Returns:
True if the session should stop, False otherwise.
"""
return False
def close(self):
if self._sess:
try:
self._sess.close()
except _PREEMPTION_ERRORS as e:
logging.warning(
'An error occurred when attempting to close the '
'session. This may be due to a preemption in a '
'connected worker or parameter server. Error: %s', e)
finally:
self._sess = None
def run(self, *args, **kwargs):
return self._sess.run(*args, **kwargs)
def run_step_fn(self, step_fn, raw_session, run_with_hooks):
# `_RecoverableSession` sets `run_with_hooks` to `_CoordinatedSession.run`.
# It is `None` when called from `_CoordinatedSession`. In that case
# `self.run` is `_CoordinatedSession.run`.
run_with_hooks = run_with_hooks or self.run
return step_fn(_MonitoredSession.StepContext(raw_session, run_with_hooks))
class _RecoverableSession(_WrappedSession):
"""A wrapped session that recreates a session upon certain kinds of errors.
The constructor is passed a SessionCreator object, not a session.
Calls to `run()` are delegated to the wrapped session. If a call raises the
exception `tf.errors.AbortedError` or `tf.errors.UnavailableError`, the
wrapped session is closed, and a new one is created by calling the factory
again.
"""
def __init__(self, sess_creator):
"""Create a new `_RecoverableSession`.
The value returned by calling `sess_creator.create_session()` will be the
session wrapped by this recoverable session.
Args:
sess_creator: A 'SessionCreator' to be wrapped by recoverable.
"""
self._sess_creator = sess_creator
_WrappedSession.__init__(self, self._create_session())
def _create_session(self):
while True:
try:
return self._sess_creator.create_session()
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised while a session was being created. '
'This may be due to a preemption of a connected worker '
'or parameter server. A new session will be created. '
'This error may also occur due to a gRPC failure caused '
'by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
def _check_stop(self):
try:
if self._sess:
return self._sess._check_stop() # pylint: disable=protected-access
else:
return True
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised while considering whether the '
'session is complete. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. This error may also occur due to a gRPC failure '
'caused by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
self.close()
self._sess = self._create_session()
# Since we have just recreated the session, the overall computation should
# not stop:
return False
except Exception: # pylint: disable=broad-except
# `should_stop` should return True instead of raising an exception.
return True
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
while True:
try:
if not self._sess:
self._sess = self._create_session()
return self._sess.run(
fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. This error may also occur due to a gRPC failure '
'caused by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
self.close()
self._sess = None
def run_step_fn(self, step_fn, raw_session, run_with_hooks):
while True:
try:
if not self._sess:
self._sess = self._create_session()
run_with_hooks = self._sess.run
return self._sess.run_step_fn(step_fn, raw_session, run_with_hooks)
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. This error may also occur due to a gRPC failure '
'caused by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
self.close()
self._sess = None
class _CoordinatedSession(_WrappedSession):
"""A wrapped session that works with a `tf.Coordinator`.
Calls to `run()` are delegated to the wrapped session. If a call
raises an exception, the exception is reported to the coordinator.
In addition, after each call to `run()` this session asks the coordinator if
the session should stop. In that case it will join all the threads
registered with the coordinator before returning.
If the coordinator was requested to stop with an exception, that exception
will be re-raised from the call to `run()`.
"""
def __init__(self, sess, coord, stop_grace_period_secs=120):
"""Create a new `_CoordinatedSession`.
Args:
sess: A `tf.compat.v1.Session` object. The wrapped session.
coord: A `tf.train.Coordinator` object.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
"""
_WrappedSession.__init__(self, sess)
self._coord = coord
self._stop_grace_period_secs = stop_grace_period_secs
def _check_stop(self):
# If the coordinator was asked to stop due to an exception, then it needs
# to be propagated to this stack.
self._coord.raise_requested_exception()
# At this point, no exceptions are recorded in the coordinator.
return self._coord.should_stop()
def close(self):
self._coord.request_stop()
try:
self._coord.join(
stop_grace_period_secs=self._stop_grace_period_secs,
ignore_live_threads=True)
finally:
try:
_WrappedSession.close(self)
except Exception: # pylint: disable=broad-except
# We intentionally suppress exceptions from the close() here since
# useful exceptions are already reported by join().
pass
def run(self, *args, **kwargs):
try:
return self._sess.run(*args, **kwargs)
except _PREEMPTION_ERRORS:
raise
except Exception: # pylint: disable=broad-except
# A non-preemption error could have been caused by a preemption error
# in the coordinator. If this is the case, raise that exception instead,
# since it's the root cause. Otherwise, stick to the `original_exc_info`.
original_exc_info = sys.exc_info()
try:
self._coord.raise_requested_exception()
except _PREEMPTION_ERRORS:
raise
except Exception: # pylint: disable=broad-except
raise six.reraise(*original_exc_info)
else:
raise six.reraise(*original_exc_info)
class _HookedSession(_WrappedSession):
"""A _WrappedSession that calls hooks during calls to run().
The list of hooks to call is passed in the constructor. Before each call
to `run()` the session calls the `before_run()` method of the hooks, which
can return additional ops or tensors to run. These are added to the arguments
of the call to `run()`.
When the `run()` call finishes, the session calls the `after_run()` methods of
the hooks, passing the values returned by the `run()` call corresponding to
the ops and tensors that each hook requested.
If any call to the hooks requests a stop via run_context, the session will be
marked as needing to stop and its `should_stop()` method will now return
`True`.
"""
def __init__(self, sess, hooks):
"""Initializes a _HookedSession object.
Args:
sess: A `tf.compat.v1.Session` or a `_WrappedSession` object.
hooks: An iterable of `SessionRunHook` objects.
"""
_WrappedSession.__init__(self, sess)
self._hooks = hooks
self._should_stop = False
def _check_stop(self):
"""See base class."""
return self._should_stop
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""See base class."""
if self.should_stop():
raise RuntimeError('Run called even after should_stop requested.')
actual_fetches = {'caller': fetches}
run_context = session_run_hook.SessionRunContext(
original_args=session_run_hook.SessionRunArgs(fetches, feed_dict),
session=self._sess)
options = options or config_pb2.RunOptions()
feed_dict = self._call_hook_before_run(run_context, actual_fetches,
feed_dict, options)
# Do session run.
run_metadata = run_metadata or config_pb2.RunMetadata()
outputs = _WrappedSession.run(
self,
fetches=actual_fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
for hook in self._hooks:
hook.after_run(
run_context,
session_run_hook.SessionRunValues(
results=outputs[hook] if hook in outputs else None,
options=options,
run_metadata=run_metadata))
self._should_stop = self._should_stop or run_context.stop_requested
return outputs['caller']
def _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict,
options):
"""Calls hooks.before_run and handles requests from hooks."""
hook_feeds = {}
for hook in self._hooks:
request = hook.before_run(run_context)
if request is not None:
if request.fetches is not None:
fetch_dict[hook] = request.fetches
if request.feed_dict:
self._raise_if_feeds_intersects(hook_feeds, request.feed_dict,
'Same tensor is fed by two hooks.')
hook_feeds.update(request.feed_dict)
if request.options:
self._merge_run_options(options, request.options)
if not hook_feeds:
return user_feed_dict
if not user_feed_dict:
return hook_feeds
self._raise_if_feeds_intersects(
user_feed_dict, hook_feeds,
'Same tensor is fed by a SessionRunHook and user.')
hook_feeds.update(user_feed_dict)
return hook_feeds
def _raise_if_feeds_intersects(self, feeds1, feeds2, message):
intersection = set(feeds1.keys()) & set(feeds2.keys())
if intersection:
raise RuntimeError(message + ' Conflict(s): ' + str(list(intersection)))
def _merge_run_options(self, options, incoming_options):
"""Merge two instances of RunOptions into the first one.
During the merger, the numerical fields including trace_level,
timeout_in_ms, inter_op_thread_pool are set to the larger one of the two.
The boolean value is set to the logical OR of the two.
debug_tensor_watch_opts of the original options is extended with that from
the incoming one.
Args:
options: The options to merge into.
incoming_options: The options to be merged into the first argument.
"""
options.trace_level = max(options.trace_level, incoming_options.trace_level)
options.timeout_in_ms = max(options.timeout_in_ms,
incoming_options.timeout_in_ms)
options.inter_op_thread_pool = max(options.inter_op_thread_pool,
incoming_options.inter_op_thread_pool)
options.output_partition_graphs = max(
options.output_partition_graphs,
incoming_options.output_partition_graphs)
options.debug_options.debug_tensor_watch_opts.extend(
incoming_options.debug_options.debug_tensor_watch_opts)
options.debug_options.reset_disk_byte_usage = (
options.debug_options.reset_disk_byte_usage or
incoming_options.debug_options.reset_disk_byte_usage)
options.report_tensor_allocations_upon_oom = (
options.report_tensor_allocations_upon_oom or
incoming_options.report_tensor_allocations_upon_oom)
| 38.288011 | 103 | 0.694248 |
c6f1bd17be733a6dba7dfa92acbbfbcd1906f16a | 2,965 | py | Python | tests/test_advanced.py | gitter-badger/aimay | b38cb34ef551fbb08d52197ae5310dae99367bab | ["MIT"] | null | null | null | tests/test_advanced.py | gitter-badger/aimay | b38cb34ef551fbb08d52197ae5310dae99367bab | ["MIT"] | null | null | null | tests/test_advanced.py | gitter-badger/aimay | b38cb34ef551fbb08d52197ae5310dae99367bab | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from .context import aimay
import unittest
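# Note (added for readability, inferred from the assertions below): get_reply
# appears to return a 4-tuple of (reply type, reply text, sticker package id,
# sticker id); text replies fill only the first two slots, while sticker
# replies fill the type and the last two. The test inputs are casual Japanese
# phrases (an echo request, small talk about cat treats, music, movies, TV
# dramas, games, the weather, and a good-night message).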
class TestCore(unittest.TestCase):
def test_get_reply(self):
# --- text
reply = aimay.get_reply('text', 'おうむがえし!')
self.assertEqual('text', reply[0])
self.assertEqual('おうむがえし!ニャン', reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
reply = aimay.get_reply('text', 'ちゅーるを買ったよ')
self.assertEqual('text', reply[0])
self.assertIsNotNone(reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
reply = aimay.get_reply('text', 'りんりん(相棒)の調子はどう?')
self.assertEqual('text', reply[0])
self.assertIsNotNone(reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
reply = aimay.get_reply('text', 'いい音楽ないかな')
self.assertEqual('text', reply[0])
self.assertIsNotNone(reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
reply = aimay.get_reply('text', '映画を観たいな')
self.assertEqual('text', reply[0])
self.assertIsNotNone(reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
reply = aimay.get_reply('text', '面白いドラマあるかな')
self.assertEqual('text', reply[0])
self.assertIsNotNone(reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
reply = aimay.get_reply('text', 'ゲームをやろうかな')
self.assertEqual('text', reply[0])
self.assertIsNotNone(reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
reply = aimay.get_reply('text', '天気どうなるかな')
self.assertEqual('text', reply[0])
self.assertIsNotNone(reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
for i in range(10):
reply = aimay.get_reply('text', 'おやすみー')
self.assertEqual('sticker', reply[0])
self.assertIsNone(reply[1])
self.assertIsNotNone(reply[2])
self.assertIsNotNone(reply[3])
reply = aimay.get_reply('text', 'debug_sticker')
self.assertEqual('sticker', reply[0])
self.assertIsNone(reply[1])
self.assertIsNotNone(reply[2])
self.assertIsNotNone(reply[3])
reply = aimay.get_reply('text', 'debug_message')
self.assertIsNotNone(reply[0])
self.assertIsNone(reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
reply = aimay.get_reply('text', 'あれ')
self.assertEqual('text', reply[0])
self.assertIsNotNone(reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
# --- sticker
for i in range(10):
reply = aimay.get_reply('sticker', None)
self.assertEqual('sticker', reply[0])
self.assertIsNone(reply[1])
self.assertIsNotNone(reply[2])
self.assertIsNotNone(reply[3])
# --- unknown message type (fallback reply)
reply = aimay.get_reply('', None)
self.assertEqual('text', reply[0])
self.assertEqual('よくわかりませんニャン', reply[1])
self.assertIsNone(reply[2])
self.assertIsNone(reply[3])
if __name__ == "__main__":
unittest.main()
| 29.356436 | 54 | 0.654975 |
dbfbb444580a1b540f7ab33ae84462cb73dfb5b8 | 1,816 | py | Python | xdl/test/python/unit_test/test_take_grad.py | Ru-Xiang/x-deeplearning | 04cc0497150920c64b06bb8c314ef89977a3427a | ["Apache-2.0"] | 4,071 | 2018-12-13T04:17:38.000Z | 2022-03-30T03:29:35.000Z | xdl/test/python/unit_test/test_take_grad.py | laozhuang727/x-deeplearning | 781545783a4e2bbbda48fc64318fb2c6d8bbb3cc | ["Apache-2.0"] | 359 | 2018-12-21T01:14:57.000Z | 2022-02-15T07:18:02.000Z | xdl/test/python/unit_test/test_take_grad.py | laozhuang727/x-deeplearning | 781545783a4e2bbbda48fc64318fb2c6d8bbb3cc | ["Apache-2.0"] | 1,054 | 2018-12-20T09:57:42.000Z | 2022-03-29T07:16:53.000Z |
# Copyright 2018 Alibaba Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import xdl
import unittest
import numpy as np
from xdl.python.lib.datatype import *
comm = np.array([[0.1,0.2,0.3],[0.4,0.5,0.6],[0.7,0.8,0.9]],dtype=np.float64)
with xdl.device('CPU'):
comm_shape = xdl.shape_op(comm)
comm_grad = np.array([[0.1,0.2,0.3],[0.4,0.5,0.6],[0.7,0.8,0.9],
[0.1,0.2,0.3],[0.4,0.5,0.6]],dtype=np.float64)
indicator = np.array([0,0,1,1,2],dtype=np.int32)
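# Added note: take_grad appears to sum the rows of comm_grad that share the
# same indicator value into one output row of shape comm_shape; e.g. rows 0
# and 1 (indicator 0) collapse to [0.5, 0.7, 0.9] in the expected results
# asserted below.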
class TestTakeGrad(unittest.TestCase):
def test_cpu(self):
out = xdl.take_grad(comm_grad, indicator,comm_shape)
out = xdl.execute(out)
res = np.array([[0.5,0.7,0.9],[0.8,1.0,1.2],[0.4,0.5,0.6]],dtype=np.float64)
self.assertTrue(np.allclose(out, res))
def test_gpu(self):
with xdl.device("GPU"):
out = xdl.take_grad(comm_grad, indicator,comm_shape)
out = xdl.execute(out)
res = np.array([[0.5,0.7,0.9],[0.8,1.0,1.2],[0.4,0.5,0.6]],dtype=np.float64)
self.assertTrue(np.allclose(out, res))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TestTakeGrad)
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| 38.638298 | 86 | 0.632709 |
51318fd56a0bf7dc74a4a124e7249fa24fe5e51f | 63 | py | Python | virtual/walt/virtual/vpn/const.py | dia38/walt-python-packages | e6fa1f166f45e73173195d57840d22bef87b88f5 | ["BSD-3-Clause"] | 4 | 2020-01-14T09:12:56.000Z | 2022-03-14T14:35:11.000Z | virtual/walt/virtual/vpn/const.py | dia38/walt-python-packages | e6fa1f166f45e73173195d57840d22bef87b88f5 | ["BSD-3-Clause"] | 73 | 2016-04-29T13:17:26.000Z | 2022-03-01T15:06:48.000Z | virtual/walt/virtual/vpn/const.py | dia38/walt-python-packages | e6fa1f166f45e73173195d57840d22bef87b88f5 | ["BSD-3-Clause"] | 3 | 2019-03-18T14:27:56.000Z | 2021-06-03T12:07:02.000Z |
VPN_SOCK_PATH = "/var/run/walt-vpn.sock"
VPN_SOCK_BACKLOG = 20
| 21 | 40 | 0.761905 |
a85eaa2d05e12fc690c11a8fda804b2159745da5 | 25,561 | py | Python | backend/src/baserow/contrib/database/views/handler.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | ["MIT"] | 1 | 2021-04-13T16:27:58.000Z | 2021-04-13T16:27:58.000Z | backend/src/baserow/contrib/database/views/handler.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | ["MIT"] | null | null | null | backend/src/baserow/contrib/database/views/handler.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | ["MIT"] | null | null | null |
from django.db.models import F
from baserow.contrib.database.fields.exceptions import FieldNotInTable
from baserow.contrib.database.fields.models import Field
from baserow.contrib.database.fields.registries import field_type_registry
from baserow.core.utils import extract_allowed, set_allowed_attrs
from .exceptions import (
ViewDoesNotExist, UnrelatedFieldError, ViewFilterDoesNotExist,
ViewFilterNotSupported, ViewFilterTypeNotAllowedForField, ViewSortDoesNotExist,
ViewSortNotSupported, ViewSortFieldAlreadyExist, ViewSortFieldNotSupported
)
from .models import (
View, GridViewFieldOptions, ViewFilter, ViewSort
)
from .registries import view_type_registry, view_filter_type_registry
from .signals import (
view_created, view_updated, view_deleted, view_filter_created, view_filter_updated,
view_filter_deleted, view_sort_created, view_sort_updated, view_sort_deleted,
grid_view_field_options_updated
)
from baserow.contrib.database.fields.field_filters import FilterBuilder
class ViewHandler:
def get_view(self, view_id, view_model=None, base_queryset=None):
"""
Selects a view and checks if the user has access to that view. If everything
is fine the view is returned.
:param view_id: The identifier of the view that must be returned.
:type view_id: int
:param view_model: If provided, that model's objects are used to select the
view. This can for example be useful when you want to select a GridView or
other child of the View model.
:type view_model: View
:param base_queryset: The base queryset from where to select the view
object. This can for example be used to do a `select_related`. Note that
if this is used the `view_model` parameter doesn't work anymore.
:type base_queryset: Queryset
:raises ViewDoesNotExist: When the view with the provided id does not exist.
:return: The requested view instance.
:rtype: View
"""
if not view_model:
view_model = View
if not base_queryset:
base_queryset = view_model.objects
try:
view = base_queryset.select_related('table__database__group').get(
pk=view_id
)
except View.DoesNotExist:
raise ViewDoesNotExist(f'The view with id {view_id} does not exist.')
return view
def create_view(self, user, table, type_name, **kwargs):
"""
Creates a new view based on the provided type.
:param user: The user on whose behalf the view is created.
:type user: User
:param table: The table that the view instance belongs to.
:type table: Table
:param type_name: The type name of the view.
:type type_name: str
:param kwargs: The fields that need to be set upon creation.
:type kwargs: object
:return: The created view instance.
:rtype: View
"""
group = table.database.group
group.has_user(user, raise_error=True)
# Figure out which model to use for the given view type.
view_type = view_type_registry.get(type_name)
model_class = view_type.model_class
allowed_fields = [
'name',
'filter_type',
'filters_disabled'
] + view_type.allowed_fields
view_values = extract_allowed(kwargs, allowed_fields)
last_order = model_class.get_last_order(table)
instance = model_class.objects.create(table=table, order=last_order,
**view_values)
view_created.send(self, view=instance, user=user,
type_name=type_name)
return instance
def update_view(self, user, view, **kwargs):
"""
Updates an existing view instance.
:param user: The user on whose behalf the view is updated.
:type user: User
:param view: The view instance that needs to be updated.
:type view: View
:param kwargs: The fields that need to be updated.
:type kwargs: object
:raises ValueError: When the provided view not an instance of View.
:return: The updated view instance.
:rtype: View
"""
if not isinstance(view, View):
raise ValueError('The view is not an instance of View.')
group = view.table.database.group
group.has_user(user, raise_error=True)
view_type = view_type_registry.get_by_model(view)
allowed_fields = [
'name',
'filter_type',
'filters_disabled'
] + view_type.allowed_fields
view = set_allowed_attrs(kwargs, allowed_fields, view)
view.save()
view_updated.send(self, view=view, user=user)
return view
def delete_view(self, user, view):
"""
Deletes an existing view instance.
:param user: The user on whose behalf the view is deleted.
:type user: User
:param view: The view instance that needs to be deleted.
:type view: View
:raises ViewDoesNotExist: When the view with the provided id does not exist.
"""
if not isinstance(view, View):
raise ValueError('The view is not an instance of View')
group = view.table.database.group
group.has_user(user, raise_error=True)
view_id = view.id
view.delete()
view_deleted.send(self, view_id=view_id, view=view, user=user)
def update_grid_view_field_options(self, user, grid_view, field_options,
fields=None):
"""
Updates the field options with the provided values if the field id exists in
the table related to the grid view.
:param user: The user on whose behalf the request is made.
:type user: User
:param grid_view: The grid view for which the field options need to be updated.
:type grid_view: GridView
:param field_options: A dict with the field ids as the key and a dict
containing the values that need to be updated as value.
:type field_options: dict
:param fields: Optionally a list of fields can be provided so that they don't
have to be fetched again.
:type fields: None or list
:raises UnrelatedFieldError: When the provided field id is not related to the
provided view.
"""
grid_view.table.database.group.has_user(user, raise_error=True)
if not fields:
fields = Field.objects.filter(table=grid_view.table)
allowed_field_ids = [field.id for field in fields]
for field_id, options in field_options.items():
if int(field_id) not in allowed_field_ids:
raise UnrelatedFieldError(f'The field id {field_id} is not related to '
f'the grid view.')
GridViewFieldOptions.objects.update_or_create(
grid_view=grid_view, field_id=field_id, defaults=options
)
grid_view_field_options_updated.send(self, grid_view=grid_view, user=user)
def field_type_changed(self, field):
"""
This method is called by the FieldHandler when the field type of a field has
changed. It could be that the field has filters or sortings that are not
compatible anymore. If that is the case then those need to be removed.
:param field: The new field object.
:type field: Field
"""
field_type = field_type_registry.get_by_model(field.specific_class)
# If the new field type does not support sorting then all sortings will be
# removed.
if not field_type.can_order_by:
field.viewsort_set.all().delete()
# Check which filters are not compatible anymore and remove those.
for filter in field.viewfilter_set.all():
filter_type = view_filter_type_registry.get(filter.type)
if field_type.type not in filter_type.compatible_field_types:
filter.delete()
def apply_filters(self, view, queryset):
"""
Applies the view's filter to the given queryset.
:param view: The view where to fetch the fields from.
:type view: View
:param queryset: The queryset where the filters need to be applied to.
:type queryset: QuerySet
:raises ValueError: When the queryset's model is not a table model or if the
table model does not contain the one of the fields.
:return: The queryset where the filters have been applied to.
:type: QuerySet
"""
model = queryset.model
# If the model does not have the `_field_objects` property then it is not a
# generated table model which is not supported.
if not hasattr(model, '_field_objects'):
raise ValueError('A queryset of the table model is required.')
# If the filters are disabled we don't have to do anything with the queryset.
if view.filters_disabled:
return queryset
filter_builder = FilterBuilder(filter_type=view.filter_type)
for view_filter in view.viewfilter_set.all():
# If the field to be filtered on is not present in the `_field_objects` we
# cannot filter so we raise a ValueError.
if view_filter.field_id not in model._field_objects:
raise ValueError(f'The table model does not contain field '
f'{view_filter.field_id}.')
field_object = model._field_objects[view_filter.field_id]
field_name = field_object['name']
model_field = model._meta.get_field(field_name)
view_filter_type = view_filter_type_registry.get(view_filter.type)
filter_builder.filter(
view_filter_type.get_filter(
field_name,
view_filter.value,
model_field,
field_object['field']
)
)
return filter_builder.apply_to_queryset(queryset)
def get_filter(self, user, view_filter_id, base_queryset=None):
"""
Returns an existing view filter by the given id.
:param user: The user on whose behalf the view filter is requested.
:type user: User
:param view_filter_id: The id of the view filter.
:type view_filter_id: int
:param base_queryset: The base queryset from where to select the view filter
object. This can for example be used to do a `select_related`.
:type base_queryset: Queryset
:raises ViewFilterDoesNotExist: When the requested view filter does not exist.
:return: The requested view filter instance.
:type: ViewFilter
"""
if not base_queryset:
base_queryset = ViewFilter.objects
try:
view_filter = base_queryset.select_related(
'view__table__database__group'
).get(
pk=view_filter_id
)
except ViewFilter.DoesNotExist:
raise ViewFilterDoesNotExist(
f'The view filter with id {view_filter_id} does not exist.'
)
group = view_filter.view.table.database.group
group.has_user(user, raise_error=True)
return view_filter
def create_filter(self, user, view, field, type_name, value):
"""
Creates a new view filter. The rows that are visible in a view should always
be filtered by the related view filters.
:param user: The user on whose behalf the view filter is created.
:type user: User
:param view: The view for which the filter needs to be created.
:type: View
:param field: The field that the filter should compare the value with.
:type field: Field
:param type_name: The filter type, allowed values are the types in the
view_filter_type_registry `equal`, `not_equal` etc.
:type type_name: str
:param value: The value that the filter must apply to.
:type value: str
:raises ViewFilterNotSupported: When the provided view does not support
filtering.
:raises ViewFilterTypeNotAllowedForField: When the field does not support the
filter type.
:raises FieldNotInTable: When the provided field does not belong to the
provided view's table.
:return: The created view filter instance.
:rtype: ViewFilter
"""
group = view.table.database.group
group.has_user(user, raise_error=True)
# Check if view supports filtering
view_type = view_type_registry.get_by_model(view.specific_class)
if not view_type.can_filter:
raise ViewFilterNotSupported(
f'Filtering is not supported for {view_type.type} views.'
)
view_filter_type = view_filter_type_registry.get(type_name)
field_type = field_type_registry.get_by_model(field.specific_class)
# Check if the field is allowed for this filter type.
if field_type.type not in view_filter_type.compatible_field_types:
raise ViewFilterTypeNotAllowedForField(
type_name, field_type.type
)
# Check if the field belongs to the view's table
if not view.table.field_set.filter(id=field.pk).exists():
raise FieldNotInTable(f'The field {field.pk} does not belong to table '
f'{view.table.id}.')
view_filter = ViewFilter.objects.create(
view=view,
field=field,
type=view_filter_type.type,
value=value
)
view_filter_created.send(self, view_filter=view_filter, user=user)
return view_filter
def update_filter(self, user, view_filter, **kwargs):
"""
Updates the values of an existing view filter.
:param user: The user on whose behalf the view filter is updated.
:type user: User
:param view_filter: The view filter that needs to be updated.
:type view_filter: ViewFilter
:param kwargs: The values that need to be updated, allowed values are
`field`, `value` and `type_name`.
:type kwargs: dict
:raises ViewFilterTypeNotAllowedForField: When the field does not support the
filter type.
:raises FieldNotInTable: When the provided field does not belong to the
view's table.
:return: The updated view filter instance.
:rtype: ViewFilter
"""
group = view_filter.view.table.database.group
group.has_user(user, raise_error=True)
type_name = kwargs.get('type_name', view_filter.type)
field = kwargs.get('field', view_filter.field)
value = kwargs.get('value', view_filter.value)
view_filter_type = view_filter_type_registry.get(type_name)
field_type = field_type_registry.get_by_model(field.specific_class)
# Check if the field is allowed for this filter type.
if field_type.type not in view_filter_type.compatible_field_types:
raise ViewFilterTypeNotAllowedForField(
type_name,
field_type.type
)
# If the field has changed we need to check if the field belongs to the table.
if (
field.id != view_filter.field_id and
not view_filter.view.table.field_set.filter(id=field.pk).exists()
):
raise FieldNotInTable(f'The field {field.pk} does not belong to table '
f'{view_filter.view.table.id}.')
view_filter.field = field
view_filter.value = value
view_filter.type = type_name
view_filter.save()
view_filter_updated.send(self, view_filter=view_filter, user=user)
return view_filter
def delete_filter(self, user, view_filter):
"""
Deletes an existing view filter.
:param user: The user on whose behalf the view filter is deleted.
:type user: User
:param view_filter: The view filter instance that needs to be deleted.
:type view_filter: ViewFilter
"""
group = view_filter.view.table.database.group
group.has_user(user, raise_error=True)
view_filter_id = view_filter.id
view_filter.delete()
view_filter_deleted.send(self, view_filter_id=view_filter_id,
view_filter=view_filter, user=user)
def apply_sorting(self, view, queryset):
"""
Applies the view's sorting to the given queryset. The first sort, which for now
is the first created, will always be applied first. Secondary sortings are
going to be applied if the values of the first sort rows are the same.
Example:
id | field_1 | field_2
1 | Bram | 20
2 | Bram | 10
3 | Elon | 30
If we are going to sort ascending on field_1 and field_2 the resulting ids are
going to be 2, 1 and 3 in that order.
:param view: The view where to fetch the sorting from.
:type view: View
:param queryset: The queryset where the sorting need to be applied to.
:type queryset: QuerySet
:raises ValueError: When the queryset's model is not a table model or if the
table model does not contain the one of the fields.
:return: The queryset where the sorting has been applied to.
:type: QuerySet
"""
model = queryset.model
# If the model does not have the `_field_objects` property then it is not a
# generated table model which is not supported.
if not hasattr(model, '_field_objects'):
raise ValueError('A queryset of the table model is required.')
order_by = []
for view_sort in view.viewsort_set.all():
# If the field to be sorted on is not present in the `_field_objects` we
# cannot sort so we raise a ValueError.
if view_sort.field_id not in model._field_objects:
raise ValueError(f'The table model does not contain field '
f'{view_sort.field_id}.')
field = model._field_objects[view_sort.field_id]['field']
field_name = model._field_objects[view_sort.field_id]['name']
field_type = model._field_objects[view_sort.field_id]['type']
order = field_type.get_order(field, field_name, view_sort)
# If the field type does not have a specific ordering expression we can
# order the default way.
if not order:
order = F(field_name)
if view_sort.order == 'ASC':
order = order.asc(nulls_first=True)
else:
order = order.desc(nulls_last=True)
order_by.append(order)
order_by.append('order')
order_by.append('id')
queryset = queryset.order_by(*order_by)
return queryset
def get_sort(self, user, view_sort_id, base_queryset=None):
"""
Returns an existing view sort with the given id.
:param user: The user on whose behalf the view sort is requested.
:type user: User
:param view_sort_id: The id of the view sort.
:type view_sort_id: int
:param base_queryset: The base queryset from where to select the view sort
object. This can for example be used to do a `select_related`.
:type base_queryset: Queryset
:raises ViewSortDoesNotExist: When the requested view sort does not exist.
:return: The requested view sort instance.
:type: ViewSort
"""
if not base_queryset:
base_queryset = ViewSort.objects
try:
view_sort = base_queryset.select_related(
'view__table__database__group'
).get(
pk=view_sort_id
)
except ViewSort.DoesNotExist:
raise ViewSortDoesNotExist(
f'The view sort with id {view_sort_id} does not exist.'
)
group = view_sort.view.table.database.group
group.has_user(user, raise_error=True)
return view_sort
def create_sort(self, user, view, field, order):
"""
Creates a new view sort.
:param user: The user on whose behalf the view sort is created.
:type user: User
:param view: The view for which the sort needs to be created.
:type: View
:param field: The field that needs to be sorted.
:type field: Field
:param order: The desired order, can either be ascending (A to Z) or
descending (Z to A).
:type order: str
:raises ViewSortNotSupported: When the provided view does not support sorting.
:raises FieldNotInTable: When the provided field does not belong to the
provided view's table.
:return: The created view sort instance.
:rtype: ViewSort
"""
group = view.table.database.group
group.has_user(user, raise_error=True)
# Check if view supports sorting.
view_type = view_type_registry.get_by_model(view.specific_class)
if not view_type.can_sort:
raise ViewSortNotSupported(
f'Sorting is not supported for {view_type.type} views.'
)
# Check if the field supports sorting.
field_type = field_type_registry.get_by_model(field.specific_class)
if not field_type.can_order_by:
raise ViewSortFieldNotSupported(f'The field {field.pk} does not support '
f'sorting.')
# Check if the field belongs to the view's table.
if not view.table.field_set.filter(id=field.pk).exists():
raise FieldNotInTable(f'The field {field.pk} does not belong to table '
f'{view.table.id}.')
# Check if the field already exists as sort
if view.viewsort_set.filter(field_id=field.pk).exists():
raise ViewSortFieldAlreadyExist(f'A sort with the field {field.pk} '
f'already exists.')
view_sort = ViewSort.objects.create(
view=view,
field=field,
order=order
)
view_sort_created.send(self, view_sort=view_sort, user=user)
return view_sort
def update_sort(self, user, view_sort, **kwargs):
"""
Updates the values of an existing view sort.
:param user: The user on whose behalf the view sort is updated.
:type user: User
:param view_sort: The view sort that needs to be updated.
:type view_sort: ViewSort
:param kwargs: The values that need to be updated, allowed values are
`field` and `order`.
:type kwargs: dict
:raises FieldNotInTable: When the new field does not belong to the view's table.
:raises ViewSortFieldNotSupported: When the new field does not support sorting.
:raises ViewSortFieldAlreadyExist: When a sort with the new field already exists.
:return: The updated view sort instance.
:rtype: ViewSort
"""
group = view_sort.view.table.database.group
group.has_user(user, raise_error=True)
field = kwargs.get('field', view_sort.field)
order = kwargs.get('order', view_sort.order)
# If the field has changed we need to check if the field belongs to the table.
if (
field.id != view_sort.field_id and
not view_sort.view.table.field_set.filter(id=field.pk).exists()
):
raise FieldNotInTable(f'The field {field.pk} does not belong to table '
f'{view_sort.view.table.id}.')
# If the field has changed we need to check if the new field type supports
# sorting.
field_type = field_type_registry.get_by_model(field.specific_class)
if (
field.id != view_sort.field_id and
not field_type.can_order_by
):
raise ViewSortFieldNotSupported(f'The field {field.pk} does not support '
f'sorting.')
# If the field has changed we need to check if the new field doesn't already
# exist as sort.
if (
field.id != view_sort.field_id and
view_sort.view.viewsort_set.filter(field_id=field.pk).exists()
):
raise ViewSortFieldAlreadyExist(f'A sort with the field {field.pk} '
f'already exists.')
view_sort.field = field
view_sort.order = order
view_sort.save()
view_sort_updated.send(self, view_sort=view_sort, user=user)
return view_sort
def delete_sort(self, user, view_sort):
"""
Deletes an existing view sort.
:param user: The user on whose behalf the view sort is deleted.
:type user: User
:param view_sort: The view sort instance that needs to be deleted.
:type view_sort: ViewSort
"""
group = view_sort.view.table.database.group
group.has_user(user, raise_error=True)
view_sort_id = view_sort.id
view_sort.delete()
view_sort_deleted.send(self, view_sort_id=view_sort_id, view_sort=view_sort,
user=user)
| 38.553544
| 87
| 0.625993
|
0079e09be0e3df97cd5291907f50ec0e7d61939c
| 8,355
|
py
|
Python
|
rls/algos/base/base.py
|
qiushuiai/RLs
|
a612ecaf3a47bdcbb412250a3bfdfa579578a183
|
[
"Apache-2.0"
] | 1
|
2021-07-07T12:57:24.000Z
|
2021-07-07T12:57:24.000Z
|
rls/algos/base/base.py
|
qiushuiai/RLs
|
a612ecaf3a47bdcbb412250a3bfdfa579578a183
|
[
"Apache-2.0"
] | null | null | null |
rls/algos/base/base.py
|
qiushuiai/RLs
|
a612ecaf3a47bdcbb412250a3bfdfa579578a183
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
import os
import json
import numpy as np
import tensorflow as tf
from typing import (Dict,
Callable,
Union,
List,
NoReturn,
Optional,
Any)
from rls.utils.tf2_utils import get_device
from rls.utils.display import colorize
from rls.utils.sundry_utils import check_or_create
from rls.utils.logging_utils import (get_logger,
set_log_file)
logger = get_logger(__name__)
class Base:
def __init__(self, *args, **kwargs):
'''
inputs:
a_dim: dimensionality of the action space
base_dir: the directory that stores data such as models, logs, and other artifacts
'''
super().__init__()
self.no_save = bool(kwargs.get('no_save', False))
self.base_dir = base_dir = kwargs.get('base_dir')
tf_dtype = str(kwargs.get('tf_dtype'))
self._tf_data_type = tf.float32 if tf_dtype == 'float32' else tf.float64
tf.keras.backend.set_floatx(tf_dtype)
self.device = get_device()
self.cp_dir, self.log_dir, self.excel_dir = [os.path.join(base_dir, i) for i in ['model', 'log', 'excel']]
if not self.no_save:
check_or_create(self.cp_dir, 'checkpoints(models)')
if False:  # Excel export is currently disabled.
import pandas as pd
check_or_create(self.excel_dir, 'excel')
self.excel_writer = pd.ExcelWriter(self.excel_dir + '/data.xlsx')
self.global_step = tf.Variable(0, name="global_step", trainable=False, dtype=tf.int64)  # must be tf.int64 in TF 2.x because tf.summary.experimental.set_step expects an int64 argument.
self._worker_params_dict = {}
self._all_params_dict = dict(global_step=self.global_step)
self.writer = self._create_writer(self.log_dir) # TODO: Annotation
if bool(kwargs.get('logger2file', False)):
set_log_file(log_file=os.path.join(self.log_dir, 'log.txt'))
def _tf_data_cast(self, *args):
'''
Cast the given arrays/tensors to the configured TF floating-point dtype on the configured device.
'''
with tf.device(self.device):
return [tf.cast(i, self._tf_data_type) for i in args]
def data_convert(self, data: Union[np.ndarray, List]) -> tf.Tensor:
'''
Convert a numpy array, list or tuple (None entries are passed through) to tf.Tensor(s) of the configured dtype on the configured device.
'''
with tf.device(self.device):
if isinstance(data, tuple):
return tuple(
tf.convert_to_tensor(d, dtype=self._tf_data_type)
if d is not None
else d
for d in data
)
else:
return tf.convert_to_tensor(data, dtype=self._tf_data_type)
def _create_saver(self) -> NoReturn:
"""
create checkpoint and saver.
"""
self.checkpoint = tf.train.Checkpoint(**self._all_params_dict)
self.saver = tf.train.CheckpointManager(self.checkpoint, directory=self.cp_dir, max_to_keep=5, checkpoint_name='ckpt')
def _create_writer(self, log_dir: str) -> tf.summary.SummaryWriter:
if not self.no_save:
check_or_create(log_dir, 'logs(summaries)')
return tf.summary.create_file_writer(log_dir)
def init_or_restore(self, base_dir: Optional[str] = None) -> NoReturn:
"""
Check whether a checkpoint exists in cp_dir; if so, restore from it, otherwise initialize randomly.
"""
if base_dir is not None:
cp_dir = os.path.join(base_dir, 'model')
if os.path.exists(os.path.join(cp_dir, 'checkpoint')):
try:
ckpt = tf.train.latest_checkpoint(cp_dir)
self.checkpoint.restore(ckpt).expect_partial()  # restore the model from the specified path
except Exception:
logger.error(colorize(f'restore model from {cp_dir} FAILED.', color='red'))
raise Exception(f'restore model from {cp_dir} FAILED.')
else:
logger.info(colorize(f'restore model from {ckpt} SUCCESS.', color='green'))
else:
ckpt = self.saver.latest_checkpoint
self.checkpoint.restore(ckpt).expect_partial()  # restore from this model's own directory to resume training from the checkpoint
logger.info(colorize(f'restore model from {ckpt} SUCCESS.', color='green'))
logger.info(colorize('initialize model SUCCESS.', color='green'))
def save_checkpoint(self, **kwargs) -> NoReturn:
"""
save the training model
"""
if not self.no_save:
train_step = int(kwargs.get('train_step', 0))
self.saver.save(checkpoint_number=train_step)
logger.info(colorize(f'Save checkpoint success. Training step: {train_step}', color='green'))
self.write_training_info(kwargs)
def get_init_training_info(self) -> Dict:
'''
Load train_step, frame_step and episode from `{base_dir}/step.json` if it exists; otherwise return zeros.
'''
path = f'{self.base_dir}/step.json'
if os.path.exists(path):
with open(path, 'r') as f:
data = json.load(f)
else:
data = {}
return dict(
train_step=int(data.get('train_step', 0)),
frame_step=int(data.get('frame_step', 0)),
episode=int(data.get('episode', 0))
)
def write_training_info(self, data: Dict) -> NoReturn:
'''
Persist the given training progress info (e.g. train_step, frame_step, episode) to `{base_dir}/step.json`.
'''
with open(f'{self.base_dir}/step.json', 'w') as f:
json.dump(data, f)
def writer_summary(self,
global_step: Union[int, tf.Variable],
writer: Optional[tf.summary.SummaryWriter] = None,
summaries: Dict = {}) -> NoReturn:
"""
record the data used to show in the tensorboard
"""
if not self.no_save:
writer = writer or self.writer
writer.set_as_default()
tf.summary.experimental.set_step(global_step)
for k, v in summaries.items():
tf.summary.scalar('AGENT/' + k, v)
writer.flush()
def write_training_summaries(self,
global_step: Union[int, tf.Variable],
summaries: Dict = {},
writer: Optional[tf.summary.SummaryWriter] = None) -> NoReturn:
'''
write tf summaries showing in tensorboard.
'''
if not self.no_save:
writer = writer or self.writer
writer.set_as_default()
tf.summary.experimental.set_step(global_step)
for key, value in summaries.items():
tf.summary.scalar(key, value)
writer.flush()
def get_worker_params(self):
weights_list = [param.numpy() for param in self._worker_params_list]
return weights_list
def set_worker_params(self, weights_list):
[src.assign(tgt) for src, tgt in zip(self._worker_params_list, weights_list)]
def save_weights(self, path: str) -> Any:
"""
save trained weights
:return: None
"""
# self.net.save_weights(os.path.join(path, 'net.ckpt'))
pass
def load_weights(self, path: str) -> Any:
"""
load trained weights
:return: None
"""
# self.net.load_weights(os.path.join(path, 'net.ckpt'))
pass
def close(self) -> Any:
"""
end training, and export the training model
"""
pass
def get_global_step(self) -> int:
"""
get the current training step.
"""
return self.global_step
def set_global_step(self, num: int) -> NoReturn:
"""
set the start training step.
"""
self.global_step.assign(num)
def _model_post_process(self) -> NoReturn:
self._worker_params_list = []
for k, v in self._worker_params_dict.items():
if isinstance(v, tf.keras.Model):
self._worker_params_list.extend(v.weights)
else:
self._worker_params_list.append(v)
self._create_saver()
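# Self-contained sketch of the Checkpoint/CheckpointManager pattern used by
# _create_saver, init_or_restore and save_checkpoint above, on a throwaway
# variable and a temporary directory (nothing here touches this class's own
# directories); it is illustrative, not part of the training path.
def _example_checkpoint_roundtrip():
    import tempfile
    ckpt_dir = tempfile.mkdtemp()
    step = tf.Variable(0, dtype=tf.int64)
    checkpoint = tf.train.Checkpoint(global_step=step)
    manager = tf.train.CheckpointManager(checkpoint, directory=ckpt_dir,
                                         max_to_keep=5, checkpoint_name='ckpt')
    step.assign(10)
    manager.save(checkpoint_number=10)
    step.assign(0)
    checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial()
    return int(step.numpy())  # 10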
| 36.806167
| 178
| 0.552005
|
e6886ec74b637f845151ae93ca7398b7fc0b74a7
| 196
|
py
|
Python
|
utils/myutils/cookie2dict.py
|
greats3an/pycxclient
|
68f50d9f262c3b3198ea3b467a263dfd94f342bf
|
[
"Apache-2.0"
] | 3
|
2020-04-26T09:02:56.000Z
|
2022-02-25T16:46:30.000Z
|
utils/myutils/cookie2dict.py
|
greats3an/pycxclient
|
68f50d9f262c3b3198ea3b467a263dfd94f342bf
|
[
"Apache-2.0"
] | 1
|
2021-03-26T16:01:50.000Z
|
2021-03-26T16:01:50.000Z
|
utils/myutils/cookie2dict.py
|
greats3an/pycxclient
|
68f50d9f262c3b3198ea3b467a263dfd94f342bf
|
[
"Apache-2.0"
] | null | null | null |
'''
# Cookie2Dict Module
Turns cookie strings into a dictionary
'''
def cookie2dict(src):
    # Split on the first '=' only, so values that themselves contain '=' are
    # preserved, and skip empty fragments (e.g. a trailing ';').
    args = {pair.split('=', 1)[0].strip(): pair.split('=', 1)[1].strip()
            for pair in src.split(';') if '=' in pair}
    return args
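# Hedged usage sketch: the cookie string below is made up for illustration and
# does not refer to any real service; it just shows the expected dict output.
if __name__ == '__main__':
    example = 'sessionid=abc123; theme=dark; token=a=b'
    print(cookie2dict(example))
    # {'sessionid': 'abc123', 'theme': 'dark', 'token': 'a=b'}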
| 21.777778
| 84
| 0.607143
|
a7ff693a40eb3d99191844bc9d2ad6998beb947b
| 6,868
|
py
|
Python
|
lumbermill/output/File.py
|
dstore-dbap/LumberMill
|
b7cbadc209a83386871735b8ad88b61da917a6ab
|
[
"Apache-2.0"
] | 15
|
2015-12-14T19:07:28.000Z
|
2022-02-28T13:32:11.000Z
|
lumbermill/output/File.py
|
dstore-dbap/LumberMill
|
b7cbadc209a83386871735b8ad88b61da917a6ab
|
[
"Apache-2.0"
] | null | null | null |
lumbermill/output/File.py
|
dstore-dbap/LumberMill
|
b7cbadc209a83386871735b8ad88b61da917a6ab
|
[
"Apache-2.0"
] | 4
|
2017-02-08T10:49:55.000Z
|
2019-03-19T18:47:46.000Z
|
# -*- coding: utf-8 -*-
import collections
import os
import sys
import time
from io import BytesIO
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Buffers import Buffer
from lumbermill.utils.Decorators import ModuleDocstringParser, setInterval
from lumbermill.utils.DynamicValues import mapDynamicValue
from lumbermill.utils.misc import TimedFunctionManager
@ModuleDocstringParser
class File(BaseThreadedModule):
"""
Store all received events in a file.
file_name: absolute path to the output file. The string may contain Python strftime directives and event fields, e.g. %Y-%m-%d.
format: Which event fields to use in the logline, e.g. '$(@timestamp) - $(url) - $(country_code)'
store_interval_in_secs: flush buffered events to the file every x seconds.
batch_size: flush buffered events to the file once this many events are buffered, even if store_interval_in_secs has not been reached.
backlog_size: maximum count of events waiting for transmission. Events above count will be dropped.
compress: Compress output as gzip or snappy file. For this to be effective, the chunk size should not be too small.
Configuration template:
- output.File:
file_name: # <type: string; is: required>
format: # <default: '$(data)'; type: string; is: optional>
store_interval_in_secs: # <default: 10; type: integer; is: optional>
batch_size: # <default: 500; type: integer; is: optional>
backlog_size: # <default: 500; type: integer; is: optional>
compress: # <default: None; type: None||string; values: [None,'gzip','snappy']; is: optional>
"""
module_type = "output"
"""Set module type"""
can_run_forked = False
def configure(self, configuration):
# Call parent configure method
BaseThreadedModule.configure(self, configuration)
self.batch_size = self.getConfigurationValue('batch_size')
self.backlog_size = self.getConfigurationValue('backlog_size')
self.file_name = self.getConfigurationValue('file_name')
self.format = self.getConfigurationValue('format')
self.compress = self.getConfigurationValue('compress')
self.file_handles = {}
if self.compress == 'gzip':
try:
# Import module into namespace of object. Otherwise it will not be accessible when process was forked.
self.gzip_module = __import__('gzip')
except ImportError:
self.logger.error('Gzip compression selected but gzip module could not be loaded.')
self.lumbermill.shutDown()
if self.compress == 'snappy':
try:
self.snappy_module = __import__('snappy')
except ImportError:
self.logger.error('Snappy compression selected but snappy module could not be loaded.')
self.lumbermill.shutDown()
self.buffer = Buffer(self.batch_size, self.storeData, self.getConfigurationValue('store_interval_in_secs'), maxsize=self.backlog_size)
TimedFunctionManager.startTimedFunction(self.closeStaleFileHandles)
def getStartMessage(self):
return "File: %s. Max buffer size: %d" % (self.file_name, self.getConfigurationValue('backlog_size'))
@setInterval(60)
def closeStaleFileHandles(self):
"""
Close and remove file handles that have not been used for 5 minutes.
"""
# Iterate over a snapshot so entries can be removed safely while looping.
for path, file_handle_data in list(self.file_handles.items()):
    last_used_time_ago = time.time() - file_handle_data['lru']
    if last_used_time_ago < 300:
        continue
    self.logger.info('Closing stale file handle for %s.' % path)
    file_handle_data['handle'].close()
    self.file_handles.pop(path)
def closeAllFileHandles(self):
file_handles = dict(self.file_handles)
for path, file_handle_data in file_handles.items():
self.logger.info('Closing file handle for %s.' % path)
file_handle_data['handle'].close()
self.file_handles.pop(path)
def ensurePathExists(self, path):
dirpath = os.path.dirname(path)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
def handleEvent(self, event):
self.buffer.append(event)
yield None
def getOrCreateFileHandle(self, path, mode):
file_handle = None
try:
file_handle = self.file_handles[path]['handle']
self.file_handles[path]['lru'] = time.time()
except KeyError:
try:
file_handle = open(path, mode)
self.file_handles[path] = {'handle': file_handle, 'lru': time.time()}
except Exception:
    etype, evalue, etb = sys.exc_info()
    self.logger.error('Could not open %s for writing. Exception: %s, Error: %s.' % (path, etype, evalue))
return file_handle
def storeData(self, events):
write_data = collections.defaultdict(str)
for event in events:
path = mapDynamicValue(self.file_name, mapping_dict=event, use_strftime=True)
line = mapDynamicValue(self.format, mapping_dict=event)
write_data["%s" % path] += line + "\n"
for path, lines in write_data.items():
try:
self.ensurePathExists(path)
except Exception:
    etype, evalue, etb = sys.exc_info()
    self.logger.error('Could not create path %s. Events could not be written. Exception: %s, Error: %s.' % (path, etype, evalue))
return
mode = "a+"
if self.compress == 'gzip':
path += ".gz"
mode += "b"
lines = self.compressGzip(lines)
elif self.compress == 'snappy':
path += ".snappy"
lines = self.compressSnappy(lines)
mode += "b"
try:
fh = self.getOrCreateFileHandle(path, mode)
fh.write(lines)
fh.flush()
return True
except Exception:
    etype, evalue, etb = sys.exc_info()
    self.logger.error('Could not write event data to %s. Exception: %s, Error: %s.' % (path, etype, evalue))
def shutDown(self):
self.buffer.flush()
self.closeAllFileHandles()
BaseThreadedModule.shutDown(self)
def compressGzip(self, data):
buffer = BytesIO()
compressor = self.gzip_module.GzipFile(mode='wb', fileobj=buffer)
try:
compressor.write(bytes(data, "utf-8"))
finally:
compressor.close()
return buffer.getvalue()
def compressSnappy(self, data):
return self.snappy_module.compress(bytes(data, "utf-8"))
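# Standalone sketch (not wired into the module): it demonstrates the gzip
# round-trip that compressGzip relies on, using the standard-library gzip
# module directly rather than the lazily imported self.gzip_module.
def _example_gzip_roundtrip():
    import gzip
    payload = 'one log line\nanother log line\n'
    buffer = BytesIO()
    with gzip.GzipFile(mode='wb', fileobj=buffer) as compressor:
        compressor.write(payload.encode('utf-8'))
    compressed = buffer.getvalue()
    return gzip.decompress(compressed).decode('utf-8') == payload  # True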
| 42.925
| 142
| 0.612697
|
7ac2b6e1a5f3d1ec290d4912e4f49e8a012c73a4
| 61,839
|
py
|
Python
|
tools/model_transfer.py
|
glemaitre/ramp-board-1
|
a5e9b423a55d196d38232fd94b2f7d53fb35d9d8
|
[
"BSD-3-Clause"
] | null | null | null |
tools/model_transfer.py
|
glemaitre/ramp-board-1
|
a5e9b423a55d196d38232fd94b2f7d53fb35d9d8
|
[
"BSD-3-Clause"
] | null | null | null |
tools/model_transfer.py
|
glemaitre/ramp-board-1
|
a5e9b423a55d196d38232fd94b2f7d53fb35d9d8
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import zlib
import logging
import datetime
import numpy as np
from importlib import import_module
from sqlalchemy.ext.hybrid import hybrid_property
from databoard import db
import databoard.config as config
logger = logging.getLogger('databoard')
class NumpyType(db.TypeDecorator):
"""Storing zipped numpy arrays."""
impl = db.LargeBinary
def process_bind_param(self, value, dialect):
# we convert the initial value into np.array to handle None and lists
return zlib.compress(np.array(value).dumps())
def process_result_value(self, value, dialect):
return np.loads(zlib.decompress(value))
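# Minimal illustration of the bind/result round-trip above, independent of
# SQLAlchemy. np.loads is a thin alias for pickle.loads (and is deprecated in
# newer NumPy), so the sketch calls pickle.loads directly.
def _example_numpy_type_roundtrip():
    import pickle
    original = [1.0, 2.5, None]  # None and plain lists are allowed, as noted above
    stored = zlib.compress(np.array(original).dumps())
    restored = pickle.loads(zlib.decompress(stored))
    return restored  # object-dtype array of shape (3,)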
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False, unique=True)
hashed_password = db.Column(db.String, nullable=False)
lastname = db.Column(db.String(256), nullable=False)
firstname = db.Column(db.String(256), nullable=False)
email = db.Column(db.String(256), nullable=False, unique=True)
linkedin_url = db.Column(db.String(256), default=None)
twitter_url = db.Column(db.String(256), default=None)
facebook_url = db.Column(db.String(256), default=None)
google_url = db.Column(db.String(256), default=None)
github_url = db.Column(db.String(256), default=None)
website_url = db.Column(db.String(256), default=None)
hidden_notes = db.Column(db.String, default=None)
bio = db.Column(db.String(1024), default=None)
is_want_news = db.Column(db.Boolean, default=True)
access_level = db.Column(db.Enum(
'admin', 'user', 'asked', name='access_level'), default='asked')
# 'asked' needs approval
signup_timestamp = db.Column(db.DateTime, nullable=False)
# Flask-Login fields
is_authenticated = db.Column(db.Boolean, default=True)
is_active = db.Column(db.Boolean, default=True)
# def __init__(self, name, hashed_password, lastname, firstname, email,
# access_level='user', hidden_notes='', linkedin_url='',
# twitter_url='', facebook_url='', google_url='', github_url='',
# website_url='', bio='', is_want_news=True,
# signup_timestamp=None, is_authenticated=None, is_active=None):
# self.name = name
# self.hashed_password = hashed_password
# self.lastname = lastname
# self.firstname = firstname
# self.email = email
# self.access_level = access_level
# self.hidden_notes = hidden_notes
# if signup_timestamp:
# self.signup_timestamp = signup_timestamp
# else:
# self.signup_timestamp = datetime.datetime.utcnow()
# if is_authenticated:
# self.is_authenticated = is_authenticated
# if is_active:
# self.is_active = is_active
# self.linkedin_url = linkedin_url
# self.twitter_url = twitter_url
# self.facebook_url = facebook_url
# self.google_url = google_url
# self.github_url = github_url
# self.website_url = website_url
# self.bio = bio
# self.is_want_news = is_want_news
@property
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
def __str__(self):
str_ = 'User({})'.format(self.name)
# str_ = 'User({}, admined=['.format(self.name)
# str_ += string.join([team.name for team in self.admined_teams], ', ')
# str_ += '])'
return str_
def __repr__(self):
repr = '''User(name={}, lastname={}, firstname={}, email={},
admined_teams={})'''.format(
self.name, self.lastname, self.firstname, self.email,
self.admined_teams)
return repr
class Team(db.Model):
__tablename__ = 'teams'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False, unique=True)
admin_id = db.Column(db.Integer, db.ForeignKey('users.id'))
admin = db.relationship('User', backref=db.backref('admined_teams'))
# initiator asks for merge, acceptor accepts
initiator_id = db.Column(
db.Integer, db.ForeignKey('teams.id'), default=None)
initiator = db.relationship(
'Team', primaryjoin=('Team.initiator_id == Team.id'), uselist=False)
acceptor_id = db.Column(
db.Integer, db.ForeignKey('teams.id'), default=None)
acceptor = db.relationship(
'Team', primaryjoin=('Team.acceptor_id == Team.id'), uselist=False)
creation_timestamp = db.Column(db.DateTime, nullable=False)
# def __init__(self, name, admin, initiator=None, acceptor=None,
# creation_timestamp=None):
# self.name = name
# self.admin = admin
# self.initiator = initiator
# self.acceptor = acceptor
# if creation_timestamp:
# self.creation_timestamp = creation_timestamp
# else:
# self.creation_timestamp = datetime.datetime.utcnow()
def __str__(self):
str_ = 'Team({})'.format(self.name)
return str_
def __repr__(self):
repr = '''Team(name={}, admin_name={},
initiator={}, acceptor={})'''.format(
self.name, self.admin.name, self.initiator, self.acceptor)
return repr
def get_team_members(team):
if team.initiator is not None:
# "yield from" in Python 3.3
for member in get_team_members(team.initiator):
yield member
for member in get_team_members(team.acceptor):
yield member
else:
yield team.admin
def get_n_team_members(team):
return len(list(get_team_members(team)))
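# Illustration of the recursive traversal in get_team_members, using a plain
# namedtuple as a stand-in for the Team model (no database involved). A merged
# team is a node whose initiator/acceptor point at the two original teams;
# leaf teams carry the admin user directly.
def _example_team_traversal():
    from collections import namedtuple
    FakeTeam = namedtuple('FakeTeam', ['admin', 'initiator', 'acceptor'])
    alice_team = FakeTeam(admin='alice', initiator=None, acceptor=None)
    bob_team = FakeTeam(admin='bob', initiator=None, acceptor=None)
    merged = FakeTeam(admin=None, initiator=alice_team, acceptor=bob_team)
    assert list(get_team_members(merged)) == ['alice', 'bob']
    assert get_n_team_members(merged) == 2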
def get_user_teams(user):
teams = Team.query.all()
for team in teams:
if user in get_team_members(team):
yield team
def get_user_event_teams(event_name, user_name):
event = Event.query.filter_by(name=event_name).one()
user = User.query.filter_by(name=user_name).one()
event_teams = EventTeam.query.filter_by(event=event).all()
for event_team in event_teams:
if user in get_team_members(event_team.team):
yield event_team
def get_n_user_teams(user):
return len(get_user_teams(user))
# a given RAMP problem, like iris or variable_stars
class Problem(db.Model):
__tablename__ = 'problems'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False, unique=True)
workflow_id = db.Column(
db.Integer, db.ForeignKey('workflows.id'), nullable=False)
workflow = db.relationship(
'Workflow', backref=db.backref('problems'))
# def __init__(self, name):
# self.name = name
# self.reset()
# # to check if the module and all required fields are there
# self.module
# self.prediction
# self.train_submission
# self.test_submission
def __repr__(self):
repr = 'Problem({})\n{}'.format(self.name, self.workflow)
return repr
def reset(self):
self.workflow = Workflow.query.filter_by(
name=self.module.workflow_name).one()
@property
def module(self):
return import_module('.' + self.name, config.problems_module)
@property
def prediction(self):
return self.module.prediction
def true_predictions_train(self):
_, y_train = self.module.get_train_data()
return self.prediction.Predictions(y_true=y_train)
def true_predictions_test(self):
_, y_test = self.module.get_test_data()
return self.prediction.Predictions(y_true=y_test)
def true_predictions_valid(self, test_is):
_, y_train = self.module.get_train_data()
return self.prediction.Predictions(y_true=y_train[test_is])
@property
def train_submission(self):
return self.workflow.train_submission
@property
def test_submission(self):
return self.workflow.test_submission
class ScoreType(db.Model):
__tablename__ = 'score_types'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False, unique=True)
is_lower_the_better = db.Column(db.Boolean, nullable=False)
minimum = db.Column(db.Float, nullable=False)
maximum = db.Column(db.Float, nullable=False)
# def __init__(self, name, is_lower_the_better, minimum, maximum):
# self.name = name
# self.is_lower_the_better = is_lower_the_better
# self.minimum = minimum
# self.maximum = maximum
# # to check if the module and all required fields are there
# self.module
# self.score_function
# self.precision
def __repr__(self):
repr = 'ScoreType(name={})'.format(self.name)
return repr
@property
def module(self):
return import_module('.' + self.name, config.score_types_module)
@property
def score_function(self):
return self.module.score_function
@property
def worst(self):
if self.is_lower_the_better:
return self.maximum
else:
return self.minimum
# default display precision in n_digits
@property
def precision(self):
return self.module.precision
# a given RAMP event, like iris_test or M2_data_science_2015_variable_stars
class Event(db.Model):
__tablename__ = 'events'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False, unique=True)
problem_id = db.Column(
db.Integer, db.ForeignKey('problems.id'), nullable=False)
problem = db.relationship('Problem', backref=db.backref(
'events', cascade='all, delete-orphan'))
max_members_per_team = db.Column(db.Integer, default=1)
# max number of submissions in Caruana's ensemble
max_n_ensemble = db.Column(db.Integer, default=80)
is_send_trained_mails = db.Column(db.Boolean, default=True)
is_send_submitted_mails = db.Column(db.Boolean, default=True)
is_public = db.Column(db.Boolean, default=False)
is_controled_signup = db.Column(db.Boolean, default=True)
min_duration_between_submissions = db.Column(db.Integer, default=15 * 60)
opening_timestamp = db.Column(
db.DateTime, default=datetime.datetime(2000, 1, 1, 0, 0, 0))
# before links to submissions in leaderboard are not alive
public_opening_timestamp = db.Column(
db.DateTime, default=datetime.datetime(2000, 1, 1, 0, 0, 0))
closing_timestamp = db.Column(
db.DateTime, default=datetime.datetime(4000, 1, 1, 0, 0, 0))
# the index of the score in self.event_score_types which is used for
# ensembling and contributivity. The default is 0 (first in the list).
official_score_index = db.Column(db.Integer, default=0)
combined_combined_valid_score = db.Column(db.Float, default=None)
combined_combined_test_score = db.Column(db.Float, default=None)
combined_foldwise_valid_score = db.Column(db.Float, default=None)
combined_foldwise_test_score = db.Column(db.Float, default=None)
# def __init__(self, name):
# self.name = name
# # to check if the module and all required fields are there
# # db fields are later initialized by db.tools._set_table_attribute
# self.module
# self.problem = Problem.query.filter_by(
# name=self.module.problem_name).one()
# self.title
# self.prediction
def __repr__(self):
repr = 'Event({})'.format(self.name)
return repr
@property
def module(self):
return import_module('.' + self.name, config.events_module)
@property
def title(self):
return self.module.event_title
@property
def prediction(self):
return self.problem.prediction
@property
def workflow(self):
return self.problem.workflow
@property
def official_score_function(self):
return self.score_types[
self.official_score_index].score_function
@property
def official_score_type(self):
return self.score_types[self.official_score_index]
@property
def train_submission(self):
return self.problem.train_submission
@property
def test_submission(self):
return self.problem.test_submission
@property
def combined_combined_valid_score_str(self):
return None if self.combined_foldwise_valid_score is None else str(
round(self.combined_combined_valid_score,
self.official_score_type.precision))
@property
def combined_combined_test_score_str(self):
return None if self.combined_combined_test_score is None else str(
round(self.combined_combined_test_score,
self.official_score_type.precision))
@property
def combined_foldwise_valid_score_str(self):
return None if self.combined_foldwise_valid_score is None else str(
round(self.combined_foldwise_valid_score,
self.official_score_type.precision))
@property
def combined_foldwise_test_score_str(self):
return None if self.combined_foldwise_test_score is None else str(
round(self.combined_foldwise_test_score,
self.official_score_type.precision))
@property
def is_open(self):
now = datetime.datetime.utcnow()
return now > self.opening_timestamp and now < self.closing_timestamp
@property
def is_public_open(self):
now = datetime.datetime.utcnow()
return now > self.public_opening_timestamp\
and now < self.closing_timestamp
@property
def is_closed(self):
now = datetime.datetime.utcnow()
return now > self.closing_timestamp
@property
def n_jobs(self):
"""Number of jobs for local parallelization.
return: number of live cv folds.
"""
return sum(1 for cv_fold in self.cv_folds if cv_fold.type == 'live')
# many-to-many
class EventScoreType(db.Model):
__tablename__ = 'event_score_types'
id = db.Column(db.Integer, primary_key=True)
# Can be renamed, default is the same as score_type.name
name = db.Column(db.String, nullable=False)
event_id = db.Column(
db.Integer, db.ForeignKey('events.id'), nullable=False)
event = db.relationship('Event', backref=db.backref(
'score_types', cascade='all, delete-orphan'))
score_type_id = db.Column(
db.Integer, db.ForeignKey('score_types.id'), nullable=False)
score_type = db.relationship(
'ScoreType', backref=db.backref('events'))
# display precision in n_digits
# default is the same as score_type.precision
precision = db.Column(db.Integer)
db.UniqueConstraint(event_id, score_type_id, name='es_constraint')
# def __init__(self, event, score_type, name=None, precision=None):
# self.event = event
# self.score_type = score_type
# if name is None:
# self.name = score_type.name
# if precision is None:
# self.precision = score_type.precision
def __repr__(self):
repr = '{}: {}/{}'.format(self.name, self.event, self.score_type)
return repr
@property
def score_function(self):
return self.score_type.score_function
@property
def is_lower_the_better(self):
return self.score_type.is_lower_the_better
@property
def minimum(self):
return self.score_type.minimum
@property
def maximum(self):
return self.score_type.maximum
@property
def worst(self):
return self.score_type.worst
cv_fold_types = db.Enum('live', 'test', name='cv_fold_types')
class CVFold(db.Model):
"""Storing train and test folds, more precisely: train and test indices.
Created when the ramp event is set up.
"""
__tablename__ = 'cv_folds'
id = db.Column(db.Integer, primary_key=True)
type = db.Column(cv_fold_types, default='live')
train_is = db.Column(NumpyType, nullable=False)
test_is = db.Column(NumpyType, nullable=False)
event_id = db.Column(
db.Integer, db.ForeignKey('events.id'), nullable=False)
event = db.relationship('Event', backref=db.backref(
'cv_folds', cascade='all, delete-orphan'))
def __repr__(self):
return 'fold {}'.format(self.train_is)[:15]
class EventAdmin(db.Model):
__tablename__ = 'event_admins'
id = db.Column(db.Integer, primary_key=True)
event_id = db.Column(
db.Integer, db.ForeignKey('events.id'), nullable=False)
event = db.relationship('Event', backref=db.backref(
'event_admins', cascade='all, delete-orphan'))
admin_id = db.Column(
db.Integer, db.ForeignKey('users.id'), nullable=False)
admin = db.relationship(
'User', backref=db.backref('admined_events'))
# many-to-many
class EventTeam(db.Model):
__tablename__ = 'event_teams'
id = db.Column(db.Integer, primary_key=True)
event_id = db.Column(
db.Integer, db.ForeignKey('events.id'), nullable=False)
event = db.relationship('Event', backref=db.backref(
'event_teams', cascade='all, delete-orphan'))
team_id = db.Column(
db.Integer, db.ForeignKey('teams.id'), nullable=False)
team = db.relationship(
'Team', backref=db.backref('team_events'))
is_active = db.Column(db.Boolean, default=True)
last_submission_name = db.Column(db.String, default=None)
signup_timestamp = db.Column(db.DateTime, nullable=False)
db.UniqueConstraint(event_id, team_id, name='et_constraint')
# def __init__(self, event, team, is_active=None, signup_timestamp=None):
# self.event = event
# self.team = team
# if signup_timestamp:
# self.signup_timestamp = signup_timestamp
# else:
# self.signup_timestamp = datetime.datetime.utcnow()
# if is_active:
# self.is_active = is_active
def __repr__(self):
repr = '{}/{}'.format(self.event, self.team)
return repr
def get_active_user_event_team(event, user):
# There should always be an active user event team; if none is found, None is returned.
event_teams = EventTeam.query.filter_by(event=event).all()
for event_team in event_teams:
if user in get_team_members(event_team.team) and event_team.is_active:
return event_team
class SubmissionFileType(db.Model):
__tablename__ = 'submission_file_types'
id = db.Column(db.Integer, primary_key=True)
# eg. 'code', 'text', 'data'
name = db.Column(db.String, nullable=False, unique=True)
is_editable = db.Column(db.Boolean, default=True)
max_size = db.Column(db.Integer, default=None)
class Extension(db.Model):
__tablename__ = 'extensions'
id = db.Column(db.Integer, primary_key=True)
# eg. 'py', 'csv', 'R'
name = db.Column(db.String, nullable=False, unique=True)
# many-to-many connection between SubmissionFileType and Extension
class SubmissionFileTypeExtension(db.Model):
__tablename__ = 'submission_file_type_extensions'
id = db.Column(db.Integer, primary_key=True)
type_id = db.Column(
db.Integer, db.ForeignKey('submission_file_types.id'), nullable=False)
type = db.relationship(
'SubmissionFileType', backref=db.backref('extensions'))
extension_id = db.Column(
db.Integer, db.ForeignKey('extensions.id'), nullable=False)
extension = db.relationship(
'Extension', backref=db.backref('submission_file_types'))
db.UniqueConstraint(type_id, extension_id, name='we_constraint')
@property
def file_type(self):
return self.type.name
@property
def extension_name(self):
return self.extension.name
class WorkflowElementType(db.Model):
__tablename__ = 'workflow_element_types'
id = db.Column(db.Integer, primary_key=True)
# file name without extension
# eg, regressor, classifier, external_data
name = db.Column(db.String, nullable=False, unique=True)
# eg, code, text, data
type_id = db.Column(
db.Integer, db.ForeignKey('submission_file_types.id'), nullable=False)
type = db.relationship(
'SubmissionFileType', backref=db.backref('workflow_element_types'))
def __repr__(self):
repr = 'WorkflowElementType(name={}, type={}, is_editable={}, max_size={})'.format(
self.name, self.type.name, self.type.is_editable,
self.type.max_size)
return repr
@property
def file_type(self):
return self.type.name
@property
def is_editable(self):
return self.type.is_editable
@property
def max_size(self):
return self.type.max_size
# training and test code now belongs to the workflow, not the workflow
# element. The latter would require carefully defining workflow element
# interfaces. Eg, a dilemma: classifier + calibrator needs to be handled at the
# workflow level (since the calibrator needs held out data). Eventually we should
# have both workflow-level and workflow-element-level code to avoid code
# repetition.
class Workflow(db.Model):
__tablename__ = 'workflows'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False, unique=True)
# def __init__(self, name):
# self.name = name
# # to check if the module and all required fields are there
# self.module
# self.train_submission
# self.test_submission
def __repr__(self):
repr = 'Workflow({})'.format(self.name)
for workflow_element in self.elements:
repr += '\n\t' + str(workflow_element)
return repr
@property
def module(self):
return import_module('.' + self.name, config.workflows_module)
@property
def train_submission(self):
return self.module.train_submission
@property
def test_submission(self):
return self.module.test_submission
# In lists we will order files according to their ids
# many-to-many link
# For now files define the workflow, so eg, a feature_extractor + regressor
# is not the same workflow as a feature_extractor + regressor + external data,
# even though the training codes are the same.
class WorkflowElement(db.Model):
__tablename__ = 'workflow_elements'
id = db.Column(db.Integer, primary_key=True)
# Normally name will be the same as workflow_element_type.type.name,
# unless specified otherwise. It's because in more complex workflows
# the same type can occur more than once. self.type below will always
# refer to workflow_element_type.type.name
name = db.Column(db.String, nullable=False)
workflow_id = db.Column(
db.Integer, db.ForeignKey('workflows.id'))
workflow = db.relationship(
'Workflow', backref=db.backref('elements'))
workflow_element_type_id = db.Column(
db.Integer, db.ForeignKey('workflow_element_types.id'),
nullable=False)
workflow_element_type = db.relationship(
'WorkflowElementType', backref=db.backref('workflows'))
# def __init__(self, workflow, workflow_element_type, name_in_workflow=None):
# self.workflow = workflow
# self.workflow_element_type = workflow_element_type
# if name_in_workflow is None:
# self.name = self.workflow_element_type.name
# else:
# self.name = name_in_workflow
def __repr__(self):
return 'Workflow({}): WorkflowElement({})'.format(
self.workflow.name, self.name)
# e.g. 'regression', 'external_data'. Normally == name
@property
def type(self):
return self.workflow_element_type.name
@property
def file_type(self):
return self.workflow_element_type.file_type
@property
def is_editable(self):
return self.workflow_element_type.is_editable
@property
def max_size(self):
return self.workflow_element_type.max_size
# TODO: we should have a SubmissionWorkflowElementType table, describing the
# type of files we are expecting for a given RAMP. Fast unit test should be
# set up there, and each file should be unit tested right after submission.
# Cosmetics: perhaps mark which file the leaderboard link should point to (right
# now it is set to the first file in the list which is arbitrary).
# We will also have to handle auxiliary files (like csvs or other classes).
# User interface could have a single submission form with a menu containing
# the file names for a given ramp + an "other" field where users will have to
# name their files
class SubmissionFile(db.Model):
__tablename__ = 'submission_files'
id = db.Column(db.Integer, primary_key=True)
submission_id = db.Column(
db.Integer, db.ForeignKey('submissions.id'), nullable=False)
submission = db.relationship(
'Submission',
backref=db.backref('files', cascade='all, delete-orphan'))
# e.g. 'regression', 'external_data'
workflow_element_id = db.Column(
db.Integer, db.ForeignKey('workflow_elements.id'),
nullable=False)
workflow_element = db.relationship(
'WorkflowElement', backref=db.backref('submission_files'))
# e.g., ('code', 'py'), ('data', 'csv')
submission_file_type_extension_id = db.Column(
db.Integer, db.ForeignKey('submission_file_type_extensions.id'),
nullable=False)
submission_file_type_extension = db.relationship(
'SubmissionFileTypeExtension', backref=db.backref('submission_files'))
# eg, 'py'
@property
def is_editable(self):
return self.workflow_element.is_editable
# eg, 'py'
@property
def extension(self):
return self.submission_file_type_extension.extension.name
# eg, 'regressor'
@property
def type(self):
return self.workflow_element.type
# eg, 'regressor', Normally same as type, except when type appears more
# than once in workflow
@property
def name(self):
return self.workflow_element.name
# Complete file name, eg, 'regressor.py'
@property
def f_name(self):
return self.type + '.' + self.extension
@property
def link(self):
return '/' + os.path.join(self.submission.hash_, self.f_name)
@property
def path(self):
return os.path.join(self.submission.path, self.f_name)
@property
def name_with_link(self):
return '<a href="' + self.link + '">' + self.name[:20] + '</a>'
def get_code(self):
with open(self.path) as f:
code = f.read()
return code
def set_code(self, code):
code.encode('ascii') # to raise an exception if code is not ascii
with open(self.path, 'w') as f:
f.write(code)
def __repr__(self):
return 'SubmissionFile(name={}, type={}, extension={}, path={})'.\
format(self.name, self.type, self.extension, self.path)
def combine_predictions_list(predictions_list, index_list=None):
"""Combine predictions in predictions_list[index_list].
By taking the mean of their get_combineable_predictions views.
E.g. for regression it is the actual
predictions, and for classification it is the probability array (which
should be calibrated if we want the best performance). Called both for
combining one submission on cv folds (a single model that is trained on
different folds) and several models on a single fold.
Called by
_get_bagging_score : which combines bags of the same model, trained on
different folds, on the heldout test set
_get_cv_bagging_score : which combines cv-bags of the same model, trained
on different folds, on the training set
get_next_best_single_fold : which does one step of the greedy forward
selection (of different models) on a single fold
_get_combined_predictions_single_fold : which does the full loop of greedy
forward selection (of different models), until improvement, on a single
fold
_get_combined_test_predictions_single_fold : which computes the combination
(constructed on the cv valid set) on the holdout test set, on a single
fold
_get_combined_test_predictions : which combines the foldwise combined
and foldwise best test predictions into a single megacombination
Parameters
----------
predictions_list : list of instances of Predictions
Each element of the list is an instance of Predictions of a given model
on the same data points.
index_list : None | list of integers
The subset of predictions to be combined. If None, the full set is
combined.
Returns
-------
combined_predictions : instance of Predictions
A predictions instance containing the combined (averaged) predictions.
"""
if index_list is None: # we combine the full list
index_list = range(len(predictions_list))
y_comb_list = np.array(
[predictions_list[i].y_pred_comb for i in index_list])
Predictions = type(predictions_list[0])
y_comb = np.nanmean(y_comb_list, axis=0)
combined_predictions = Predictions(y_pred=y_comb)
return combined_predictions
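# Simplified illustration of the combination step: combine_predictions_list
# takes a nanmean over the y_pred_comb views of each Predictions object. The
# helper below reproduces only that averaging on plain numpy arrays, so it is
# a sketch of the arithmetic, not of the Predictions API.
def _example_combine_plain_arrays():
    fold_a = np.array([0.2, 0.8, np.nan])
    fold_b = np.array([0.4, 0.6, 1.0])
    combined = np.nanmean(np.array([fold_a, fold_b]), axis=0)
    # combined == [0.3, 0.7, 1.0]: nan entries are ignored point-wise
    return combined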
def _get_score_cv_bags(event, score_type, predictions_list, true_predictions,
test_is_list=None):
"""
Compute the bagged score of the predictions in predictions_list.
Called by Submission.compute_valid_score_cv_bag and
db_tools.compute_contributivity.
Parameters
----------
event : instance of Event
Needed for the type of y_comb (it provides the Predictions class used for combining).
predictions_list : list of instances of Predictions
true_predictions : instance of Predictions
test_is_list : list of integers
Indices of points that should be bagged in each prediction. If None,
the full prediction vectors will be bagged.
Returns
-------
score_cv_bags : list of scores
    The partial cv-bagged scores, one entry per added fold.
"""
if test_is_list is None: # we combine the full list
test_is_list = [range(len(predictions.y_pred))
for predictions in predictions_list]
n_samples = true_predictions.n_samples
y_comb = np.array(
[event.prediction.Predictions(n_samples=n_samples)
for _ in predictions_list])
score_cv_bags = []
for i, test_is in enumerate(test_is_list):
y_comb[i].set_valid_in_train(predictions_list[i], test_is)
combined_predictions = combine_predictions_list(y_comb[:i + 1])
valid_indexes = combined_predictions.valid_indexes
score_cv_bags.append(score_type.score_function(
true_predictions, combined_predictions, valid_indexes))
# XXX maybe use masked arrays rather than passing valid_indexes
return score_cv_bags
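# Numeric sketch of the cv-bagging loop above, with a toy RMSE standing in for
# score_type.score_function and plain arrays standing in for Predictions,
# ignoring the per-fold test_is slicing. It shows how the partial bags grow one
# fold at a time and the score is recomputed after each addition; the numbers
# carry no project meaning.
def _example_cv_bag_scores():
    y_true = np.array([1.0, 2.0, 3.0])
    fold_preds = [np.array([1.1, 2.1, 3.1]), np.array([0.9, 1.9, 2.9])]
    def rmse(y, y_hat):
        return float(np.sqrt(np.mean((y - y_hat) ** 2)))
    scores = []
    for i in range(len(fold_preds)):
        bagged = np.mean(np.array(fold_preds[:i + 1]), axis=0)
        scores.append(rmse(y_true, bagged))
    # scores[0] uses fold 0 alone; scores[1] uses the mean of folds 0 and 1,
    # which happens to equal y_true here, so it scores 0.0.
    return scores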
class SubmissionScore(db.Model):
__tablename__ = 'submission_scores'
id = db.Column(db.Integer, primary_key=True)
submission_id = db.Column(
db.Integer, db.ForeignKey('submissions.id'), nullable=False)
submission = db.relationship('Submission', backref=db.backref(
'scores', cascade='all, delete-orphan'))
event_score_type_id = db.Column(
db.Integer, db.ForeignKey('event_score_types.id'), nullable=False)
event_score_type = db.relationship(
'EventScoreType', backref=db.backref('submissions'))
# These are cv-bagged scores. Individual scores are found in
# SubmissionToTrain
valid_score_cv_bag = db.Column(db.Float) # cv
test_score_cv_bag = db.Column(db.Float) # holdout
# we store the partial scores so we can see the saturation and
# overfitting as the number of cv folds grows
valid_score_cv_bags = db.Column(NumpyType)
test_score_cv_bags = db.Column(NumpyType)
@property
def score_name(self):
return self.event_score_type.name
@property
def score_function(self):
return self.event_score_type.score_function
# default display precision in n_digits
@property
def precision(self):
return self.event_score_type.precision
@property
def train_score_cv_mean(self):
return np.array([ts.train_score for ts in self.on_cv_folds]).mean()
@property
def valid_score_cv_mean(self):
return np.array([ts.valid_score for ts in self.on_cv_folds]).mean()
@property
def test_score_cv_mean(self):
return np.array([ts.test_score for ts in self.on_cv_folds]).mean()
@property
def train_score_cv_std(self):
return np.array([ts.train_score for ts in self.on_cv_folds]).std()
@property
def valid_score_cv_std(self):
return np.array([ts.valid_score for ts in self.on_cv_folds]).std()
@property
def test_score_cv_std(self):
return np.array([ts.test_score for ts in self.on_cv_folds]).std()
# evaluate right after train/test, so no need for 'scored' states
submission_states = db.Enum(
'new', 'checked', 'checking_error', 'trained', 'training_error',
'validated', 'validating_error', 'tested', 'testing_error',
name='submission_states')
submission_types = db.Enum('live', 'test', name='submission_types')
class Submission(db.Model):
"""An abstract (untrained) submission."""
__tablename__ = 'submissions'
id = db.Column(db.Integer, primary_key=True)
event_team_id = db.Column(
db.Integer, db.ForeignKey('event_teams.id'), nullable=False)
event_team = db.relationship('EventTeam', backref=db.backref(
'submissions', cascade='all, delete-orphan'))
name = db.Column(db.String(20, convert_unicode=True), nullable=False)
hash_ = db.Column(db.String, nullable=False, index=True, unique=True)
submission_timestamp = db.Column(db.DateTime, nullable=False)
training_timestamp = db.Column(db.DateTime)
contributivity = db.Column(db.Float, default=0.0)
historical_contributivity = db.Column(db.Float, default=0.0)
type = db.Column(submission_types, default='live')
state = db.Column(submission_states, default='new')
# TODO: hide absolute path in error
error_msg = db.Column(db.String, default='')
# user can delete but we keep
is_valid = db.Column(db.Boolean, default=True)
# We can forget bad models.
# If false, don't combine and set contributivity to zero
is_to_ensemble = db.Column(db.Boolean, default=True)
notes = db.Column(db.String, default='') # eg, why is it disqualified
train_time_cv_mean = db.Column(db.Float, default=0.0)
valid_time_cv_mean = db.Column(db.Float, default=0.0)
test_time_cv_mean = db.Column(db.Float, default=0.0)
train_time_cv_std = db.Column(db.Float, default=0.0)
valid_time_cv_std = db.Column(db.Float, default=0.0)
test_time_cv_std = db.Column(db.Float, default=0.0)
# later also ramp_id
db.UniqueConstraint(event_team_id, name, name='ts_constraint')
# def __init__(self, name, event_team):
# self.name = name
# self.event_team = event_team
# sha_hasher = hashlib.sha1()
# sha_hasher.update(self.event.name.encode('utf-8'))
# sha_hasher.update(self.team.name.encode('utf-8'))
# sha_hasher.update(self.name.encode('utf-8'))
# # We considered using the id, but then it will be given away in the
# # url which is maybe not a good idea.
# self.hash_ = '{}'.format(sha_hasher.hexdigest())
# self.submission_timestamp = datetime.datetime.utcnow()
# event_score_types = EventScoreType.query.filter_by(
# event=event_team.event)
# for event_score_type in event_score_types:
# submission_score = SubmissionScore(
# submission=self, event_score_type=event_score_type)
# db.session.add(submission_score)
# self.reset()
def __str__(self):
return 'Submission({}/{}/{})'.format(
self.event.name, self.team.name, self.name)
def __repr__(self):
repr = '''Submission(event_name={}, team_name={}, name={}, files={},
state={}, train_time={})'''.format(
self.event.name, self.team.name, self.name, self.files,
self.state, self.train_time_cv_mean)
return repr
@hybrid_property
def team(self):
return self.event_team.team
@hybrid_property
def event(self):
return self.event_team.event
@property
def official_score_function(self):
return self.event.official_score_function
@property
def prediction(self):
return self.event.prediction
@hybrid_property
def is_not_sandbox(self):
return self.name != config.sandbox_d_name
@hybrid_property
def is_error(self):
return (self.state == 'training_error') |\
(self.state == 'checking_error') |\
(self.state == 'validating_error') |\
(self.state == 'testing_error')
@hybrid_property
def is_public_leaderboard(self):
return self.is_not_sandbox & self.is_valid & (
(self.state == 'validated') |
(self.state == 'tested'))
@hybrid_property
def is_private_leaderboard(self):
return self.is_not_sandbox & self.is_valid & (self.state == 'tested')
@property
def path(self):
return os.path.join(
config.submissions_path, 'submission_' + '{0:09d}'.format(self.id))
@property
def module(self):
return self.path.lstrip('./').replace('/', '.')
@property
def f_names(self):
return [file.f_name for file in self.files]
@property
def link(self):
return self.files[0].link
@property
def full_name_with_link(self):
return '<a href={}>{}/{}/{}</a>'.format(
self.link, self.event.name, self.team.name, self.name[:20])
@property
def name_with_link(self):
return '<a href={}>{}</a>'.format(self.link, self.name[:20])
@property
def state_with_link(self):
return '<a href=/{}>{}</a>'.format(
os.path.join(self.hash_, 'error.txt'), self.state)
# These were constructing means and stds by fetching fold times. It was
# slow because submission_on_folds also contains possibly large predictions.
# If postgres solves this issue (which can be tested on the mean and std
# scores on the private leaderboard), the corresponding columns (which are
# now redundant) can be deleted and these can be uncommented.
# @property
# def train_time_cv_mean(self):
# return np.array([ts.train_time for ts in self.on_cv_folds]).mean()
# @property
# def valid_time_cv_mean(self):
# return np.array([ts.valid_time for ts in self.on_cv_folds]).mean()
# @property
# def test_time_cv_mean(self):
# return np.array([ts.test_time for ts in self.on_cv_folds]).mean()
# @property
# def train_time_cv_std(self):
# return np.array([ts.train_time for ts in self.on_cv_folds]).std()
# @property
# def valid_time_cv_std(self):
# return np.array([ts.valid_time for ts in self.on_cv_folds]).std()
# @property
# def test_time_cv_std(self):
# return np.array([ts.test_time for ts in self.on_cv_folds]).std()
def set_state(self, state):
self.state = state
for submission_on_cv_fold in self.on_cv_folds:
submission_on_cv_fold.state = state
def reset(self):
self.contributivity = 0.0
self.state = 'new'
self.error_msg = ''
for score in self.scores:
score.valid_score_cv_bag = score.event_score_type.worst
score.test_score_cv_bag = score.event_score_type.worst
score.valid_score_cv_bags = None
score.test_score_cv_bags = None
def set_error(self, error, error_msg):
self.reset()
self.state = error
self.error_msg = error_msg
for submission_on_cv_fold in self.on_cv_folds:
submission_on_cv_fold.set_error(error, error_msg)
def compute_valid_score_cv_bag(self):
"""Cv-bag cv_fold.valid_predictions using combine_predictions_list.
The predictions in predictions_list[i] belong to those indicated
by self.on_cv_folds[i].test_is.
"""
true_predictions_train = self.event.problem.true_predictions_train()
if self.is_public_leaderboard:
predictions_list = [submission_on_cv_fold.valid_predictions for
submission_on_cv_fold in self.on_cv_folds]
test_is_list = [submission_on_cv_fold.cv_fold.test_is for
submission_on_cv_fold in self.on_cv_folds]
for score in self.scores:
score.valid_score_cv_bags = _get_score_cv_bags(
self.event, score.event_score_type, predictions_list,
true_predictions_train, test_is_list)
score.valid_score_cv_bag = float(score.valid_score_cv_bags[-1])
else:
for score in self.scores:
score.valid_score_cv_bag = float(score.event_score_type.worst)
score.valid_score_cv_bags = None
db.session.commit()
def compute_test_score_cv_bag(self):
"""Bag cv_fold.test_predictions using combine_predictions_list.
And stores the score of the bagged predictor in test_score_cv_bag. The
scores of partial combinations are stored in test_score_cv_bags.
This is for assessing the bagging learning curve, which is useful for
setting the number of cv folds to its optimal value (in case the RAMP
is competitive, say, to win a Kaggle challenge; although it's kinda
stupid since in those RAMPs we don't have a test file, so the learning
curves should be assessed in compute_valid_score_cv_bag on the
(cross-)validation sets).
"""
if self.is_private_leaderboard:
# When we have submission id in Predictions, we should get the
# team and submission from the db
true_predictions = self.event.problem.true_predictions_test()
predictions_list = [submission_on_cv_fold.test_predictions for
submission_on_cv_fold in self.on_cv_folds]
combined_predictions_list = [
combine_predictions_list(predictions_list[:i + 1]) for
i in range(len(predictions_list))]
for score in self.scores:
score.test_score_cv_bags = [
score.score_function(
true_predictions, combined_predictions) for
combined_predictions in combined_predictions_list]
score.test_score_cv_bag = float(score.test_score_cv_bags[-1])
else:
for score in self.scores:
score.test_score_cv_bag = float(score.event_score_type.worst)
score.test_score_cv_bags = None
db.session.commit()
# contributivity could be a property but then we could not query on it
def set_contributivity(self, is_commit=True):
self.contributivity = 0.0
if self.is_public_leaderboard:
# we share a unit of 1. among folds
unit_contributivity = 1. / len(self.on_cv_folds)
for submission_on_cv_fold in self.on_cv_folds:
self.contributivity +=\
unit_contributivity * submission_on_cv_fold.contributivity
if is_commit:
db.session.commit()
def set_state_after_training(self):
self.training_timestamp = datetime.datetime.utcnow()
states = [submission_on_cv_fold.state
for submission_on_cv_fold in self.on_cv_folds]
if all(state in ['tested'] for state in states):
self.state = 'tested'
elif all(state in ['tested', 'validated'] for state in states):
self.state = 'validated'
elif all(state in ['tested', 'validated', 'trained']
for state in states):
self.state = 'trained'
elif any(state == 'training_error' for state in states):
self.state = 'training_error'
i = states.index('training_error')
self.error_msg = self.on_cv_folds[i].error_msg
elif any(state == 'validating_error' for state in states):
self.state = 'validating_error'
i = states.index('validating_error')
self.error_msg = self.on_cv_folds[i].error_msg
elif any(state == 'testing_error' for state in states):
self.state = 'testing_error'
i = states.index('testing_error')
self.error_msg = self.on_cv_folds[i].error_msg
if 'error' not in self.state:
self.error_msg = ''
def get_next_best_single_fold(event, predictions_list, true_predictions,
best_index_list):
""".
Find the model that minimizes the score if added to
predictions_list[best_index_list] using event.official_score_function.
If there is no model improving the input
combination, the input best_index_list is returned. Otherwise the best
model is added to the list. We could also return the combined prediction
(for efficiency, so the combination would not have to be done each time;
right now the algo is quadratic), but I don't think any meaningful
rule will be associative, in which case we should redo the combination from
scratch each time the set changes. Since now combination = mean, we could
maintain the sum and the number of models, but it would be a bit bulky.
We'll see how this evolves.
Parameters
----------
predictions_list : list of instances of Predictions
Each element of the list is an instance of Predictions of a model
on the same (cross-validation valid) data points.
true_predictions : instance of Predictions
The ground truth.
best_index_list : list of integers
Indices of the current best model.
Returns
-------
best_index_list : list of integers
Indices of the models in the new combination. If the same as input,
no model was found that improves the score.
"""
best_predictions = combine_predictions_list(
predictions_list, index_list=best_index_list)
best_score = event.official_score_function(
true_predictions, best_predictions)
best_index = -1
# Combination with replacement, what Caruana suggests. Basically, if a
# model is added several times, it's upweighted, leading to
# integer-weighted ensembles
r = np.arange(len(predictions_list))
# Randomization doesn't matter, only in case of exact equality.
# np.random.shuffle(r)
# print r
for i in r:
combined_predictions = combine_predictions_list(
predictions_list, index_list=np.append(best_index_list, i))
new_score = event.official_score_function(
true_predictions, combined_predictions)
is_lower_the_better = event.official_score_type.is_lower_the_better
if (is_lower_the_better and new_score < best_score) or\
(not is_lower_the_better and new_score > best_score):
best_predictions = combined_predictions
best_index = i
best_score = new_score
if best_index > -1:
return np.append(best_index_list, best_index), best_score
else:
return best_index_list, best_score
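# Minimal usage sketch (assumed calling convention, not part of this module):
# grow an integer-weighted ensemble by calling get_next_best_single_fold until
# the index list stops growing; `event`, `predictions_list` and
# `true_predictions` are assumed to be provided by the caller.
#
# best_index_list = np.array([0])  # seed, e.g. the single best model's index
# while True:
#     new_index_list, best_score = get_next_best_single_fold(
#         event, predictions_list, true_predictions, best_index_list)
#     if len(new_index_list) == len(best_index_list):
#         break  # no candidate improves the combination any further
#     best_index_list = new_index_list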
class SubmissionScoreOnCVFold(db.Model):
__tablename__ = 'submission_score_on_cv_folds'
id = db.Column(db.Integer, primary_key=True)
submission_on_cv_fold_id = db.Column(
db.Integer, db.ForeignKey('submission_on_cv_folds.id'), nullable=False)
submission_on_cv_fold = db.relationship(
'SubmissionOnCVFold', backref=db.backref(
'scores', cascade='all, delete-orphan'))
submission_score_id = db.Column(
db.Integer, db.ForeignKey('submission_scores.id'), nullable=False)
submission_score = db.relationship('SubmissionScore', backref=db.backref(
'on_cv_folds', cascade='all, delete-orphan'))
train_score = db.Column(db.Float)
valid_score = db.Column(db.Float)
test_score = db.Column(db.Float)
db.UniqueConstraint(
submission_on_cv_fold_id, submission_score_id, name='ss_constraint')
@property
def event_score_type(self):
return self.submission_score.event_score_type
@property
def score_function(self):
return self.event_score_type.score_function
# TODO: rename submission to workflow and submitted file to workflow_element
# TODO: SubmissionOnCVFold should actually be a workflow element. Saving
# train_pred means that we can input it to the next workflow element
# TODO: implement check
class SubmissionOnCVFold(db.Model):
"""SubmissionOnCVFold.
is an instantiation of Submission, to be trained on a data file and a cv
fold. We don't actually store the trained model in the db (lack of disk and
pickling issues), so trained submission is not a database column. On the
other hand, we will store train, valid, and test predictions. In a sense
substituting CPU time for storage.
"""
__tablename__ = 'submission_on_cv_folds'
id = db.Column(db.Integer, primary_key=True)
submission_id = db.Column(
db.Integer, db.ForeignKey('submissions.id'), nullable=False)
submission = db.relationship(
'Submission', backref=db.backref(
'on_cv_folds', cascade="all, delete-orphan"))
cv_fold_id = db.Column(
db.Integer, db.ForeignKey('cv_folds.id'), nullable=False)
cv_fold = db.relationship(
'CVFold', backref=db.backref(
'submissions', cascade="all, delete-orphan"))
# filled by cv_fold.get_combined_predictions
contributivity = db.Column(db.Float, default=0.0)
best = db.Column(db.Boolean, default=False)
    # prediction on the full training set, including train and valid points;
    # the train_predictions and valid_predictions properties do the slicing
full_train_y_pred = db.Column(NumpyType, default=None)
test_y_pred = db.Column(NumpyType, default=None)
train_time = db.Column(db.Float, default=0.0)
valid_time = db.Column(db.Float, default=0.0)
test_time = db.Column(db.Float, default=0.0)
state = db.Column(submission_states, default='new')
error_msg = db.Column(db.String, default='')
db.UniqueConstraint(submission_id, cv_fold_id, name='sc_constraint')
# def __init__(self, submission, cv_fold):
# self.submission = submission
# self.cv_fold = cv_fold
# for score in submission.scores:
# submission_score_on_cv_fold = SubmissionScoreOnCVFold(
# submission_on_cv_fold=self, submission_score=score)
# db.session.add(submission_score_on_cv_fold)
# self.reset()
def __repr__(self):
repr = 'state = {}, c = {}'\
', best = {}'.format(
self.state, self.contributivity, self.best)
return repr
@hybrid_property
def is_public_leaderboard(self):
return (self.state == 'validated') | (self.state == 'tested')
@hybrid_property
def is_error(self):
return (self.state == 'training_error') |\
(self.state == 'checking_error') |\
(self.state == 'validating_error') |\
(self.state == 'testing_error')
    # The following four properties convert the stored numpy arrays
    # <>_y_pred into Predictions instances
@property
def full_train_predictions(self):
return self.submission.prediction.Predictions(
y_pred=self.full_train_y_pred)
@property
def train_predictions(self):
return self.submission.prediction.Predictions(
y_pred=self.full_train_y_pred[self.cv_fold.train_is])
@property
def valid_predictions(self):
return self.submission.prediction.Predictions(
y_pred=self.full_train_y_pred[self.cv_fold.test_is])
@property
def test_predictions(self):
return self.submission.prediction.Predictions(y_pred=self.test_y_pred)
def reset(self):
self.contributivity = 0.0
self.best = False
self.full_train_y_pred = None
self.test_y_pred = None
self.train_time = 0.0
self.valid_time = 0.0
self.test_time = 0.0
self.state = 'new'
self.error_msg = ''
for score in self.scores:
score.train_score = score.event_score_type.worst
score.valid_score = score.event_score_type.worst
score.test_score = score.event_score_type.worst
def set_error(self, error, error_msg):
self.reset()
self.state = error
self.error_msg = error_msg
def compute_train_scores(self):
true_full_train_predictions =\
self.submission.event.problem.true_predictions_train()
for score in self.scores:
score.train_score = float(score.score_function(
true_full_train_predictions, self.full_train_predictions,
self.cv_fold.train_is))
db.session.commit()
def compute_valid_scores(self):
true_full_train_predictions =\
self.submission.event.problem.true_predictions_train()
for score in self.scores:
score.valid_score = float(score.score_function(
true_full_train_predictions, self.full_train_predictions,
self.cv_fold.test_is))
db.session.commit()
def compute_test_scores(self):
true_test_predictions =\
self.submission.event.problem.true_predictions_test()
for score in self.scores:
score.test_score = float(score.score_function(
true_test_predictions, self.test_predictions))
db.session.commit()
def update(self, detached_submission_on_cv_fold):
"""From trained DetachedSubmissionOnCVFold."""
self.state = detached_submission_on_cv_fold.state
if self.is_error:
self.error_msg = detached_submission_on_cv_fold.error_msg
else:
if self.state in ['trained', 'validated', 'tested']:
self.train_time = detached_submission_on_cv_fold.train_time
if self.state in ['validated', 'tested']:
self.valid_time = detached_submission_on_cv_fold.valid_time
self.full_train_y_pred =\
detached_submission_on_cv_fold.full_train_y_pred
self.compute_train_scores()
self.compute_valid_scores()
if self.state in ['tested']:
self.test_time = detached_submission_on_cv_fold.test_time
self.test_y_pred = detached_submission_on_cv_fold.test_y_pred
self.compute_test_scores()
db.session.commit()
class DetachedSubmissionOnCVFold(object):
"""Copy of SubmissionOnCVFold, all the fields we need in train and test.
It's because SQLAlchemy objects don't persist through
multiprocessing jobs. Maybe eliminated if we do the parallelization
differently, though I doubt it.
"""
def __init__(self, submission_on_cv_fold):
self.train_is = submission_on_cv_fold.cv_fold.train_is
self.test_is = submission_on_cv_fold.cv_fold.test_is
self.full_train_y_pred = submission_on_cv_fold.full_train_y_pred
self.test_y_pred = submission_on_cv_fold.test_y_pred
self.state = submission_on_cv_fold.state
self.name = submission_on_cv_fold.submission.event.name + '/'\
+ submission_on_cv_fold.submission.team.name + '/'\
+ submission_on_cv_fold.submission.name
self.module = submission_on_cv_fold.submission.module
self.error_msg = submission_on_cv_fold.error_msg
self.train_time = submission_on_cv_fold.train_time
self.valid_time = submission_on_cv_fold.valid_time
self.test_time = submission_on_cv_fold.test_time
self.trained_submission = None
self.train_submission =\
submission_on_cv_fold.submission.event.train_submission
self.test_submission =\
submission_on_cv_fold.submission.event.test_submission
def __repr__(self):
repr = 'Submission({}) on fold {}'.format(
self.name, str(self.train_is)[:10])
return repr
user_interaction_type = db.Enum(
'copy',
'download',
'giving credit',
'landing',
'login',
'logout',
'looking at error',
'looking at event',
'looking at leaderboard',
'looking at my_submissions',
'looking at private leaderboard',
'looking at submission',
'looking at user',
'save',
'signing up at event',
'submit',
'upload',
name='user_interaction_type'
)
class UserInteraction(db.Model):
__tablename__ = 'user_interactions'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
timestamp = db.Column(db.DateTime, nullable=False)
interaction = db.Column(user_interaction_type, nullable=False)
note = db.Column(db.String, default=None)
submission_file_diff = db.Column(db.String, default=None)
submission_file_similarity = db.Column(db.Float, default=None)
ip = db.Column(db.String, default=None)
user_id = db.Column(
db.Integer, db.ForeignKey('users.id'))
user = db.relationship('User', backref=db.backref('user_interactions'))
event_team_id = db.Column(
db.Integer, db.ForeignKey('event_teams.id'))
event_team = db.relationship('EventTeam', backref=db.backref(
'user_interactions', cascade='all, delete-orphan'))
submission_id = db.Column(
db.Integer, db.ForeignKey('submissions.id'))
submission = db.relationship('Submission', backref=db.backref(
'user_interactions', cascade='all, delete-orphan'))
submission_file_id = db.Column(
db.Integer, db.ForeignKey('submission_files.id'))
submission_file = db.relationship('SubmissionFile', backref=db.backref(
'user_interactions', cascade='all, delete-orphan'))
# def __init__(self, interaction, user=None, event=None, note=None,
# submission=None, submission_file=None, diff=None,
# similarity=None):
# self.timestamp = datetime.datetime.utcnow()
# self.interaction = interaction
# self.user = user
# if event is not None:
# self.event_team = get_active_user_event_team(event, user)
# self.ip = request.environ['REMOTE_ADDR']
# self.note = note
# self.submission = submission
# self.submission_file = submission_file
# self.submission_file_diff = diff
# self.submission_file_similarity = similarity
@property
def submission_file_diff_link(self):
if self.submission_file_diff is None:
return None
return os.path.join(
config.submissions_path, 'diff_bef24208a45043059', str(self.id))
@property
def event(self):
if self.event_team:
return self.event_team.event
else:
return None
@property
def team(self):
if self.event_team:
return self.event_team.team
else:
return None
submission_similarity_type = db.Enum(
'target_credit', # credit given by one of the authors of target
'source_credit', # credit given by one of the authors of source
'thirdparty_credit', # credit given by an independent user
name='submission_similarity_type'
)
class SubmissionSimilarity(db.Model):
__tablename__ = 'submission_similaritys'
id = db.Column(db.Integer, primary_key=True)
type = db.Column(submission_similarity_type, nullable=False)
note = db.Column(db.String, default=None)
    timestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)  # callable, evaluated per insert
similarity = db.Column(db.Float, default=0.0)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
user = db.relationship(
'User', backref=db.backref('submission_similaritys'))
source_submission_id = db.Column(
db.Integer, db.ForeignKey('submissions.id'))
source_submission = db.relationship(
'Submission', primaryjoin=(
'SubmissionSimilarity.source_submission_id == Submission.id'))
target_submission_id = db.Column(
db.Integer, db.ForeignKey('submissions.id'))
target_submission = db.relationship(
'Submission', primaryjoin=(
'SubmissionSimilarity.target_submission_id == Submission.id'))
def __repr__(self):
repr = 'type={}, user={}, source={}, target={}, similarity={}'.format(
self.type, self.user, self.source_submission,
self.target_submission, self.similarity)
return repr
class NameClashError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MergeTeamError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class DuplicateSubmissionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TooEarlySubmissionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MissingSubmissionFileError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MissingExtensionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| 35.66263
| 91
| 0.667233
|
fe64f1463f37d9ec5804109d5da4ccbc02053bd9
| 345
|
py
|
Python
|
pirates/instance/DistributedTeleportZone.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/instance/DistributedTeleportZone.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/instance/DistributedTeleportZone.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from pirates.instance import DistributedInstanceBase
from pandac.PandaModules import NodePath
class DistributedTeleportZone(DistributedInstanceBase.DistributedInstanceBase, NodePath):
def __init__(self, cr):
DistributedInstanceBase.DistributedInstanceBase.__init__(self, cr)
def getInstanceNodePath(self):
return self
| 31.363636
| 89
| 0.808696
|
2ed0bb0631f7265330965778c12bedac40aebdb3
| 863
|
py
|
Python
|
src/migrations/0009_transaction.py
|
AgnosticMe/swiftly
|
1fc5fed6e90b8cbdcb6038303537aa0f82ae70d7
|
[
"MIT"
] | null | null | null |
src/migrations/0009_transaction.py
|
AgnosticMe/swiftly
|
1fc5fed6e90b8cbdcb6038303537aa0f82ae70d7
|
[
"MIT"
] | null | null | null |
src/migrations/0009_transaction.py
|
AgnosticMe/swiftly
|
1fc5fed6e90b8cbdcb6038303537aa0f82ae70d7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-20 06:10
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('src', '0008_auto_20210720_1050'),
]
operations = [
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stripe_payment_intent_id', models.CharField(max_length=255, unique=True)),
('amount', models.FloatField(default=0)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='src.job')),
],
),
]
| 33.192308
| 117
| 0.623407
|
a52946bba1567b168ff1bbccbefd7bc724241eba
| 7,278
|
py
|
Python
|
python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py
|
Li-fAngyU/Paddle
|
e548f65f96697830035a28f9070b40829408ccdb
|
[
"Apache-2.0"
] | 2
|
2022-03-30T09:55:45.000Z
|
2022-03-30T09:55:49.000Z
|
python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py
|
Li-fAngyU/Paddle
|
e548f65f96697830035a28f9070b40829408ccdb
|
[
"Apache-2.0"
] | 1
|
2022-01-28T07:23:22.000Z
|
2022-01-28T07:23:22.000Z
|
python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py
|
Li-fAngyU/Paddle
|
e548f65f96697830035a28f9070b40829408ccdb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self):
self.atol = 1e-6
self.rtol = 1e-5
self.atol_fp16 = 1e-2
self.rtol_fp16 = 1e-3
def set_data_feed(self):
x = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"x": x.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 1,
"epsilon": 1e-05,
}
self.optimizer = None
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
if self.is_training:
ch = self.feed_shape[0][1]
conv1 = paddle.static.nn.conv2d(
x, num_filters=ch, filter_size=3, bias_attr=False)
scale = paddle.ParamAttr(trainable=True)
bias = paddle.ParamAttr(trainable=True)
out = paddle.fluid.layers.nn.layer_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs)
else:
scale = self.attrs['scale']
bias = self.attrs['shift']
out = paddle.fluid.layers.nn.layer_norm(
x, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out)
fetch_list = [loss.name]
if self.is_training:
optimizer = None
if self.optimizer == 'sgd':
optimizer = paddle.optimizer.SGD(learning_rate=1e-2)
elif self.optimizer == 'adam':
optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
elif self.optimizer == 'lamb':
optimizer = paddle.optimizer.Lamb(
learning_rate=1e-2, lamb_weight_decay=0.0)
if optimizer is not None:
optimizer.minimize(loss)
if exec_mode:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=self.feed_fp32,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program,
feed=self.feed_fp32,
fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(False)
res1 = self._test_base(True)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
@unittest.skip('raise error')
class TestCase1(TestBase):
def set_op_attrs(self):
self.attrs = {
"scale": False,
"shift": True,
"begin_norm_axis": 1,
"epsilon": 1e-05,
}
@unittest.skip('raise error')
class TestCase2(TestBase):
def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": False,
"begin_norm_axis": 1,
"epsilon": 1e-05,
}
class TestCase3(TestBase):
def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 2,
"epsilon": 1e-05,
}
self.optimizer = None
class TestTrainCase1(TestBase):
def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 1,
"epsilon": 1e-05
}
self.optimizer = 'sgd'
def set_atol(self):
self.atol = 1e-6
def set_training(self):
self.is_training = True
self.epoch = 10
class TestTrainCase2(TestBase):
def set_atol(self):
self.atol = 5e-4
def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 2,
"epsilon": 1e-05
}
self.optimizer = 'adam'
def set_training(self):
self.is_training = True
self.epoch = 10
class TestTrainCase3(TestBase):
def set_atol(self):
self.atol = 5e-3
def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 2,
"epsilon": 1e-05
}
self.optimizer = 'lamb'
def set_training(self):
self.is_training = True
self.epoch = 10
# not support `layer_norm(x, param_attr=False, bias_attr=False, **self.attrs)`
if __name__ == "__main__":
unittest.main()
| 30.579832
| 81
| 0.53806
|
54badacfd3b42dbcd9b05fe58b873e011ca7d13b
| 755
|
py
|
Python
|
run_conversion_kazr.py
|
ti-vo/peakTree
|
37fcd51367d575edf7faeba4c20aaac42f577857
|
[
"MIT"
] | 1
|
2021-05-26T05:50:08.000Z
|
2021-05-26T05:50:08.000Z
|
run_conversion_kazr.py
|
ti-vo/peakTree
|
37fcd51367d575edf7faeba4c20aaac42f577857
|
[
"MIT"
] | null | null | null |
run_conversion_kazr.py
|
ti-vo/peakTree
|
37fcd51367d575edf7faeba4c20aaac42f577857
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# coding=utf-8
import datetime
#import matplotlib
#matplotlib.use('Agg')
#import numpy as np
#import matplotlib.pyplot as plt
#import sys, os
import peakTree
import peakTree.helpers as h
import logging
log = logging.getLogger('peakTree')
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())
pTB = peakTree.peakTreeBuffer(system='kazr_baecc')
pTB.load_kazr_file('data/tmpkazrspeccmaskgecopolM1.a0.20140202.160004.cdf', load_to_ram=True)
pTB.load_kazr_file('data/tmpkazrspeccmaskgecopolM1.a0.20140221.220005.cdf', load_to_ram=True)
#dt = datetime.datetime(2014,2,21,22,40,0,0)
#pTB.get_tree_at(h.dt_to_ts(dt), 2900, temporal_average=4)
pTB.assemble_time_height('output/')
| 30.2
| 95
| 0.748344
|
42affce752dc0c11aa4d54914e78e53f7a62d804
| 501
|
py
|
Python
|
login.py
|
jyotirmoybhakat/young-desi
|
de2a91f5011a6d5add6df8a8ead3312a07bc6400
|
[
"Apache-2.0"
] | null | null | null |
login.py
|
jyotirmoybhakat/young-desi
|
de2a91f5011a6d5add6df8a8ead3312a07bc6400
|
[
"Apache-2.0"
] | null | null | null |
login.py
|
jyotirmoybhakat/young-desi
|
de2a91f5011a6d5add6df8a8ead3312a07bc6400
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, redirect, url_for, request
app = Flask(__name__)
@app.route('/success/<name>')
def success(name):
return 'welcome %s' % name
@app.route('/login',methods = ['POST', 'GET'])
def login():
if request.method == 'POST':
user = request.form['nm']
return redirect(url_for('success',name = user))
else:
user = request.args.get('nm')
return redirect(url_for('success',name = user))
if __name__ == '__main__':
app.run(debug = True)
| 27.833333
| 54
| 0.616766
|
770efe14a1a7a3cd741cd724cac6ca2584120c2d
| 6,602
|
py
|
Python
|
gearbox/port.py
|
Risto97/gearbox
|
d5b8eb22900f196a61d2f75a28935d8dcd58d6ec
|
[
"MIT"
] | null | null | null |
gearbox/port.py
|
Risto97/gearbox
|
d5b8eb22900f196a61d2f75a28935d8dcd58d6ec
|
[
"MIT"
] | null | null | null |
gearbox/port.py
|
Risto97/gearbox
|
d5b8eb22900f196a61d2f75a28935d8dcd58d6ec
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from PySide2 import QtGui, QtCore, QtWidgets
from pygears.rtl.port import InPort
from .constants import (IN_PORT, OUT_PORT, PORT_HOVER_COLOR,
PORT_HOVER_BORDER_COLOR, PORT_ACTIVE_COLOR,
PORT_ACTIVE_BORDER_COLOR, Z_VAL_PORT)
class PortItem(QtWidgets.QGraphicsItem):
"""
Base Port Item.
"""
def __init__(self, model, parent=None):
super().__init__(parent)
self.model = model
self._name = model.basename
self._port_type = IN_PORT if isinstance(model, InPort) else OUT_PORT
self._multi_connection = True
self.setAcceptHoverEvents(True)
self.setFlag(self.ItemIsSelectable, False)
self.setFlag(self.ItemSendsScenePositionChanges, True)
self.setZValue(Z_VAL_PORT)
self._pipes = []
self._width = 10.0
self._height = 10.0
self._hovered = False
self._display_name = True
self._color = (49, 115, 100, 255)
self._border_color = (29, 202, 151, 255)
self._border_size = 1
def __str__(self):
return '{}.PortItem("{}")'.format(self.__module__, self.name)
def __repr__(self):
return '{}.PortItem("{}")'.format(self.__module__, self.name)
def boundingRect(self):
return QtCore.QRectF(0.0, 0.0, self._width, self._height)
def plug_pos(self, context, direction):
if self.port_type == IN_PORT:
rel_pos = self.pos() + QtCore.QPointF(self._width / 2,
self._height / 2)
elif self.port_type == OUT_PORT:
if direction == OUT_PORT:
rel_pos = self.pos() + QtCore.QPointF(self._width,
self._height / 2)
else:
rel_pos = self.pos() + QtCore.QPointF(0, self._height / 2)
if context is self.parentItem():
return rel_pos
else:
return self.parentItem().mapToParent(rel_pos)
def paint(self, painter, option, widget):
painter.save()
# rect = QtCore.QRectF(0.0, 0.8, self._width, self._height)
# painter.setBrush(QtGui.QColor(0, 0, 0, 200))
# painter.setPen(QtGui.QPen(QtGui.QColor(0, 0, 0, 255), 1.8))
# path = QtGui.QPainterPath()
# path.addEllipse(rect)
# painter.drawPath(path)
if self._hovered:
color = QtGui.QColor(*PORT_HOVER_COLOR)
border_color = QtGui.QColor(*PORT_HOVER_BORDER_COLOR)
elif self.connected_pipes:
color = QtGui.QColor(*PORT_ACTIVE_COLOR)
border_color = QtGui.QColor(*PORT_ACTIVE_BORDER_COLOR)
else:
color = QtGui.QColor(*self.color)
border_color = QtGui.QColor(*self.border_color)
painter.setBrush(color)
pen = QtGui.QPen(border_color, 1.5)
painter.setPen(pen)
if self.port_type == IN_PORT:
# painter.drawRect(self.boundingRect())
painter.drawEllipse(self.boundingRect())
elif self.port_type == OUT_PORT:
br = self.boundingRect()
triangle = QtGui.QPolygonF()
triangle.push_back(br.topLeft())
triangle.push_back(br.bottomLeft())
triangle.push_back(
QtCore.QPointF(br.topRight() + br.bottomRight()) / 2)
painter.drawPolygon(triangle)
painter.restore()
def itemChange(self, change, value):
if change == self.ItemScenePositionHasChanged:
self.redraw_connected_pipes()
return super(PortItem, self).itemChange(change, value)
# def mousePressEvent(self, event):
# if event.modifiers() != QtCore.Qt.AltModifier:
# self.viewer_start_connection()
# super(PortItem, self).mousePressEvent(event)
# def mouseReleaseEvent(self, event):
# super(PortItem, self).mouseReleaseEvent(event)
def hoverEnterEvent(self, event):
self._hovered = True
super(PortItem, self).hoverEnterEvent(event)
def hoverLeaveEvent(self, event):
self._hovered = False
super(PortItem, self).hoverLeaveEvent(event)
def viewer_start_connection(self):
viewer = self.scene().viewer()
viewer.start_live_connection(self)
def redraw_connected_pipes(self):
if not self.connected_pipes:
return
# for pipe in self.connected_pipes:
# if self.port_type == IN_PORT:
# pipe.draw_path(self, pipe.output_port)
# elif self.port_type == OUT_PORT:
# pipe.draw_path(pipe.input_port, self)
def add_pipe(self, pipe):
self._pipes.append(pipe)
def remove_pipe(self, pipe):
self._pipes.remove(pipe)
@property
def connected_pipes(self):
return self._pipes
@property
def connected_ports(self):
ports = []
port_types = {IN_PORT: 'output_port', OUT_PORT: 'input_port'}
for pipe in self.connected_pipes:
ports.append(getattr(pipe, port_types[self.port_type]))
return ports
@property
def node(self):
return self.parentItem()
@property
def name(self):
return self._name
@name.setter
def name(self, name=''):
self._name = name.strip()
@property
def display_name(self):
return self._display_name
@display_name.setter
def display_name(self, display=True):
self._display_name = display
@property
def color(self):
return self._color
@color.setter
def color(self, color=(0, 0, 0, 255)):
self._color = color
@property
def border_color(self):
return self._border_color
@border_color.setter
def border_color(self, color=(0, 0, 0, 255)):
self._border_color = color
@property
def border_size(self):
return self._border_size
@border_size.setter
def border_size(self, size=2):
self._border_size = size
@property
def multi_connection(self):
return self._multi_connection
@multi_connection.setter
def multi_connection(self, mode=False):
conn_type = 'multi' if mode else 'single'
self.setToolTip('{}: ({})'.format(self.name, conn_type))
self._multi_connection = mode
@property
def port_type(self):
return self._port_type
@port_type.setter
def port_type(self, port_type):
self._port_type = port_type
def delete(self):
for pipe in self.connected_pipes:
pipe.delete()
| 30.706977
| 76
| 0.606331
|
7e33d07c937777e464646bb3bab3db8ddbbcb9d9
| 2,209
|
py
|
Python
|
operators/ssd-detector/server.py
|
ReigenAraka/phantoscope
|
bad097dd8e7d682c75e13f811c5fc358e78c942e
|
[
"Apache-2.0"
] | null | null | null |
operators/ssd-detector/server.py
|
ReigenAraka/phantoscope
|
bad097dd8e7d682c75e13f811c5fc358e78c942e
|
[
"Apache-2.0"
] | null | null | null |
operators/ssd-detector/server.py
|
ReigenAraka/phantoscope
|
bad097dd8e7d682c75e13f811c5fc358e78c942e
|
[
"Apache-2.0"
] | null | null | null |
import os
import logging
import grpc
from concurrent import futures
import rpc.rpc_pb2
import rpc.rpc_pb2_grpc
from ssd import run, SSDDetectObject as Detector
ENDPOINT = os.getenv("OP_ENDPOINT", "127.0.0.1:51002")
class OperatorServicer(rpc.rpc_pb2_grpc.OperatorServicer):
def __init__(self):
self.detector = Detector()
def Execute(self, request, context):
logging.info("execute")
grpc_metas = []
result_images = run(self.detector, request.datas, request.urls)
# just for test, need adjust proto
logging.info('len of result images: %d', len(result_images))
result_images = result_images[0]
for result_image in result_images:
data = rpc.rpc_pb2.MetaData(data=bytes(result_image, encoding='utf-8'))
grpc_metas.append(data)
return rpc.rpc_pb2.ExecuteReply(nums=len(grpc_metas),
vectors=[],
metadata=grpc_metas)
def Healthy(self, request, context):
logging.info("healthy")
return rpc.rpc_pb2.HealthyReply(healthy="healthy")
def Identity(self, request, context):
logging.info("identity")
detector = self.detector
return rpc.rpc_pb2.IdentityReply(name=detector.name,
endpoint=ENDPOINT,
type=detector.type,
input=detector.input,
output=detector.output,
dimension=detector.dimension,
metricType=detector.metric_type)
def serve(port):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
rpc.rpc_pb2_grpc.add_OperatorServicer_to_server(OperatorServicer(), server)
server.add_insecure_port('[::]:%s' % port)
server.start()
server.wait_for_termination()
if __name__ == "__main__":
formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=formatter)
port = ENDPOINT.split(":")[-1]
logging.info("Start server")
serve(port)
| 36.213115
| 83
| 0.598008
|
0b7e54596741d7a1ba9f36f537e12762a7e7fb82
| 3,778
|
py
|
Python
|
src/py/MakeAdderModulesPkg.py
|
jstr045329/hask-to-vhdl
|
a9888f9f8e33fef8efe25bd0b30458d556f51a10
|
[
"BSD-3-Clause"
] | null | null | null |
src/py/MakeAdderModulesPkg.py
|
jstr045329/hask-to-vhdl
|
a9888f9f8e33fef8efe25bd0b30458d556f51a10
|
[
"BSD-3-Clause"
] | 18
|
2020-07-20T13:03:04.000Z
|
2020-07-29T21:39:29.000Z
|
src/py/MakeAdderModulesPkg.py
|
jstr045329/hask-to-vhdl
|
a9888f9f8e33fef8efe25bd0b30458d556f51a10
|
[
"BSD-3-Clause"
] | null | null | null |
# Takes the template for counting bits and populates reset name & reset level.
from os import linesep as eol
INPUT_FILENAME = "../VhdTemplates/OneAdderTemplate.vhd"
OUTPUT_FILENAME = "../VhdLibs/AdderModulesPkg.vhd"
ENTITY_STUB = "AddTwoNumbers_%04d_%04d"
BITS_IN_START = 2
BITS_IN_STOP = 16
USE_POSITIVE_RESET = True
def rst_name():
if USE_POSITIVE_RESET:
return "reset"
return "reset_n"
def rst_level():
if USE_POSITIVE_RESET:
return "'1'"
return "'0'"
def one_entity_name(bitsIn, bitsOut):
return ENTITY_STUB % (bitsIn, bitsOut,)
def test_rst_statement():
return "if " + rst_name() + " = " + rst_level() + " then"
def convert_entity_to_component(one_entity_declaration):
y = []
for line in one_entity_declaration:
s = line
s = s.replace("entity", "component")
y.append(s)
y.append(""+eol)
return y
def generate_pkg(lolos):
"lolos is a list of list of strings"
y = []
y.append("----------------------------------------------------------------------------------------------------"+eol)
y.append("-- Adder Module Package"+eol)
y.append("----------------------------------------------------------------------------------------------------"+eol)
y.append("library ieee;"+eol)
y.append("use ieee.std_logic_1164.all;"+eol)
y.append("use ieee.numeric_std.all;"+eol)
y.append(""+eol)
y.append(""+eol)
y.append("package AdderModulePkg is"+eol)
for los in lolos:
y.extend(convert_entity_to_component(los))
y.append(""+eol)
y.append("end package AdderModulePkg;"+eol)
y.append(""+eol)
y.append(""+eol)
y.append("package body AdderModulePkg is"+eol)
y.append("end package body AdderModulePkg;"+eol)
y.append(""+eol)
return y
def main():
entity_declarations = [] # list of list of strings
y = [] # list of strings
with open(INPUT_FILENAME, 'r') as f:
los = f.readlines()
for bits_in in range(BITS_IN_START, BITS_IN_STOP+1):
bits_out = bits_in + 1
one_entity_declaration = []
record_entity = False
for line in los:
include_line = True
if "-- <entity_declaration_start>" in line:
include_line = False
if "-- <entity_declaration_stop>" in line:
include_line = False
s = line
s = s.replace("-- <entity_declaration_start>", "")
s = s.replace("-- <entity_declaration_stop>", "")
s = s.replace("<entity_name_here>", one_entity_name(bits_in, bits_in+1))
s = s.replace("<reset_name_here>", rst_name())
s = s.replace("<test_reset_here>", test_rst_statement())
s = s.replace("<reset_level_here>", rst_level())
s = s.replace("<bits_in_here>", str(bits_in))
s = s.replace("<bits_out_here>", str(bits_out))
s = s.replace("<bits_in_msb_here>", str(bits_in-1))
s = s.replace("<bits_out_msb_here>", str(bits_out-1))
s = s.replace(" - 1", "-1")
if include_line:
y.append(s)
if include_line and record_entity:
one_entity_declaration.append(s)
if "-- <entity_declaration_start>" in line:
one_entity_declaration = []
record_entity = True
if "-- <entity_declaration_stop>" in line:
entity_declarations.append(one_entity_declaration)
one_entity_declaration = []
record_entity = False
y.extend(generate_pkg(entity_declarations))
with open(OUTPUT_FILENAME, 'w') as f:
for line in y:
f.write(line)
if __name__ == "__main__":
main()
| 33.140351
| 120
| 0.562467
|
4c7f3043164e02b37726153913a6158cf5f071fd
| 3,814
|
py
|
Python
|
panini/nats_client/nats_client_interface.py
|
artas728/panini
|
c7bfac4c80d105d0e78ea5afbabd9f2729fdf935
|
[
"MIT"
] | 1
|
2021-03-29T16:10:27.000Z
|
2021-03-29T16:10:27.000Z
|
panini/nats_client/nats_client_interface.py
|
artas728/panini
|
c7bfac4c80d105d0e78ea5afbabd9f2729fdf935
|
[
"MIT"
] | null | null | null |
panini/nats_client/nats_client_interface.py
|
artas728/panini
|
c7bfac4c80d105d0e78ea5afbabd9f2729fdf935
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from ..utils.logger import get_logger
class Msg:
"""
    Alternative implementation of the NATS message class with an added
    "context" field.
"""
__slots__ = ("subject", "reply", "data", "sid", "context")
def __init__(self, subject="", reply="", data=b"", sid=0, context={}):
self.subject = subject
self.reply = reply
self.data = data
self.sid = sid
self.context = context
def __repr__(self):
return "<{}: subject='{}' reply='{}' context='{}...'>".format(
self.__class__.__name__,
self.subject,
self.reply,
self.context,
)
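# Construction sketch (illustrative values only; fields mirror __slots__):
# msg = Msg(subject='orders.created', reply='reply.inbox.1', data=b'{"id": 1}',
#           sid=3, context={'correlation_id': 'abc'})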
class NATSClientInterface(ABC):
def __init__(
self,
client_id: str,
host: str,
port: int or str,
listen_subjects_callbacks: dict,
allow_reconnect: bool or None,
max_reconnect_attempts: int = 60,
reconnecting_time_wait: int = 2,
publish_subjects=[],
auth: dict = {},
queue="",
client_strategy="asyncio", # in_current_process' or in_separate_processes'
redis_host="127.0.0.1",
redis_port="6379",
pending_bytes_limit=65536 * 1024 * 10,
num_of_queues=1,
):
"""
:param client_id: instance identifier for NATS, str
:param port: default '4333'
:param publish_subjects: for example public.binance.order_book.BTC_USD
        :param allow_reconnect: False if you want to stop the instance when the connection is lost
:param max_reconnect_attempts:
:param reconnecting_time_wait:
:return: {'success': True} if success otherwise {'success': False, 'error': 'error description'}
"""
self.log = get_logger("panini")
self.connected = False
self.client_id = client_id
self.host = host
self.port = port
self.queue = queue
self.auth = auth
self.listen_subjects_callbacks = listen_subjects_callbacks
self.publish_subjects = publish_subjects
self.allow_reconnect = allow_reconnect
self.max_reconnect_attempts = max_reconnect_attempts
self.reconnecting_time_wait = reconnecting_time_wait
self.client_strategy = client_strategy
self.pending_bytes_limit = pending_bytes_limit
self.redis_host = redis_host
self.redis_port = redis_port
self.num_of_queues = num_of_queues
@abstractmethod
def check_connection(self):
pass
@abstractmethod
def subscribe_new_subject(self, subject: str, callback):
pass
@abstractmethod
def disconnect(self):
pass
@abstractmethod
def publish_sync(
self,
subject: str,
message,
reply_to: str = None,
force: bool = False,
data_type: type or str = "json.dumps",
):
pass
@abstractmethod
def request_sync(
self,
subject: str,
message,
timeout: int = 10,
data_type: type or str = "json.dumps",
):
pass
@abstractmethod
async def publish(
self,
subject: str,
message,
reply_to: str = None,
force: bool = False,
data_type: type or str = "json.dumps",
):
pass
@abstractmethod
async def request(
self,
subject: str,
message,
timeout: int = 10,
data_type: type or str = "json.dumps",
):
pass
@abstractmethod
def request_from_another_thread_sync(
self,
subject: str,
message,
timeout: int = 10,
):
pass
@abstractmethod
async def request_from_another_thread(
self,
subject: str,
message,
timeout: int = 10,
):
pass
| 26.123288
| 105
| 0.584688
|
76989a0677cb71c86693d64a4deaccb088b8ff72
| 1,941
|
py
|
Python
|
parsuite.py
|
egypt/parsuite
|
1a91897f939119804b5e250205dec252066fc1f1
|
[
"MIT"
] | null | null | null |
parsuite.py
|
egypt/parsuite
|
1a91897f939119804b5e250205dec252066fc1f1
|
[
"MIT"
] | null | null | null |
parsuite.py
|
egypt/parsuite
|
1a91897f939119804b5e250205dec252066fc1f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
from pathlib import Path
from sys import exit, modules as sys_modules
from re import search
from parsuite import modules
from parsuite import helpers
from parsuite.core.suffix_printer import *
from parsuite.core.argument import (Argument,ArgumentGroup,
MutuallyExclusiveArgumentGroup)
def add_args(dst_obj,args):
'''Add arguments to a parser object. Useful when initializing
an argument group.
'''
for arg in args:
dst_obj.add_argument(*arg.pargs, **arg.kwargs)
if __name__ == '__main__':
ap = argument_parser = argparse.ArgumentParser(
description='Parse the planet.')
subparsers = ap.add_subparsers(help='Parser module selection.')
subparsers.required = True
subparsers.dest = 'module'
esprint('Starting the parser')
# strap arguments from modules as argument groups
esprint('Loading modules')
for handle,module in modules.handles.items():
helpers.validate_module(module)
sub = subparsers.add_parser(handle,help=module.help)
for arg in module.args:
if arg.__class__ == ArgumentGroup:
group = sub.add_argument_group(*arg.pargs, **arg.kwargs)
add_args(group,arg)
elif arg.__class__ == MutuallyExclusiveArgumentGroup:
group = sub.add_mutually_exclusive_group(
*arg.pargs, **arg.kwargs
)
add_args(group,arg)
else:
sub.add_argument(*arg.pargs, **arg.kwargs)
args = ap.parse_args()
if 'input_file' in args:
helpers.validate_input_file(args.input_file)
elif 'input_files' in args:
helpers.validate_input_files(args.input_files)
esprint(f'Executing module: {args.module}')
modules.handles[args.module].parse(
**vars(args)
)
esprint('Module execution complete. Exiting.')
| 26.22973
| 72
| 0.660484
|
e9aaa58b95dcb45b45bf114c3d2260dad47499cb
| 1,509
|
py
|
Python
|
debug/debug.py
|
ingve/mach-nix
|
f75bd25168232db620f2ddbe5c5281d14c17d03b
|
[
"MIT"
] | 1
|
2021-05-06T06:34:24.000Z
|
2021-05-06T06:34:24.000Z
|
debug/debug.py
|
ingve/mach-nix
|
f75bd25168232db620f2ddbe5c5281d14c17d03b
|
[
"MIT"
] | null | null | null |
debug/debug.py
|
ingve/mach-nix
|
f75bd25168232db620f2ddbe5c5281d14c17d03b
|
[
"MIT"
] | null | null | null |
import json
import os
import subprocess as sp
import tempfile
from os.path import realpath, dirname
from time import time
import toml
from mach_nix.generate import main
pwd = dirname(realpath(__file__))
os.environ['py_ver_str'] = '3.7.5'
os.environ['system'] = 'x86_64-linux'
os.environ['out_file'] = f'{pwd}/overrides.nix'
os.environ['disable_checks'] = 'true'
with open(pwd + "/../mach_nix/provider_defaults.toml") as f:
provider_settings = toml.load(f)
if os.path.isfile("./providers.toml"):
with open(pwd + "./providers.toml") as f:
provider_settings.update(toml.load(f))
provider_settings.update(dict(
# add providers here
))
os.environ['providers'] = json.dumps(provider_settings)
nixpkgs_json = tempfile.mktemp()
cmd = f'nix-build {pwd}/../mach_nix/nix/nixpkgs-json.nix -o {nixpkgs_json} --show-trace'
sp.check_call(cmd, shell=True)
os.environ['nixpkgs_json'] = nixpkgs_json
pypi_deps_db = tempfile.mktemp()
cmd = f'nix-build {pwd}/../mach_nix/nix/deps-db-and-fetcher.nix -A pypi_deps_db_src -o {pypi_deps_db} --show-trace'
sp.check_call(cmd, shell=True)
os.environ['pypi_deps_db_src'] = pypi_deps_db
for key in ('PYPI_FETCHER_COMMIT', 'PYPI_FETCHER_SHA256'):
with open(f"{pypi_deps_db}/{key}") as f:
os.environ[key.lower()] = f.read()
with open(pwd + "/reqs.txt") as f:
os.environ['requirements'] = f.read()
# generates and writes nix expression into ./debug/expr.nix
start = time()
main()
dur = round(time() - start, 1)
print(f"resolving took: {dur}s")
| 29.019231
| 115
| 0.713055
|
78909b517e5e79a2ef8ec247fc91c75021de140b
| 319
|
py
|
Python
|
apps/iotdb_cloud_core/urls.py
|
JulianFeinauer/iotdb-cloud
|
0ccb2dd14c7d0ae3a8e72b3b32ce83de2df48738
|
[
"Apache-2.0"
] | 6
|
2021-08-22T02:25:55.000Z
|
2021-08-28T04:53:36.000Z
|
apps/iotdb_cloud_core/urls.py
|
JulianFeinauer/iotdb-cloud
|
0ccb2dd14c7d0ae3a8e72b3b32ce83de2df48738
|
[
"Apache-2.0"
] | null | null | null |
apps/iotdb_cloud_core/urls.py
|
JulianFeinauer/iotdb-cloud
|
0ccb2dd14c7d0ae3a8e72b3b32ce83de2df48738
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from apps.iotdb_cloud_core.views import HomeView, ExecuteView, CreateIoTDBReleaseView
urlpatterns = [
path('', HomeView.as_view(), name="home"),
path('create', CreateIoTDBReleaseView.as_view(), name="release/create"),
path('execute', ExecuteView.as_view(), name="execute"),
]
| 31.9
| 85
| 0.733542
|
edb9ea411aca473686e3e1b3aa402b912dc40269
| 123
|
py
|
Python
|
10430.py
|
FelisCatusKR/Baekjoon_Python3
|
d84dc9421fe956001864d138b6d6ec9ebd793edf
|
[
"MIT"
] | null | null | null |
10430.py
|
FelisCatusKR/Baekjoon_Python3
|
d84dc9421fe956001864d138b6d6ec9ebd793edf
|
[
"MIT"
] | null | null | null |
10430.py
|
FelisCatusKR/Baekjoon_Python3
|
d84dc9421fe956001864d138b6d6ec9ebd793edf
|
[
"MIT"
] | null | null | null |
# 10430.py
a, b, c = map(int, input().split())
print((a+b)%c)
print(((a%c)+(b%c))%c)
print((a*b)%c)
print(((a%c)*(b%c))%c)
| 20.5
| 35
| 0.504065
|
ea1394893a70610f570ae211a57334aa14d3eefe
| 6,760
|
py
|
Python
|
tools/evaluate.py
|
thangnx183/kaggle-understanding-clouds
|
15ad2a9029958262437b899cb00525579da23911
|
[
"BSD-2-Clause"
] | 207
|
2019-11-21T19:07:17.000Z
|
2022-03-28T10:53:57.000Z
|
tools/evaluate.py
|
ChasingStar95/kaggle-understanding-clouds
|
898319b564deab02b4267cc658bbebdbb15c49de
|
[
"BSD-2-Clause"
] | 12
|
2019-12-04T11:32:30.000Z
|
2022-03-12T00:06:11.000Z
|
tools/evaluate.py
|
ChasingStar95/kaggle-understanding-clouds
|
898319b564deab02b4267cc658bbebdbb15c49de
|
[
"BSD-2-Clause"
] | 60
|
2019-11-21T17:32:56.000Z
|
2022-03-28T10:53:58.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import gc
import argparse
import itertools
import tqdm
import pandas as pd
import numpy as np
import cv2
LABEL_MAP = {
'Fish': 0,
'Flower': 1,
'Gravel': 2,
'Sugar': 3
}
def mask2rle(img):
pixels= img.T.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
def rle2mask(height, width, encoded):
img = np.zeros(height*width, dtype=np.uint8)
if isinstance(encoded, float):
return img.reshape((width, height)).T
s = encoded.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape((width, height)).T
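# Round-trip sketch (illustrative shapes): mask2rle encodes a binary mask in
# column-major (Fortran) order and rle2mask inverts it, so for a mask `m` of
# shape (height, width):
# m = np.zeros((350, 525), dtype=np.uint8); m[10:20, 30:40] = 1
# assert np.array_equal(rle2mask(350, 525, mask2rle(m)), m)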
def load_predictions(input_dirs, df_train):
print(input_dirs)
image_ids = os.path.join(input_dirs[0], 'image_ids.csv')
image_ids = pd.read_csv(image_ids)['Image']
dfs = []
for input_dir in input_dirs:
filepath = os.path.join(input_dir, 'cls.csv')
if os.path.exists(filepath):
dfs.append(pd.read_csv(filepath, index_col='image_id'))
ret = []
cls_records = []
for i, image_id in tqdm.tqdm(enumerate(image_ids), total=len(image_ids)):
predictions = []
for input_dir in input_dirs:
filepath = os.path.join(input_dir, f'{image_id}.npz')
if not os.path.exists(filepath):
continue
with np.load(filepath) as data:
arr = data['arr_0']
predictions.append(arr.astype(np.float16) / 255.0)
predictions = np.mean(np.stack(predictions, axis=0), axis=0)
cls_probs = []
for c in range(0,4):
image_id_with_cls = f'{image_id}_{c}'
rle_encoded = df_train.loc[image_id_with_cls]['EncodedPixels']
label = rle2mask(1400, 2100, rle_encoded)
label = cv2.resize(label, (525,350), interpolation=cv2.INTER_NEAREST)
ret.append((image_id_with_cls, predictions[c,:,:], label))
cls_prob = np.sort(predictions[c,:,:].flatten())
cls_prob = np.mean(cls_prob[-17500:])
cls_probs.append(cls_prob)
cls_records.append(tuple([image_id] + cls_probs))
del predictions
df_seg = pd.DataFrame.from_records(cls_records, columns=['image_id', 'p0', 'p1', 'p2', 'p3'])
df_seg = df_seg.set_index('image_id')
dfs.append(df_seg)
df = sum(dfs) / len(dfs)
df.values[np.arange(df.values.shape[0]), np.argmax(df.values, axis=1)] = 1.0
gc.collect()
return ret, df
def evaluate(predictions, df_cls, cls_thresholds=[0.5,0.5,0.5,0.5], thresholds=[0.5,0.5,0.5,0.5]):
image_ids = []
masks = []
labels = []
for p in predictions:
image_id, mask, label = p
cls_id = int(image_id[-1:])
if df_cls is not None:
cls_score = df_cls.loc[image_id[:-2]][f'p{cls_id}']
else:
cls_score = np.array([1.0])
thres = thresholds[cls_id]
cls_thres = cls_thresholds[cls_id]
cls_prediction = (cls_score > cls_thres)
mask_prediction = (mask > thres)
mask_prediction = np.logical_and(mask_prediction, cls_prediction)
image_ids.append(image_id)
masks.append(mask_prediction)
labels.append(label)
masks = np.array(masks)
labels = np.array(labels)
return compute_metrics(masks, labels)
def compute_metrics(predicts, labels):
N, H, W = predicts.shape
predicts = predicts.reshape((-1, H*W))
labels = labels.reshape((-1, H*W))
sum_p = np.sum(predicts, axis=1)
sum_l = np.sum(labels, axis=1)
intersection = np.sum(np.logical_and(predicts, labels), axis=1)
numer = 2*intersection
denom = sum_p + sum_l
dice = numer / (denom + 1e-6)
empty_indices = np.where(sum_l <= 0)[0]
non_empty_indices = np.where(sum_l > 0)[0]
if len(non_empty_indices) == 0:
non_empty_mean_dice = 0.0
else:
non_empty_dice = dice[non_empty_indices]
non_empty_mean_dice = float(np.mean(non_empty_dice))
all_non_empty_index = np.where(numer > 0)[0]
all_empty_index = np.where(denom == 0)[0]
dice[all_empty_index] = 1
mean_dice = float(np.mean(dice))
cls_accuracy = (len(all_non_empty_index) + len(all_empty_index)) / N
correct_indices = np.where((sum_p > 0) == (sum_l > 0))[0]
incorrect_indices = np.where((sum_p > 0) != (sum_l > 0))[0]
tp = len(np.where(sum_l[correct_indices] > 0)[0])
tn = len(np.where(sum_l[correct_indices] == 0)[0])
fp = len(np.where(sum_l[incorrect_indices] == 0)[0])
fn = len(np.where(sum_l[incorrect_indices] > 0)[0])
precision = tp / (tp + fp + 1e-10)
recall = tp / (tp + fn + 1e-10)
tnr = tn / (tn + fp + 1e-10)
fpr = fp / (fp + tn + 1e-10)
return {'mean_dice': mean_dice,
'mean_dice_non_empty': non_empty_mean_dice,
'cls_acc': cls_accuracy,
'precision': precision,
'recall': recall,
'tnr': tnr,
'fpr': fpr,
'tp/tn/fp/fn': [tp,tn,fp,fn]}
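# Worked example (tiny illustrative masks): predicts = [[1, 1], [0, 0]] and
# labels = [[1, 0], [0, 0]] give intersection = 1, sum_p = 2, sum_l = 1, so
# dice = 2 * 1 / (2 + 1) ~= 0.67; pairs where both prediction and label are
# empty are assigned dice = 1 above.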
def parse_args():
parser = argparse.ArgumentParser(description='evaluate')
parser.add_argument('--input_dir', dest='input_dir',
help='the directory where inferenced files are located',
type=str)
return parser.parse_args()
def main():
print('evaluate')
args = parse_args()
df_train = pd.read_csv('data/train.csv')
def _to_image_id_class_id(v):
image_id = v[:v.find('_')]
label = v[v.find('_')+1:]
return f'{image_id}_{LABEL_MAP[label]}'
df_train['ImageId_ClassId'] = df_train.Image_Label.map(_to_image_id_class_id)
df_train = df_train.set_index('ImageId_ClassId')
input_dirs = args.input_dir.split(',')
pred_dev, df_cls_dev = load_predictions(
[os.path.join(input_dir, 'dev') for input_dir in input_dirs], df_train)
pred_test_dev, df_cls_test_dev = load_predictions(
[os.path.join(input_dir, 'test_dev') for input_dir in input_dirs], df_train)
cls_thresholds = [0.7,0.7,0.7,0.7]
thresholds = [0.425,0.425,0.425,0.425]
print(thresholds, cls_thresholds)
print('dev:', evaluate(pred_dev, df_cls_dev, thresholds=thresholds, cls_thresholds=cls_thresholds))
print('test_dev:', evaluate(pred_test_dev, df_cls_test_dev, thresholds=thresholds, cls_thresholds=cls_thresholds))
if __name__ == '__main__':
main()
| 30.727273
| 118
| 0.612722
|
bc6803274ef2766dfec54281df8e5ef590403ece
| 336
|
py
|
Python
|
pirates/battle/DistributedShipCannonAI.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/battle/DistributedShipCannonAI.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/battle/DistributedShipCannonAI.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
class DistributedShipCannonAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedShipCannonAI')
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
| 42
| 83
| 0.833333
|
76b7e71dc30f9ad6abcce8ed8d8ca1a454d4338e
| 1,155
|
py
|
Python
|
src/ebonite/runtime/helpers.py
|
geffy/ebonite
|
2d85eeca44ac1799e743bafe333887712e325060
|
[
"Apache-2.0"
] | 1
|
2019-11-27T14:33:45.000Z
|
2019-11-27T14:33:45.000Z
|
src/ebonite/runtime/helpers.py
|
geffy/ebonite
|
2d85eeca44ac1799e743bafe333887712e325060
|
[
"Apache-2.0"
] | null | null | null |
src/ebonite/runtime/helpers.py
|
geffy/ebonite
|
2d85eeca44ac1799e743bafe333887712e325060
|
[
"Apache-2.0"
] | null | null | null |
from ebonite.core.objects import core
from ebonite.runtime.command_line import start_runtime
from ebonite.runtime.interface import Interface
from ebonite.runtime.interface.ml_model import ModelLoader, model_interface
from ebonite.runtime.server import Server
from ebonite.utils.importing import module_importable
def run_model_server(model: 'core.Model', server: Server = None):
"""
:func:`.start_runtime` wrapper helper which starts Ebonite runtime for given model and (optional) server
:param model: model to start Ebonite runtime for
:param server: server to use for Ebonite runtime, default is a flask-based server
:return: nothing
"""
if server is None:
if module_importable('flask') and module_importable('flasgger'):
from ebonite.ext.flask import FlaskServer
server = FlaskServer()
else:
raise RuntimeError('You need to install flask and flasgger to use test flask server')
class MockLoader(ModelLoader):
def load(self) -> Interface:
model.ensure_loaded()
return model_interface(model)
start_runtime(MockLoader(), server)
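# Usage sketch (assumes `model` is an ebonite core.Model obtained elsewhere in
# application code):
# run_model_server(model)                    # default flask-based server
# run_model_server(model, server=my_server)  # or pass any Server implementation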
| 37.258065
| 108
| 0.72381
|
a207c65b37cb17f3ea2d127906e30cef4587116d
| 4,550
|
py
|
Python
|
HSCard.py
|
JEndler/hearthstoneAI
|
9a6b23e4ab868e48eaa619be4efc701985bd2160
|
[
"MIT"
] | 1
|
2021-07-23T18:32:49.000Z
|
2021-07-23T18:32:49.000Z
|
HSCard.py
|
JEndler/hearthstoneAI
|
9a6b23e4ab868e48eaa619be4efc701985bd2160
|
[
"MIT"
] | null | null | null |
HSCard.py
|
JEndler/hearthstoneAI
|
9a6b23e4ab868e48eaa619be4efc701985bd2160
|
[
"MIT"
] | null | null | null |
import numpy as np
import Card as cards
#import HearthSimulation as HearthSim
class HSCard():
FirstName = "" #for cards like shifter zerus
Name = ""
Type = "" # TYPE MINION, SPELL, ENCHANTMENT, HERO POWER, WEAPON or HERO
Health = -1
Attack = -1
Stats = np.array([Health, Attack])
Durability = -1
Cost = -1 # NORMAL COST WITHOUT REDUCTIONS
OriginalCost = -1
CardClass = "NEUTRAL"
Collectible = True
Mechanics = [] # ["CHARGE", "BATTLECRY"]
    MaxHealth = -1
ID = 0
CardId = 0
# LIST OF ALL MECHANICS --->
# ADJACENT_BUFF, AI_MUST_PLAY, AURA -> (Aura Buffs e.g.["Stormwind Champion"]), BATTLECRY, CHARGE,
# CHOOSE_ONE, COMBO, COUNTER -> (Essentially just Counterspell), DEATHRATTLE,
# DISCOVER, DIVINE_SHIELD, ENRAGED, FORGETFUL -> (50% to Attack the wrong target), FREEZE, IMMUNE, INSPIRE,
# JADE_GOLEM,
# MORPH -> (Cards such as ["Polymorph"] or ["Devolve"]), OVERLOAD, POISONOUS,
# RITUAL -> (C'Thun Buffs such as ["C'Thun's Chosen"]), SECRET, SILENCE, STEALTH, SPELLPOWER, TAG_ONE_TURN_EFFECT,
# TAUNT, TOPDECK -> Cards that reveal themselves when drawn e.g. ["Flame Leviathan"], UNTOUCHABLE,
# WINDFURY, ImmuneToSpellpower -> (Cards that don't increase their Damage normally such as ["Arcane Missiles"]),
# InvisibleDeathrattle -> (Used for some Boss Fights)
# ENTOURAGE -> (Cards wich create random Cards from a given Pool such as ["Ysera"], ["Animal Companion"])
Set = "" # BASIC, CLASIC, ONE NIGHT IN KARAZHAN
MultiClassGroup = "" # JADE_LOTUS, GRIMY_GOONS or KABAL
Faction = "" # ALLIANCE or HORDE
Race = "" # MURLOC, BEAST, DRAGON, MECHANICAL, DEMON, ELEMENTAL, TOTEM or PIRATE
Text = ""
Source = "" # CREATED BY e.g. PRIMORDIAL GLYPH
Alive = True
canAttack = False
Player = 0
SpellDamage = -1
def __init__(self, name, id):
self.ID = id
self.CardId = id
card = cards.cards.searchname(name)
self.Name = name
self.FirstName = name
self.Pos = 0
        if card != -1:
if card["type"] == "MINION":
self.Stats = np.array([card["attack"], card["health"]])
self.Health = card["health"]
self.Attack = card["attack"]
self.Type = "MINION"
if "race" in card: self.Race = card["race"]
if "faction" in card: self.Faction = card["faction"]
if "MultiClassGroup" in card: self.MultiClassGroup = card["MultiClassGroup"]
elif card["type"] == "SPELL":
self.Type = "SPELL"
elif card["type"] == "WEAPON":
self.Type = "WEAPON"
self.Attack = card["attack"]
self.Durability = card["durability"]
elif card["type"] == "HERO":
self.Health = 0
self.Attack = 0
self.Type == "Hero"
elif card["type"] == "HERO POWER":
self.Type == "Hero"
elif card["type"] == "ENCHANTMENT":
pass
if "CardClass" in card: self.CardClass = card["CardClass"]
if "mechanics" in card: self.Mechanics = card["mechanics"]
if "set" in card: self.Set = card["set"]
if "text" in card: self.Text = card["text"]
if "cost" in card: self.Cost = card["cost"]
def __repr__(self):
return self.Name
#GET-METHODS
def getName(self):
return self.Name
def getType(self):
return self.Type
def getHealth(self):
return self.Health
def getAttack(self):
return self.Attack
def getStats(self):
return self.Stats
def getDurability(self):
return self.Durability
def getCost(self):
return self.Cost
def getCardClass(self):
return self.CardClass
def getCollectible(self):
return self.Collectible
def getMechanics(self):
return self.Mechanics
def getSet(self):
return self.Set
def getMultiClassGroup(self):
return self.MultiClassGroup
def getFaction(self):
return self.Faction
def getRace(self):
return self.Race
def Adapt(self):
pass
#todo
def getText(self):
return self.Text
def getSource(self):
return self.Source
#SET-METHODS
def setSource(self, source):
self.Source = source
def setHealth(self, health):
self.Health = health
def adjHealth(self, Ammount):
self.Health += Ammount
def adjAttack(self, Ammount):
        self.Attack += Ammount  # adjust attack by the given amount, mirroring adjHealth
#def Health(self, ammount):
#todo
pass
#def Attack(self,ammount):
#todo
# pass
def Reset(self):
#todo
pass
def setAttack(self, attack):
self.Attack = attack
def setCost(self, cost):
self.Cost = cost
def getPosition(self):
pass
def getMaxHealth(self):
#todo
pass
#def checkdie(self):
def triggerDeathrattle(self):
pass
#def getCardsForClass(self, class):
#TODO
print(HSCard("Murloc Tidecaller",9).Attack)
#cardlist
card_json = cards.cards.getJsonFromFile()
cardlist = []
for card in card_json:
    cardlist.append(card)
| 27.575758
| 115
| 0.681319
|
df290db81696ba6b8e8668b964023295e059f03d
| 3,154
|
py
|
Python
|
acapy_client/api/did_exchange/post_didexchange_conn_id_accept_request.py
|
Indicio-tech/acapy-client
|
0bd47af23308362db749c2671a3e7f8259855897
|
[
"Apache-2.0"
] | 4
|
2021-08-05T09:20:34.000Z
|
2021-08-08T19:37:29.000Z
|
acapy_client/api/did_exchange/post_didexchange_conn_id_accept_request.py
|
Indicio-tech/acapy-client
|
0bd47af23308362db749c2671a3e7f8259855897
|
[
"Apache-2.0"
] | null | null | null |
acapy_client/api/did_exchange/post_didexchange_conn_id_accept_request.py
|
Indicio-tech/acapy-client
|
0bd47af23308362db749c2671a3e7f8259855897
|
[
"Apache-2.0"
] | 2
|
2021-08-12T18:18:45.000Z
|
2021-08-14T13:22:28.000Z
|
from typing import Any, Dict, Optional, Union
import httpx
from ...client import Client
from ...models.conn_record import ConnRecord
from ...types import UNSET, Response, Unset
def _get_kwargs(
conn_id: str,
*,
client: Client,
mediation_id: Union[Unset, None, str] = UNSET,
my_endpoint: Union[Unset, None, str] = UNSET,
) -> Dict[str, Any]:
url = "{}/didexchange/{conn_id}/accept-request".format(client.base_url, conn_id=conn_id)
headers: Dict[str, Any] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
params: Dict[str, Any] = {
"mediation_id": mediation_id,
"my_endpoint": my_endpoint,
}
params = {k: v for k, v in params.items() if v is not UNSET and v is not None}
return {
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"params": params,
}
def _parse_response(*, response: httpx.Response) -> Optional[ConnRecord]:
if response.status_code == 200:
response_200 = ConnRecord.from_dict(response.json())
return response_200
return None
def _build_response(*, response: httpx.Response) -> Response[ConnRecord]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
conn_id: str,
*,
client: Client,
mediation_id: Union[Unset, None, str] = UNSET,
my_endpoint: Union[Unset, None, str] = UNSET,
) -> Response[ConnRecord]:
kwargs = _get_kwargs(
conn_id=conn_id,
client=client,
mediation_id=mediation_id,
my_endpoint=my_endpoint,
)
response = httpx.post(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(response=response)
def sync(
conn_id: str,
*,
client: Client,
mediation_id: Union[Unset, None, str] = UNSET,
my_endpoint: Union[Unset, None, str] = UNSET,
) -> Optional[ConnRecord]:
""" """
return sync_detailed(
conn_id=conn_id,
client=client,
mediation_id=mediation_id,
my_endpoint=my_endpoint,
).parsed
async def asyncio_detailed(
conn_id: str,
*,
client: Client,
mediation_id: Union[Unset, None, str] = UNSET,
my_endpoint: Union[Unset, None, str] = UNSET,
) -> Response[ConnRecord]:
kwargs = _get_kwargs(
conn_id=conn_id,
client=client,
mediation_id=mediation_id,
my_endpoint=my_endpoint,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.post(**kwargs)
return _build_response(response=response)
async def asyncio(
conn_id: str,
*,
client: Client,
mediation_id: Union[Unset, None, str] = UNSET,
my_endpoint: Union[Unset, None, str] = UNSET,
) -> Optional[ConnRecord]:
""" """
return (
await asyncio_detailed(
conn_id=conn_id,
client=client,
mediation_id=mediation_id,
my_endpoint=my_endpoint,
)
).parsed
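# Usage sketch (illustrative addition, not part of the generated client); the
# base_url and conn_id values below are placeholders for a running agent.
#
# client = Client(base_url="http://localhost:8031")
# record = sync("<conn_id>", client=client)
# if record is not None:
#     print(record)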
| 24.261538
| 92
| 0.629994
|
5831071869c62d20610ae46451667abdf95d1235
| 1,818
|
py
|
Python
|
nnvm/tests/python/frontend/onnx/test_forward.py
|
rah9eu/p3
|
530628be7b7a8dd3e6199c3bebebdbf104005e5f
|
[
"Apache-2.0"
] | 22
|
2019-02-20T12:42:20.000Z
|
2021-12-25T06:09:46.000Z
|
nnvm/tests/python/frontend/onnx/test_forward.py
|
rah9eu/p3
|
530628be7b7a8dd3e6199c3bebebdbf104005e5f
|
[
"Apache-2.0"
] | 4
|
2019-04-01T07:36:04.000Z
|
2022-03-24T03:11:26.000Z
|
nnvm/tests/python/frontend/onnx/test_forward.py
|
rah9eu/p3
|
530628be7b7a8dd3e6199c3bebebdbf104005e5f
|
[
"Apache-2.0"
] | 7
|
2019-03-20T16:04:37.000Z
|
2021-04-28T18:40:11.000Z
|
import numpy as np
import nnvm
import tvm
from tvm.contrib import graph_runtime
from nnvm.testing.config import ctx_list
import onnx
from model_zoo import super_resolution, squeezenet1_1, lenet
def verify_onnx_forward_impl(graph_file, data_shape, out_shape):
import onnx_caffe2.backend
def get_caffe2_output(model, x, dtype='float32'):
prepared_backend = onnx_caffe2.backend.prepare(model)
W = {model.graph.input[0].name: x.astype(dtype)}
c2_out = prepared_backend.run(W)[0]
return c2_out
def get_tvm_output(graph, x, target, ctx, dtype='float32'):
new_sym, params = nnvm.frontend.from_onnx(graph)
shape_dict = {'input_0': x.shape}
graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, params=params)
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input('input_0', tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
m.run()
# get outputs
out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
return out.asnumpy()
dtype = 'float32'
x = np.random.uniform(size=data_shape)
model = onnx.load(graph_file)
c2_out = get_caffe2_output(model, x, dtype)
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, x, target, ctx, dtype)
np.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5)
def verify_super_resolution_example():
verify_onnx_forward_impl(super_resolution, (1, 1, 224, 224), (1, 1, 672, 672))
def verify_squeezenet1_1():
verify_onnx_forward_impl(squeezenet1_1, (1, 3, 224, 224), (1, 1000))
def verify_lenet():
verify_onnx_forward_impl(lenet, (1, 1, 28, 28), (1, 10))
if __name__ == '__main__':
verify_super_resolution_example()
verify_squeezenet1_1()
verify_lenet()
| 35.647059
| 92
| 0.685919
|
c6eba5ac03d4ae0e1b5c18c4e287f6d989ba9de0
| 5,466
|
py
|
Python
|
test/automl/test_warmstart.py
|
dalian-ai/FLAML
|
f8cc38bc16fc3a09074b91b82bfda1d4ed9fd9b0
|
[
"MIT"
] | null | null | null |
test/automl/test_warmstart.py
|
dalian-ai/FLAML
|
f8cc38bc16fc3a09074b91b82bfda1d4ed9fd9b0
|
[
"MIT"
] | null | null | null |
test/automl/test_warmstart.py
|
dalian-ai/FLAML
|
f8cc38bc16fc3a09074b91b82bfda1d4ed9fd9b0
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
from sklearn.datasets import load_iris
from flaml import AutoML
from flaml.model import LGBMEstimator
from flaml import tune
class TestWarmStart(unittest.TestCase):
def test_fit_w_freezinghp_starting_point(self, as_frame=True):
automl = AutoML()
automl_settings = {
"time_budget": 1,
"metric": "accuracy",
"task": "classification",
"estimator_list": ["lgbm"],
"log_file_name": "test/iris.log",
"log_training_metric": True,
"n_jobs": 1,
"model_history": True,
}
X_train, y_train = load_iris(return_X_y=True, as_frame=as_frame)
if as_frame:
# test drop column
X_train.columns = range(X_train.shape[1])
X_train[X_train.shape[1]] = np.zeros(len(y_train))
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
automl_val_accuracy = 1.0 - automl.best_loss
print("Best ML leaner:", automl.best_estimator)
print("Best hyperparmeter config:", automl.best_config)
print("Best accuracy on validation data: {0:.4g}".format(automl_val_accuracy))
print(
"Training duration of best run: {0:.4g} s".format(
automl.best_config_train_time
)
)
# 1. Get starting points from previous experiments.
starting_points = automl.best_config_per_estimator
print("starting_points", starting_points)
print("loss of the starting_points", automl.best_loss_per_estimator)
starting_point = starting_points["lgbm"]
hps_to_freeze = ["colsample_bytree", "reg_alpha", "reg_lambda", "log_max_bin"]
        # 2. Construct a new class:
        # a. write the hps you want to freeze as hps with constant 'domain';
        # b. specify the new search space of the other hps accordingly.
class MyPartiallyFreezedLargeLGBM(LGBMEstimator):
@classmethod
def search_space(cls, **params):
# (1) Get the hps in the original search space
space = LGBMEstimator.search_space(**params)
# (2) Set up the fixed value from hps from the starting point
for hp_name in hps_to_freeze:
                    # if an hp is specified to be frozen, use the value provided in the starting_point
# otherwise use the setting from the original search space
if hp_name in starting_point:
space[hp_name] = {"domain": starting_point[hp_name]}
# (3.1) Configure the search space for hps that are in the original search space
# but you want to change something, for example the range.
revised_hps_to_search = {
"n_estimators": {
"domain": tune.lograndint(lower=10, upper=32768),
"init_value": starting_point.get("n_estimators")
or space["n_estimators"].get("init_value", 10),
"low_cost_init_value": space["n_estimators"].get(
"low_cost_init_value", 10
),
},
"num_leaves": {
"domain": tune.lograndint(lower=10, upper=3276),
"init_value": starting_point.get("num_leaves")
or space["num_leaves"].get("init_value", 10),
"low_cost_init_value": space["num_leaves"].get(
"low_cost_init_value", 10
),
},
# (3.2) Add a new hp which is not in the original search space
"subsample": {
"domain": tune.uniform(lower=0.1, upper=1.0),
"init_value": 0.1,
},
}
space.update(revised_hps_to_search)
return space
new_estimator_name = "large_lgbm"
new_automl = AutoML()
new_automl.add_learner(
learner_name=new_estimator_name, learner_class=MyPartiallyFreezedLargeLGBM
)
automl_settings_resume = {
"time_budget": 3,
"metric": "accuracy",
"task": "classification",
"estimator_list": [new_estimator_name],
"log_file_name": "test/iris_resume.log",
"log_training_metric": True,
"n_jobs": 1,
"model_history": True,
"log_type": "all",
"starting_points": {new_estimator_name: starting_point},
}
new_automl.fit(X_train=X_train, y_train=y_train, **automl_settings_resume)
new_automl_val_accuracy = 1.0 - new_automl.best_loss
print("Best ML leaner:", new_automl.best_estimator)
print("Best hyperparmeter config:", new_automl.best_config)
print(
"Best accuracy on validation data: {0:.4g}".format(new_automl_val_accuracy)
)
print(
"Training duration of best run: {0:.4g} s".format(
new_automl.best_config_train_time
)
)
def test_nobudget(self):
automl = AutoML()
X_train, y_train = load_iris(return_X_y=True)
automl.fit(X_train, y_train)
print(automl.best_config_per_estimator)
if __name__ == "__main__":
unittest.main()
| 42.372093
| 103
| 0.567508
|
4c5fa94d3913c5cb71aa3191db2a3a06d353f8be
| 298
|
py
|
Python
|
scenarios/card_hold_capture/executable.py
|
trenton42/txbalanced
|
9ee1b906d75b4b2fc3d2f5424dc3bbb9886c2b14
|
[
"MIT"
] | null | null | null |
scenarios/card_hold_capture/executable.py
|
trenton42/txbalanced
|
9ee1b906d75b4b2fc3d2f5424dc3bbb9886c2b14
|
[
"MIT"
] | null | null | null |
scenarios/card_hold_capture/executable.py
|
trenton42/txbalanced
|
9ee1b906d75b4b2fc3d2f5424dc3bbb9886c2b14
|
[
"MIT"
] | null | null | null |
import balanced
balanced.configure('ak-test-1o9QKwUCrwstHWO5sGxICtIJdQXFTjnrV')
card_hold = balanced.CardHold.fetch('/card_holds/HL4io3nFmawRhnkkUWnC1Eoo')
debit = card_hold.capture(
appears_on_statement_as='ShowsUpOnStmt',
description='Some descriptive text for the debit in the dashboard'
)
| 33.111111
| 75
| 0.825503
|
1ad8dae84d84b814a1f056ebc37e851af837e103
| 5,441
|
py
|
Python
|
grr/lib/ipv6_utils.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | 5
|
2017-03-17T08:25:09.000Z
|
2022-02-22T05:28:14.000Z
|
grr/lib/ipv6_utils.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | null | null | null |
grr/lib/ipv6_utils.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | 3
|
2018-12-07T07:04:37.000Z
|
2022-02-22T05:28:16.000Z
|
#!/usr/bin/env python
"""Functions for manipulating ipv6 addresses.
We've written our own versions of socket.inet_pton and inet_ntop for ipv6
because those functions are not available on windows before python 3.4.
"""
import re
import socket
# ntop does not exist on Windows.
# pylint: disable=g-socket-inet-aton,g-socket-inet-ntoa
V4_ENDING = re.compile(r"(?P<v6>.*):(\d+)\.(\d+)\.(\d+)\.(\d+)$")
ZERO_SEQUENCE = re.compile(r"(?:^|\:)(?:0\:?)+")
BAD_SINGLE_COLON = re.compile(r"(^\:[^:].*|.*[^:]\:$)")
def _RemoveV4Ending(addr_string):
"""Replace v4 endings with v6 equivalents."""
match = V4_ENDING.match(addr_string)
if match:
ipv4_addr = ".".join(match.groups()[1:])
try:
socket.inet_aton(ipv4_addr)
except (socket.error, ValueError):
raise socket.error("Illegal IPv4 extension: %s" % addr_string)
if int(match.group(2)) == 0:
raise socket.error("IPv4 can't start with 0")
return "%s:%04x:%04x" % (match.group("v6"),
int(match.group(2)) * 256 + int(match.group(3)),
int(match.group(4)) * 256 + int(match.group(5)))
return addr_string
def _StripLeadingOrTrailingDoubleColons(addr_string):
"""Strip leading or trailing double colon."""
if addr_string.startswith("::"):
return addr_string[1:]
if addr_string.endswith("::"):
return addr_string[:-1]
return addr_string
def _ZeroPad(addr_string):
"""Pad out zeros in each address chunk as necessary."""
chunks = addr_string.split(":")
total_length = len(chunks)
if total_length > 8:
raise socket.error("Too many address chunks in %s, expected 8" %
addr_string)
double_colon = False
addr_array = []
for chunk in chunks:
if chunk:
chunk_len = len(chunk)
if chunk_len > 4:
raise socket.error("Chunk must be length 4: %s" % addr_string)
if chunk_len != 4:
# Pad out with 0's until we have 4 digits
chunk = "0" * (4 - chunk_len) + chunk
addr_array.append(chunk)
else:
if double_colon:
raise socket.error("More than one double colon in %s" % addr_string)
else:
double_colon = True
# Add zeros for the compressed chunks
addr_array.extend(["0000"] * (8 - total_length + 1))
if len(addr_array) != 8:
raise socket.error("Bad address length, expected 8 chunks: %s" % addr_array)
return "".join(addr_array)
def InetPtoN(protocol, addr_string):
"""Convert ipv6 string to packed bytes.
Args:
protocol: socket.AF_INET or socket.AF_INET6
addr_string: IPv6 address string
Returns:
bytestring representing address
Raises:
socket.error: on bad IPv6 address format
"""
if protocol == socket.AF_INET:
return socket.inet_aton(addr_string)
if protocol != socket.AF_INET6:
raise socket.error("Unsupported protocol")
if not addr_string:
raise socket.error("Empty address string")
if BAD_SINGLE_COLON.match(addr_string):
raise socket.error("Start or ends with single colon")
if addr_string == "::":
return ("0" * 32).decode("hex_codec")
addr_string = _RemoveV4Ending(addr_string)
addr_string = _StripLeadingOrTrailingDoubleColons(addr_string)
addr_string = _ZeroPad(addr_string)
try:
return addr_string.decode("hex_codec")
except TypeError:
raise socket.error("Error decoding: %s" % addr_string)
def InetNtoP(protocol, packed_bytes):
"""Convert ipv6 packed bytes to string.
Args:
protocol: protocol
packed_bytes: bytestring
Returns:
ipv6 string
Raises:
socket.error: on bad bytestring
"""
if protocol == socket.AF_INET:
return socket.inet_ntoa(packed_bytes)
if protocol != socket.AF_INET6:
raise socket.error("Unsupported protocol")
if len(packed_bytes) != 16:
raise socket.error("IPv6 addresses are 16 bytes long, got %s for %s" %
(len(packed_bytes), packed_bytes))
hex_encoded = packed_bytes.encode("hex_codec")
# Detect IPv4 endings
if hex_encoded.startswith("00000000000000000000ffff"):
return "::ffff:" + socket.inet_ntoa(packed_bytes[-4:])
# Detect IPv4 endings. If the first quad is 0, it isn't IPv4.
if hex_encoded.startswith("0" * 24) and not hex_encoded.startswith("0" * 28):
return "::" + socket.inet_ntoa(packed_bytes[-4:])
# Split into quads
chunked = [hex_encoded[i:i + 4] for i in xrange(0, len(hex_encoded), 4)]
output = []
for chunk in chunked:
# Strip leading zeros
chunk = "".join(chunk).lstrip("0")
if not chunk:
# Set all 0 chunks to a single 0
chunk = "0"
output.append(chunk)
result_str = ":".join(output)
# Compress with :: by finding longest sequence of zeros that look like :0:0:0
# or 0:0:0 if its the start of the string
matches = ZERO_SEQUENCE.findall(result_str)
if matches:
largest_zero_str = max(matches, key=len)
if len(largest_zero_str) > 3:
# Replace any zero string longer than :0: with ::
result_str = result_str.replace(largest_zero_str, "::", 1)
return result_str
# If the implementation supports it, just use the native functions.
# pylint: disable=invalid-name
# Keep a reference to the custom functions in case we really want them (for
# tests).
CustomInetNtoP = InetNtoP
CustomInetPtoN = InetPtoN
if getattr(socket, "inet_ntop", None):
InetNtoP = socket.inet_ntop
if getattr(socket, "inet_pton", None):
InetPtoN = socket.inet_pton
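# Round-trip sketch (added for illustration, not part of the original module):
# it exercises whichever pair of functions ended up bound above.
if __name__ == "__main__":
  for example in ["::1", "2001:db8::ff00:42:8329", "::ffff:192.0.2.1"]:
    packed = InetPtoN(socket.AF_INET6, example)
    print("%s -> %s" % (example, InetNtoP(socket.AF_INET6, packed)))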
| 28.78836
| 80
| 0.66826
|
148362edeca1b1ba98c383f0218ff79284293d1a
| 16,271
|
py
|
Python
|
dataset/dataset_shapenet_aug.py
|
nsfzyzz/dispersion-score
|
ac0c633fe3af091e83d2d198809d98545a0a311a
|
[
"MIT"
] | 10
|
2021-12-01T00:55:32.000Z
|
2022-01-02T18:07:04.000Z
|
dataset/dataset_shapenet_aug.py
|
nsfzyzz/dispersion-score
|
ac0c633fe3af091e83d2d198809d98545a0a311a
|
[
"MIT"
] | null | null | null |
dataset/dataset_shapenet_aug.py
|
nsfzyzz/dispersion-score
|
ac0c633fe3af091e83d2d198809d98545a0a311a
|
[
"MIT"
] | 3
|
2021-12-01T00:55:47.000Z
|
2021-12-17T15:18:43.000Z
|
import torch.utils.data as data
import os.path
import torch
import torchvision.transforms as transforms
import numpy as np
import os
from PIL import Image
import sys
sys.path.append("../")
import auxiliary.my_utils as my_utils
from auxiliary.constant import TRAIN_SIZE, TEST_SIZE, MAXVIEW_PER_SHAPE
import pickle
from os.path import join, dirname, exists
from easydict import EasyDict
import json
import math
import logging
from dataset.vc_generator import camera_info, plot
import dataset.pointcloud_processor as pointcloud_processor
from copy import deepcopy
import tqdm
import random
from pathlib import Path
class ShapeNet(data.Dataset):
"""
Shapenet Dataloader
Uses Shapenet V1
"""
def __init__(self, opt, train=True, num_image_per_object=1, rand_view_select=False):
self.opt = opt
if opt.no_compile_chamfer:
self.num_sample = opt.number_points if train else opt.number_points_eval
else:
self.num_sample = opt.number_points if train else 2500
self.train = train
self.init_normalization()
self.init_singleview()
        assert num_image_per_object <= 24, 'ShapeNet 13 R2N2 renderings only have 24 views per shape'
if not opt.demo:
self.opt.logger.info('Create Shapenet Dataset Train Set...') if train else self.opt.logger.info('Create Shapenet Dataset Test Set...')
# Define core path array
self.datapath = []
self.category_datapath = {}
# Load classes
self.pointcloud_path = join(dirname(__file__), 'data/ShapeNetV1PointCloud')
self.image_path = join(dirname(__file__), 'data/ShapeNetV1Renderings')
# Load taxonomy file
self.taxonomy_path = join(dirname(__file__), 'data/taxonomy.json')
if not exists(self.taxonomy_path):
os.system("chmod +x dataset/download_shapenet_pointclouds.sh")
os.system("./dataset/download_shapenet_pointclouds.sh")
self.classes = [x for x in next(os.walk(self.pointcloud_path))[1]]
with open(self.taxonomy_path, 'r') as f:
self.taxonomy = json.load(f)
self.id2names = {}
self.names2id = {}
for dict_class in self.taxonomy:
if dict_class['synsetId'] in self.classes:
name = dict_class['name'].split(sep=',')[0]
self.id2names[dict_class['synsetId']] = name
self.names2id[name] = dict_class['synsetId']
# Select classes
if opt.shapenet13:
opt.class_choice = ["airplane", "bench", "cabinet", "car", "chair", "display", "lamp", "loudspeaker",
"rifle", "sofa", "table", "telephone", "vessel"]
if len(opt.class_choice) > 0:
new_classes = []
for category in opt.class_choice:
new_classes.append(self.names2id[category])
self.classes = new_classes
# Create Cache path
self.path_dataset = join(dirname(__file__), 'data', 'cache')
if not exists(self.path_dataset):
os.mkdir(self.path_dataset)
self.path_dataset = join(self.path_dataset,
self.opt.normalization + str(train) + "_".join(self.opt.class_choice))
if not exists(self.image_path):
os.system("chmod +x dataset/download_shapenet_renderings.sh")
os.system("./dataset/download_shapenet_renderings.sh")
self.num_image_per_object = num_image_per_object
self.idx_image_val = 0
# if rand_view_select:
# opt.logger.info("Randam Views Selection")
#--------------------------------------------------------------------------#
# Compile list of pointcloud path by selected category
for category in self.classes:
dir_pointcloud = join(self.pointcloud_path, category)
dir_image = join(self.image_path, category)
list_pointcloud = sorted(os.listdir(dir_pointcloud))
if self.train:
list_pointcloud = list_pointcloud[:int(len(list_pointcloud) * 0.8)]
else:
list_pointcloud = list_pointcloud[int(len(list_pointcloud) * 0.8):]
self.opt.logger.info(
' category '
+ category
+ " "
+ self.id2names[category]
+ ' Number Files :'
+ str(len(list_pointcloud))
)
if len(list_pointcloud) != 0:
self.category_datapath[category] = []
for pointcloud in list_pointcloud:
pointcloud_path = join(dir_pointcloud, pointcloud) #data/ShapeNetV1PointCloud/04530566/ffffe224db39febe288b05b36358465d.points.ply.npy
image_folder = join(dir_image, pointcloud.split(".")[0], "rendering")
view_path = os.path.join(image_folder, 'rendering_metadata.txt')
cam_params = np.loadtxt(view_path)
# if rand_view_select:
# for _ in range(self.num_image_per_object):
# if train:
# view_idx = np.random.randint(1, MAXVIEW_PER_SHAPE) # [0, 24)
# else:
# view_idx = self.idx_image_val # from AtlasNet dataset codebase [1, 24)
# cam_rotmat, _ = camera_info(cam_params[view_idx])
# image_path = join(image_folder, ShapeNet.int2str(view_idx) + ".png")
# if not self.opt.SVR or exists(image_path):
# self.category_datapath[category].append((pointcloud_path, image_path, pointcloud, category, cam_rotmat))
# else:
# self.opt.logger.info(f"Rendering not found : {image_path}")
# else:
if train:
for v_idx in range(1, self.num_image_per_object + 1):
cam_rotmat, _ = camera_info(cam_params[v_idx])
image_path = join(image_folder, ShapeNet.int2str(v_idx) + ".png")
if not self.opt.SVR or exists(image_path):
self.category_datapath[category].append((pointcloud_path, image_path, pointcloud, category, cam_rotmat))
else:
self.opt.logger.info(f"Rendering not found : {image_path}")
else:
cam_rotmat, _ = camera_info(cam_params[self.idx_image_val])
image_path = join(image_folder, ShapeNet.int2str(self.idx_image_val) + ".png")
if not self.opt.SVR or exists(image_path):
self.category_datapath[category].append((pointcloud_path, image_path, pointcloud, category, cam_rotmat))
else:
self.opt.logger.info(f"Rendering not found : {image_path}")
# Add all retained path to a global vector
for item in self.classes:
for pointcloud in self.category_datapath[item]:
self.datapath.append(pointcloud)
#------------------------------------------------------------------------------#
# Preprocess and cache files
self.preprocess()
#self.preprocess_wo_cached()
def preprocess(self):
if exists(self.path_dataset + "info.pkl"):
# Reload dataset
self.opt.logger.info(f"Reload dataset : {self.path_dataset}")
with open(self.path_dataset + "info.pkl", "rb") as fp:
self.data_metadata = pickle.load(fp)
self.data_points = torch.load(self.path_dataset + "points.pth")
else:
# Preprocess dataset and put in cache for future fast reload
self.opt.logger.info("preprocess dataset...")
self.datas = [self._getitem(i) for i in range(self.__len__())]
            # Concatenate all processed files
self.data_points = [a[0] for a in self.datas]
self.data_points = torch.cat(self.data_points, 0)
self.data_metadata = [{'pointcloud_path': a[1], 'image_path': a[2], 'name': a[3], 'category': a[4], 'cam_rotmat': a[5]} for a in
self.datas]
# Save in cache
with open(self.path_dataset + "info.pkl", "wb") as fp: # Pickling
pickle.dump(self.data_metadata, fp)
torch.save(self.data_points, self.path_dataset + "points.pth")
#self.opt.logger.info("Dataset Size: " + str(len(self.data_metadata)))
self.opt.logger.info(f"Dataset Shape Size: {len(self.data_metadata)} Sample Size: {len(self.datapath)}")
self.opt.logger.info("###############################################################")
def preprocess_wo_cached(self):
self.opt.logger.info("preprocess dataset w/o cached...")
self.datas = [self._getitem(i) for i in range(self.__len__())]
        # Concatenate all processed files
self.data_points = [a[0] for a in self.datas]
self.data_points = torch.cat(self.data_points, 0)
self.data_metadata = [{'pointcloud_path': a[1], 'image_path': a[2], 'name': a[3], 'category': a[4]} for a in
self.datas]
self.opt.logger.info("Dataset Size: " + str(len(self.data_metadata)))
def init_normalization(self):
if not self.opt.demo:
self.opt.logger.info("Dataset normalization : " + self.opt.normalization)
if self.opt.normalization == "UnitBall":
self.normalization_function = pointcloud_processor.Normalization.normalize_unitL2ball_functional
elif self.opt.normalization == "BoundingBox":
self.normalization_function = pointcloud_processor.Normalization.normalize_bounding_box_functional
else:
self.normalization_function = pointcloud_processor.Normalization.identity_functional
def init_singleview(self):
## Define Image Transforms
if self.opt.img_aug:
self.opt.logger.info("SVR TASK: random img crop applied in Training!")
else:
self.opt.logger.info("SVR TASK: NO img crop applied")
self.transforms = transforms.Compose([
transforms.Resize(size=224, interpolation=2),
transforms.ToTensor(),
])
# RandomResizedCrop or RandomCrop
self.dataAugmentation = transforms.Compose([
transforms.RandomCrop(127),
transforms.RandomHorizontalFlip(),
])
self.validating = transforms.Compose([
transforms.CenterCrop(127),
])
def _getitem(self, index):
pointcloud_path, image_path, pointcloud, category, cam_rotmat = self.datapath[index]
points = np.load(pointcloud_path)
points = torch.from_numpy(points).float()
points[:, :3] = self.normalization_function(points[:, :3])
return points.unsqueeze(0), pointcloud_path, image_path, pointcloud, category, cam_rotmat
#Origin AtlasNet version
def __getitem__(self, index):
#return_dict = deepcopy(self.data_metadata[index])
sample_dic = self.datapath[index]
return_dict = { 'pointcloud_path': sample_dic[0],
'image_path': sample_dic[1],
'name': sample_dic[2],
'category': sample_dic[3],
'cam_rotmat': torch.from_numpy(sample_dic[4]).float()}
# Point processing
points = self.data_points[math.floor(index / self.num_image_per_object)]
points = points.clone()
if self.opt.sample:
choice = np.random.choice(points.size(0), self.num_sample, replace=True)
points = points[choice, :]
return_dict['points'] = points[:, :3].contiguous()
# Image processing
if self.opt.SVR:
if self.train:
im = Image.open(return_dict['image_path'])
if self.opt.img_aug:
im = self.dataAugmentation(im) # random crop
else:
im = self.validating(im)
else:
im = Image.open(return_dict['image_path'])
im = self.validating(im) # center crop
im = self.transforms(im) # scale
im = im[:3, :, :]
return_dict['image'] = im
return return_dict
def __len__(self):
return len(self.datapath)
@staticmethod
def int2str(N):
if N < 10:
return "0" + str(N)
else:
return str(N)
def load(self, path):
ext = path.split('.')[-1]
if ext == "npy" or ext == "ply" or ext == "obj":
return self.load_point_input(path)
else:
return self.load_image(path)
def load_point_input(self, path):
ext = path.split('.')[-1]
if ext == "npy":
points = np.load(path)
elif ext == "ply" or ext == "obj":
import pymesh
points = pymesh.load_mesh(path).vertices
else:
self.opt.logger.info("invalid file extension")
points = torch.from_numpy(points).float()
operation = pointcloud_processor.Normalization(points, keep_track=True)
if self.opt.normalization == "UnitBall":
operation.normalize_unitL2ball()
elif self.opt.normalization == "BoundingBox":
operation.normalize_bounding_box()
else:
pass
return_dict = {
'points': points,
'operation': operation,
'path': path,
}
return return_dict
def load_image(self, path):
im = Image.open(path)
im = self.validating(im)
im = self.transforms(im)
im = im[:3, :, :]
return_dict = {
'image': im.unsqueeze_(0),
'operation': None,
'path': path,
}
return return_dict
if __name__ == '__main__':
print('Testing Shapenet dataset')
opt = {"normalization": "UnitBall", "class_choice": ["plane"], "SVR": True, "sample": True, "npoints": 2500,
"shapenet13": True, "demo": False, "logger": logging.getLogger(), "img_aug": False, "manual_seed": 1}
opt = EasyDict(opt)
my_utils.plant_seeds(opt.manual_seed)
dataset = ShapeNet(opt, train=False, num_image_per_object=1)
'''
dataloader = torch.utils.data.DataLoader(dataset, batch_size=8,
shuffle=False,
num_workers=16)
batch_sample_idx = random.sample(range(len(dataloader)), k=10)
print(batch_sample_idx)
for batch_idx, batch in tqdm.tqdm(enumerate(dataloader), total=len(dataloader)):
if batch_idx in batch_sample_idx:
pts = batch['points']
cam_rotmat = batch['cam_rotmat']
images = batch['image']
image_paths = batch['image_path']
pt_trans = torch.bmm(pts, torch.transpose(cam_rotmat, 1, 2))
for pt_idx in range(pt_trans.shape[0]):
res_path = f'data/vc_res_rand_vselect/bt{batch_idx}_pt{pt_idx}.png'
res_folder = os.path.split(res_path)[0]
Path(res_folder).mkdir(parents=True, exist_ok=True)
plot(pt_trans[pt_idx].numpy(), np.transpose(images[pt_idx].numpy(), (1, 2, 0)),
title=image_paths[pt_idx][-12:], ifsave=True, path=res_path, scale=0.8)
'''
# for i in tqdm.tqdm(range(len(d)), total=len(d)):
# a = d[i]
| 43.505348
| 164
| 0.554729
|
eba6bf892d0a6227587dc0ec3d53c009177616a4
| 10,066
|
py
|
Python
|
pyscf/fci/test/test_rdm.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | 1
|
2018-05-02T19:55:30.000Z
|
2018-05-02T19:55:30.000Z
|
pyscf/fci/test/test_rdm.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | null | null | null |
pyscf/fci/test/test_rdm.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | 1
|
2018-12-06T03:10:50.000Z
|
2018-12-06T03:10:50.000Z
|
from functools import reduce
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import fci
norb = 6
nelec = 6
na = fci.cistring.num_strings(norb, nelec//2)
numpy.random.seed(1)
ci0 = numpy.random.random((na,na))
ci0 = ci0 + ci0.T
rdm1, rdm2 = fci.direct_spin1.make_rdm12(ci0, norb, nelec)
class KnowValues(unittest.TestCase):
def test_rdm3(self):
dm3ref = make_dm3_o0(ci0, norb, nelec)
dm1, dm2, dm3 = fci.rdm.make_dm123('FCI3pdm_kern_spin0', ci0, ci0, norb, nelec)
self.assertTrue(numpy.allclose(dm3ref, dm3))
dm3a = reorder_dm123_o0(dm1, dm2, dm3, False)[2]
dm3b = fci.rdm.reorder_dm123(dm1, dm2, dm3, False)[2]
self.assertTrue(numpy.allclose(dm3a, dm3b))
dm3 = dm3b
fac = 1. / (nelec-2)
self.assertTrue(numpy.allclose(rdm2, numpy.einsum('ijklmm->ijkl',dm3)*fac))
self.assertTrue(numpy.allclose(rdm2, numpy.einsum('ijmmkl->ijkl',dm3)*fac))
self.assertTrue(numpy.allclose(rdm2, numpy.einsum('mmijkl->ijkl',dm3)*fac))
dm3 = fci.rdm.make_dm123('FCI3pdm_kern_sf', ci0, ci0, norb, nelec)[2]
dm2 = fci.direct_spin1.make_rdm12(ci0, norb, nelec, reorder=False)[1]
self.assertTrue(numpy.allclose(dm2, numpy.einsum('mmijkl->ijkl',dm3)/nelec))
numpy.random.seed(2)
na = fci.cistring.num_strings(norb, 5)
nb = fci.cistring.num_strings(norb, 3)
ci1 = numpy.random.random((na,nb))
dm3ref = make_dm3_o0(ci1, norb, (5,3))
dm3 = fci.rdm.make_dm123('FCI3pdm_kern_sf', ci1, ci1, norb, (5,3))[2]
self.assertTrue(numpy.allclose(dm3ref, dm3))
def test_dm4(self):
dm4ref = make_dm4_o0(ci0, norb, nelec)
dm4 = fci.rdm.make_dm1234('FCI4pdm_kern_sf', ci0, ci0, norb, nelec)[3]
self.assertTrue(numpy.allclose(dm4ref, dm4))
numpy.random.seed(2)
na = fci.cistring.num_strings(norb, 5)
nb = fci.cistring.num_strings(norb, 3)
ci1 = numpy.random.random((na,nb))
dm4ref = make_dm4_o0(ci1, norb, (5,3))
dm1, dm2, dm3, dm4 = fci.rdm.make_dm1234('FCI4pdm_kern_sf', ci1, ci1, norb, (5,3))
self.assertTrue(numpy.allclose(dm4ref, dm4))
self.assertTrue(numpy.allclose(dm3, numpy.einsum('ppmnijkl->mnijkl',dm4)/8))
self.assertTrue(numpy.allclose(dm3, numpy.einsum('mnppijkl->mnijkl',dm4)/8))
self.assertTrue(numpy.allclose(dm3, numpy.einsum('mnijppkl->mnijkl',dm4)/8))
self.assertTrue(numpy.allclose(dm3, numpy.einsum('mnijklpp->mnijkl',dm4)/8))
dm3a, dm4a = reorder_dm1234_o0(dm1, dm2, dm3, dm4, False)[2:]
dm4b = fci.rdm.reorder_dm1234(dm1, dm2, dm3, dm4, False)[3]
self.assertTrue(numpy.allclose(dm4a, dm4b))
self.assertTrue(numpy.allclose(dm3a, numpy.einsum('ppmnijkl->mnijkl',dm4b)/5))
self.assertTrue(numpy.allclose(dm3a, numpy.einsum('mnppijkl->mnijkl',dm4b)/5))
self.assertTrue(numpy.allclose(dm3a, numpy.einsum('mnijppkl->mnijkl',dm4b)/5))
self.assertTrue(numpy.allclose(dm3a, numpy.einsum('mnijklpp->mnijkl',dm4b)/5))
def test_tdm2(self):
dm1 = numpy.einsum('ij,ijkl->kl', ci0, _trans1(ci0, norb, nelec))
self.assertTrue(numpy.allclose(rdm1, dm1))
dm2 = numpy.einsum('ij,ijklmn->klmn', ci0, _trans2(ci0, norb, nelec))
dm2 = fci.rdm.reorder_rdm(rdm1, dm2)[1]
self.assertTrue(numpy.allclose(rdm2,dm2))
na = ci0.shape[0]
numpy.random.seed(1)
ci = numpy.random.random((na,na))
ci1 = numpy.random.random((na,na))
dm1, dm2 = fci.direct_spin1.trans_rdm12(ci, ci1, norb, nelec)
numpy.random.seed(2)
self.assertAlmostEqual(numpy.dot(dm2.flatten(),numpy.random.random(dm2.size)),
3790.8867819690477, 7)
self.assertTrue(numpy.allclose(dm2, dm2.transpose(2,3,0,1)))
t1 = _trans1(ci1, norb, nelec)
t2 = _trans2(ci1, norb, nelec)
dm1a = numpy.einsum('ij,ijpq->pq', ci, t1)
dm2a = numpy.einsum('ij,ijpqrs->pqrs', ci, t2)
self.assertTrue(numpy.allclose(dm1a, dm1))
dm1a, dm2a = fci.rdm.reorder_rdm(dm1a, dm2a)
self.assertTrue(numpy.allclose(dm2a,dm2a.transpose(2,3,0,1)))
def test_full_alpha(self):
nelec = (6,3)
norb = 6
        npair = norb*(norb+1)//2
numpy.random.seed(12)
h1 = numpy.random.random((norb,norb))
h1 = h1 + h1.T
h2 = numpy.random.random((npair,npair)) * .1
h2 = h2 + h2.T
cis = fci.direct_spin1.FCI()
e, c = cis.kernel(h1, h2, norb, nelec, verbose=5)
dm1s, dm2s = cis.make_rdm12s(c, norb, nelec)
self.assertAlmostEqual(abs(dm1s[0]).sum(), 6, 9)
self.assertAlmostEqual(dm1s[1].trace(), 3, 9)
self.assertAlmostEqual(abs(dm2s[0]).sum(), 60, 9)
self.assertAlmostEqual(abs(numpy.einsum('iijk->jk', dm2s[1])/6-dm1s[1]).sum(), 0, 9)
self.assertAlmostEqual(abs(numpy.einsum('iijk->jk', dm2s[2])/2-dm1s[1]).sum(), 0, 9)
def test_0beta(self):
nelec = (3,0)
norb = 6
        npair = norb*(norb+1)//2
numpy.random.seed(12)
h1 = numpy.random.random((norb,norb))
h1 = h1 + h1.T
h2 = numpy.random.random((npair,npair)) * .1
h2 = h2 + h2.T
cis = fci.direct_spin1.FCI()
e, c = cis.kernel(h1, h2, norb, nelec, verbose=5)
dm1s, dm2s = cis.make_rdm12s(c, norb, nelec)
self.assertAlmostEqual(dm1s[0].trace(), 3, 9)
self.assertAlmostEqual(abs(dm1s[1]).sum(), 0, 9)
self.assertAlmostEqual(abs(numpy.einsum('iijk->jk', dm2s[0])/2-dm1s[0]).sum(), 0, 9)
self.assertAlmostEqual(abs(dm2s[1]).sum(), 0, 9)
self.assertAlmostEqual(abs(dm2s[2]).sum(), 0, 9)
# (6o,6e) ~ 4MB
# (8o,8e) ~ 153MB
# (10o,10e) ~ 4.8GB
# t2(*,ij,kl) = E_i^j E_k^l|0>
def _trans2(fcivec, norb, nelec):
if isinstance(nelec, (int, numpy.integer)):
neleca = nelecb = nelec//2
else:
neleca, nelecb = nelec
link_indexa = fci.cistring.gen_linkstr_index(range(norb), neleca)
link_indexb = fci.cistring.gen_linkstr_index(range(norb), nelecb)
na, nlinka = link_indexa.shape[:2]
nb, nlinkb = link_indexb.shape[:2]
fcivec = fcivec.reshape(na,nb)
t1 = _trans1(fcivec, norb, nelec)
t2 = numpy.zeros((na,nb,norb,norb,norb,norb))
for str0, tab in enumerate(link_indexa):
for a, i, str1, sign in tab:
t2[str1,:,a,i] += sign * t1[str0]
for k in range(na):
for str0, tab in enumerate(link_indexb):
for a, i, str1, sign in tab:
t2[k,str1,a,i] += sign * t1[k,str0]
return t2
def _trans1(fcivec, norb, nelec):
if isinstance(nelec, (int, numpy.integer)):
neleca = nelecb = nelec//2
else:
neleca, nelecb = nelec
link_indexa = fci.cistring.gen_linkstr_index(range(norb), neleca)
link_indexb = fci.cistring.gen_linkstr_index(range(norb), nelecb)
na, nlinka = link_indexa.shape[:2]
nb, nlinkb = link_indexb.shape[:2]
fcivec = fcivec.reshape(na,nb)
t1 = numpy.zeros((na,nb,norb,norb))
for str0, tab in enumerate(link_indexa):
for a, i, str1, sign in tab:
t1[str1,:,a,i] += sign * fcivec[str0]
for k in range(na):
for str0, tab in enumerate(link_indexb):
for a, i, str1, sign in tab:
t1[k,str1,a,i] += sign * fcivec[k,str0]
return t1
#
# NOTE: this rdm3 is defined as
# rdm3(p,q,r,s,t,u) = <p^+ q r^+ s t^+ u>
def make_dm3_o0(fcivec, norb, nelec):
# <0|p^+ q r^+ s|i> <i|t^+ u|0>
t1 = _trans1(fcivec, norb, nelec)
t2 = _trans2(fcivec, norb, nelec)
na, nb = t1.shape[:2]
rdm3 = numpy.dot(t1.reshape(na*nb,-1).T, t2.reshape(na*nb,-1))
return rdm3.reshape((norb,)*6).transpose(1,0,2,3,4,5)
def make_dm4_o0(fcivec, norb, nelec):
# <0|p^+ q r^+ s|i> <i|t^+ u|0>
t2 = _trans2(fcivec, norb, nelec)
na, nb = t2.shape[:2]
rdm4 = numpy.dot(t2.reshape(na*nb,-1).T, t2.reshape(na*nb,-1))
return rdm4.reshape((norb,)*8).transpose(3,2,1,0,4,5,6,7)
# <p^+ q r^+ s t^+ u> => <p^+ r^+ t^+ u s q>
# rdm2 is <p^+ q r^+ s>
def reorder_dm123_o0(rdm1, rdm2, rdm3, inplace=True):
rdm1, rdm2 = fci.rdm.reorder_rdm(rdm1, rdm2, inplace)
if not inplace:
rdm3 = rdm3.copy()
norb = rdm1.shape[0]
for p in range(norb):
for q in range(norb):
for s in range(norb):
rdm3[p,q,q,s] += -rdm2[p,s]
for u in range(norb):
rdm3[p,q,:,:,q,u] += -rdm2[p,u]
for s in range(norb):
rdm3[p,q,:,s,s,:] += -rdm2[p,q]
for q in range(norb):
for s in range(norb):
rdm3[:,q,q,s,s,:] += -rdm1
return rdm1, rdm2, rdm3
# <p^+ q r^+ s t^+ u w^+ v> => <p^+ r^+ t^+ w^+ v u s q>
# rdm2, rdm3 are the (reordered) standard 2-pdm and 3-pdm
def reorder_dm1234_o0(rdm1, rdm2, rdm3, rdm4, inplace=True):
rdm1, rdm2, rdm3 = fci.rdm.reorder_dm123(rdm1, rdm2, rdm3, inplace)
if not inplace:
rdm4 = rdm4.copy()
norb = rdm1.shape[0]
delta = numpy.eye(norb)
rdm4 -= numpy.einsum('qv,pwrstu->pqrstuvw', delta, rdm3)
rdm4 -= numpy.einsum('sv,pqrwtu->pqrstuvw', delta, rdm3)
rdm4 -= numpy.einsum('uv,pqrstw->pqrstuvw', delta, rdm3)
rdm4 -= numpy.einsum('qt,pursvw->pqrstuvw', delta, rdm3)
rdm4 -= numpy.einsum('st,pqruvw->pqrstuvw', delta, rdm3)
rdm4 -= numpy.einsum('qr,pstuvw->pqrstuvw', delta, rdm3)
rdm4 -= numpy.einsum('qr,sv,pwtu', delta, delta, rdm2)
rdm4 -= numpy.einsum('qr,uv,pstw', delta, delta, rdm2)
rdm4 -= numpy.einsum('qt,uv,pwrs', delta, delta, rdm2)
rdm4 -= numpy.einsum('qt,sv,purw', delta, delta, rdm2)
rdm4 -= numpy.einsum('st,qv,pwru', delta, delta, rdm2)
rdm4 -= numpy.einsum('st,uv,pqrw', delta, delta, rdm2)
rdm4 -= numpy.einsum('qr,st,puvw', delta, delta, rdm2)
rdm4 -= numpy.einsum('qr,st,uv,pw->pqrstuvw', delta, delta, delta, rdm1)
return rdm1, rdm2, rdm3, rdm4
if __name__ == "__main__":
print("Full Tests for fci.rdm")
unittest.main()
| 41.423868
| 92
| 0.606298
|
db26cc130d967632c2c4bc5dc861b68df1ac89ac
| 8,474
|
py
|
Python
|
btsprice/task_exchanges.py
|
pch957/btsprice
|
8a6913dfc0d74e668e116855ea8bb1caf3af6c04
|
[
"MIT"
] | 18
|
2016-09-16T16:07:35.000Z
|
2020-08-03T13:14:56.000Z
|
btsprice/task_exchanges.py
|
roelandp/btsprice
|
ad2f4d6d694a4ac71d5b227a22731160f700323b
|
[
"MIT"
] | 5
|
2017-08-31T00:14:02.000Z
|
2019-10-18T12:44:22.000Z
|
btsprice/task_exchanges.py
|
roelandp/btsprice
|
ad2f4d6d694a4ac71d5b227a22731160f700323b
|
[
"MIT"
] | 20
|
2016-06-27T09:46:18.000Z
|
2020-10-26T05:17:47.000Z
|
# -*- coding: utf-8 -*-
from btsprice.exchanges import Exchanges
from btsprice.yahoo import Yahoo
from btsprice.sina import Sina
import time
import asyncio
class TaskExchanges(object):
    def __init__(self, data=None):
        if data is None:
            data = {}
self.period = 120
self.exchanges = Exchanges()
self.yahoo = Yahoo()
self.sina = Sina()
self.handler = None
data_type = ["orderbook", "ticker", "rate"]
for _type in data_type:
if _type not in data:
data[_type] = {}
self.data = data
def set_period(self, sec):
self.period = sec
@asyncio.coroutine
def fetch_orderbook(self, name, quote, coro, *args):
time_end = int(time.time())
orderbook = self.data["orderbook"]
while True:
time_begin = time_end
_orderbook = yield from coro(*args)
time_end = int(time.time())
if _orderbook:
orderbook[name] = _orderbook
orderbook[name]["quote"] = quote
if "time" not in _orderbook:
orderbook[name]["time"] = time_end
if self.handler:
self.handler("orderbook", name, orderbook[name])
time_left = self.period - (time_end - time_begin)
if time_left <= 1:
time_left = 1
time_end += time_left
yield from asyncio.sleep(time_left)
@asyncio.coroutine
def fetch_ticker(self, name, quote, coro, *args):
time_end = int(time.time())
ticker = self.data["ticker"]
while True:
time_begin = time_end
_ticker = yield from coro(*args)
time_end = int(time.time())
if _ticker:
_ticker["quote"] = quote
if "time" not in _ticker:
_ticker["time"] = time_end
ticker[name] = _ticker
if self.handler:
self.handler("ticker", name, _ticker)
time_left = self.period - (time_end - time_begin)
if time_left <= 1:
time_left = 1
time_end += time_left
yield from asyncio.sleep(time_left)
@asyncio.coroutine
def fetch_yahoo_rate(self):
time_end = int(time.time())
rate = self.data["rate"]
while True:
time_begin = time_end
_rate = yield from self.yahoo.fetch_price()
time_end = int(time.time())
if _rate:
_rate["time"] = time_end
rate["yahoo"] = _rate
if self.handler:
self.handler("rate", "yahoo", _rate)
time_left = self.period - (time_end - time_begin)
if time_left <= 1:
time_left = 1
time_end += time_left
yield from asyncio.sleep(time_left)
@asyncio.coroutine
def fetch_sina_rate(self):
time_end = int(time.time())
rate = self.data["rate"]
while True:
time_begin = time_end
_rate = yield from self.sina.fetch_price()
time_end = int(time.time())
if _rate:
_rate["time"] = time_end
rate["Sina"] = _rate
if self.handler:
self.handler("rate", "Sina", _rate)
time_left = self.period - (time_end - time_begin)
if time_left <= 1:
time_left = 1
time_end += time_left
yield from asyncio.sleep(time_left)
def run_tasks_ticker(self, loop):
return [
loop.create_task(self.fetch_ticker(
"poloniex", "USD",
self.exchanges.ticker_poloniex, "USDT", "BTC")),
# loop.create_task(self.fetch_ticker(
# "btce", "USD",
# self.exchanges.ticker_btce, "usd", "btc")),
loop.create_task(self.fetch_ticker(
"bitstamp", "USD",
self.exchanges.ticker_bitstamp, "usd", "btc")),
loop.create_task(self.fetch_ticker(
"gdax", "USD",
self.exchanges.ticker_gdax, "usd", "btc")),
# loop.create_task(self.fetch_ticker(
# "btcchina", "CNY",
# self.exchanges.ticker_btcchina, "cny", "btc")),
# loop.create_task(self.fetch_ticker(
# "huobi", "CNY",
# self.exchanges.ticker_huobi, "btc")),
# loop.create_task(self.fetch_ticker(
# "okcoin_cn", "CNY",
# self.exchanges.ticker_okcoin_cn, "cny", "btc")),
loop.create_task(self.fetch_ticker(
"okcoin_com", "USD",
self.exchanges.ticker_okcoin_com, "usd", "btc")),
loop.create_task(self.fetch_ticker(
"bitfinex", "USD",
self.exchanges.ticker_bitfinex, "usd", "btc")),
loop.create_task(self.fetch_ticker(
"kraken", "EUR",
self.exchanges.ticker_kraken, "eur", "btc")),
loop.create_task(self.fetch_ticker(
"bitflyer_usd", "USD",
self.exchanges.ticker_bitflyer, "usd", "btc")),
loop.create_task(self.fetch_ticker(
"bitflyer_jpy", "JPY",
self.exchanges.ticker_bitflyer, "jpy", "btc")),
]
def run_tasks_orderbook(self, loop):
return [
loop.create_task(self.fetch_orderbook(
"btsbots_cny", "CNY",
self.exchanges.orderbook_btsbots, "CNY", "BTS")),
loop.create_task(self.fetch_orderbook(
"btsbots_usd", "USD",
self.exchanges.orderbook_btsbots, "USD", "BTS")),
loop.create_task(self.fetch_orderbook(
"btsbots_open.btc", "BTC",
self.exchanges.orderbook_btsbots, "OPEN.BTC", "BTS")),
loop.create_task(self.fetch_orderbook(
"aex_btc", "BTC",
self.exchanges.orderbook_aex, "btc", "bts")),
loop.create_task(self.fetch_orderbook(
"zb_btc", "BTC",
self.exchanges.orderbook_zb, "btc", "bts")),
loop.create_task(self.fetch_orderbook(
"zb_usdt", "USD",
self.exchanges.orderbook_zb, "usdt", "bts")),
loop.create_task(self.fetch_orderbook(
"lbank_btc", "BTC",
self.exchanges.orderbook_lbank, "btc", "bts")),
loop.create_task(self.fetch_orderbook(
"binance_btc", "BTC",
self.exchanges.orderbook_binance, "btc", "bts")),
loop.create_task(self.fetch_orderbook(
"poloniex_btc", "BTC",
self.exchanges.orderbook_poloniex, "btc", "bts"))
# loop.create_task(self.fetch_orderbook(
# "yunbi_cny", "CNY",
# self.exchanges.orderbook_yunbi, "cny", "bts")),
# loop.create_task(self.fetch_orderbook(
# "jubi_cny", "CNY",
# self.exchanges.orderbook_jubi, "cny", "bts")),
# loop.create_task(self.fetch_orderbook(
# "19800_cny", "CNY",
# self.exchanges.orderbook_19800, "cny", "bts")),
# loop.create_task(self.fetch_orderbook(
# "bittrex_btc", "BTC",
# self.exchanges.orderbook_bittrex, "btc", "bts")),
]
def run_tasks(self, loop):
return [
loop.create_task(self.fetch_yahoo_rate()),
loop.create_task(self.fetch_sina_rate())
] + \
self.run_tasks_orderbook(loop) + \
self.run_tasks_ticker(loop)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
task_exchanges = TaskExchanges()
task_exchanges.set_period(20)
tasks = task_exchanges.run_tasks(loop)
@asyncio.coroutine
def task_display():
my_data = task_exchanges.data
while True:
for _type in my_data:
for _name in my_data[_type]:
if "done" not in my_data[_type][_name]:
print("got %s: %s" % (_type, _name))
my_data[_type][_name]["done"] = None
yield from asyncio.sleep(1)
tasks += [loop.create_task(task_display())]
loop.run_until_complete(asyncio.wait(tasks))
loop.run_forever()
loop.close()
| 39.050691
| 70
| 0.523956
|
33467d45cd4e0000769854a2d006581364c6400c
| 487
|
py
|
Python
|
tests/journal.api/cout.py
|
rtburns-jpl/pyre
|
ffc4fc1b2936e355f709d084eb4055954960b3a2
|
[
"BSD-3-Clause"
] | null | null | null |
tests/journal.api/cout.py
|
rtburns-jpl/pyre
|
ffc4fc1b2936e355f709d084eb4055954960b3a2
|
[
"BSD-3-Clause"
] | 1
|
2021-06-10T23:42:13.000Z
|
2021-06-10T23:42:13.000Z
|
tests/journal.api/cout.py
|
jlmaurer/pyre
|
6af38a83621d7d6228d147b4bb94f97fbb10f6e2
|
[
"BSD-3-Clause"
] | 2
|
2020-08-31T18:07:52.000Z
|
2021-12-10T08:54:39.000Z
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2020 all rights reserved
def test():
"""
Verify that the console is accessible
"""
# access
import journal
# instantiate the console
console = journal.cout()
# verify its name is what we expect
assert console.name == "cout"
# all done
return
# main
if __name__ == "__main__":
# run the test
test()
# end of file
| 15.709677
| 53
| 0.605749
|
2163f91f322dec2ee3a35e49c9d5c3f280532657
| 3,606
|
py
|
Python
|
unit_tests/data/test_process_message_data_valid.py
|
LandRegistry/maintain-feeder
|
b574dc56fe8d07d8842d7a9caa5bb479a79daae3
|
[
"MIT"
] | null | null | null |
unit_tests/data/test_process_message_data_valid.py
|
LandRegistry/maintain-feeder
|
b574dc56fe8d07d8842d7a9caa5bb479a79daae3
|
[
"MIT"
] | null | null | null |
unit_tests/data/test_process_message_data_valid.py
|
LandRegistry/maintain-feeder
|
b574dc56fe8d07d8842d7a9caa5bb479a79daae3
|
[
"MIT"
] | 1
|
2021-04-11T05:25:00.000Z
|
2021-04-11T05:25:00.000Z
|
process_message_null = {
'entry-timestamp': None,
'action-type': 'NEW',
'item-signature': None,
'entry-number': 19,
'key': None,
'item-hash': None
}
process_message_valid = {
"item-hash": "test",
"item-signature": "test",
"key": "12345678",
"action-type": "NEW",
"entry-number": 19,
"entry-timestamp": "2017-04-27 11:09:28.996676",
"item": {
"geometry": {
"type": "FeatureCollection",
"features": [{
"type": "Feature",
"geometry": {
"crs": {
"type": "name",
"properties": {
"name": "EPSG:27700"
}
},
"type": "Polygon",
"coordinates": [[[294915.40831620456, 93347.98452708151],
[294908.2551678813, 93313.65262390542],
[294917.2574326514, 93310.89385218851],
[294924.7282665854, 93345.54135279212],
[294915.40831620456, 93347.98452708151]]]
},
"properties": {
"id": "26"
}
}]
},
"instrument": "Deed",
"start-date": "2017-01-01",
"charge-type": "Planning",
"local-land-charge": "12345704",
"registration-date": "2017-01-01",
"statutory-provision": "Planning Act",
"charge-creation-date": "2014-07-20",
"originating-authority": "ExeterCityCouncil",
"further-information-location": "local-land-charges@exeter.gov.uk",
"charge-geographic-description": "Exeter-220001",
"further-information-reference": "PLA/220026"
}
}
process_message_valid_vary = {
"item-hash": "test",
"item-signature": "test",
"key": "12345678",
"action-type": "UPDATED",
"entry-number": 19,
"item-changes": {
"something": "changed"
},
"entry-timestamp": "2017-04-27 11:09:28.996676",
"item": {
"geometry": {
"type": "FeatureCollection",
"features": [{
"type": "Feature",
"geometry": {
"crs": {
"type": "name",
"properties": {
"name": "EPSG:27700"
}
},
"type": "Polygon",
"coordinates": [[[294915.40831620456, 93347.98452708151],
[294908.2551678813, 93313.65262390542],
[294917.2574326514, 93310.89385218851],
[294924.7282665854, 93345.54135279212],
[294915.40831620456, 93347.98452708151]]]
},
"properties": {
"id": "26"
}
}]
},
"instrument": "Deed",
"start-date": "2017-01-01",
"end-date": "2018-01-01",
"charge-type": "Planning",
"local-land-charge": "12345704",
"registration-date": "2017-01-01",
"statutory-provision": "Planning Act",
"charge-creation-date": "2014-07-20",
"originating-authority": "ExeterCityCouncil",
"further-information-location": "local-land-charges@exeter.gov.uk",
"charge-geographic-description": "Exeter-220001",
"further-information-reference": "PLA/220026"
}
}
| 35.009709
| 78
| 0.446201
|
797ea541b46108224b378f8d2fddfed7a2de7611
| 1,346
|
py
|
Python
|
isplogger/logger.py
|
tjkessler/ISPLogger
|
46e2f57ebc456322ef0bf57f08f2313ea9f04d18
|
[
"MIT"
] | null | null | null |
isplogger/logger.py
|
tjkessler/ISPLogger
|
46e2f57ebc456322ef0bf57f08f2313ea9f04d18
|
[
"MIT"
] | null | null | null |
isplogger/logger.py
|
tjkessler/ISPLogger
|
46e2f57ebc456322ef0bf57f08f2313ea9f04d18
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# isplogger/logger.py
# v.0.2.1
# Developed in 2019 by Travis Kessler <travis.j.kessler@gmail.com>
#
# Console/text file logging
#
# stdlib imports
import logging
import datetime
import os
_MSG_FORMAT = '%(asctime)s [%(host_port)s] [%(levelname)s] %(message)s'
FILE_FORMAT = '{}.log'.format(datetime.datetime.now().strftime(
'%Y%m%d%H%M%S'
))
LOGGER = logging.getLogger('down_detector')
__stream_handler = logging.StreamHandler()
__stream_handler.setFormatter(logging.Formatter(
_MSG_FORMAT, '[%Y-%m-%d] [%H:%M:%S]'
))
LOGGER.addHandler(__stream_handler)
LOGGER.setLevel(logging.DEBUG)
def add_file_handler(log_object: logging.Logger, log_dir: str='./logs',
filename: str=FILE_FORMAT):
'''add_file_handler: adds file saving to supplied logging.Logger object
Args:
log_object (logging.Logger): logger object to add handler to
log_dir (str): directory to place logs
filename (str): name of the log file; defaults to date/time-stamp
'''
if not os.path.exists(log_dir):
os.mkdir(log_dir)
__file_handler = logging.FileHandler(os.path.join(log_dir, filename))
__file_handler.setFormatter(logging.Formatter(
_MSG_FORMAT, '[%Y-%m-%d] [%H:%M:%S]'
))
log_object.addHandler(__file_handler)
return
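# Usage sketch (illustrative addition, not part of the original module); the
# host/port string is a placeholder, and 'host_port' must be supplied through
# `extra` because _MSG_FORMAT references it.
if __name__ == '__main__':
    add_file_handler(LOGGER)
    LOGGER.info('connectivity check passed', extra={'host_port': '127.0.0.1:80'})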
| 28.041667
| 75
| 0.684993
|
58a87b706172a59bd6114c14f2fd15c8a17e4e89
| 17,360
|
py
|
Python
|
ryu/app/experiments/PureSDNPkt/NormalFattree.py
|
Helloworld1995/Ryu_SDN_Controller
|
2680f967debca361adc6ff14ddadcbbcde0c7082
|
[
"Apache-2.0"
] | 1
|
2021-03-11T01:47:35.000Z
|
2021-03-11T01:47:35.000Z
|
ryu/app/experiments/PureSDNPkt/NormalFattree.py
|
Helloworld1995/Ryu_SDN_Controller
|
2680f967debca361adc6ff14ddadcbbcde0c7082
|
[
"Apache-2.0"
] | null | null | null |
ryu/app/experiments/PureSDNPkt/NormalFattree.py
|
Helloworld1995/Ryu_SDN_Controller
|
2680f967debca361adc6ff14ddadcbbcde0c7082
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2016 Huang MaChi at Chongqing University
# of Posts and Telecommunications, China.
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traffic_generation as t
import time
from threading import Thread
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import Link, Intf, TCLink
from mininet.topo import Topo
import argparse
import logging
import os
import sys
import signal
from ryu.app.experiments.readfile import readIpeers
from subprocess import Popen
from multiprocessing import Process
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
import ryu.app.experiments.iperf_peers_fsize as peers1
import random
parser = argparse.ArgumentParser(description="Parameters importation")
parser.add_argument('--k', dest='k', type=int, default=4, choices=[4, 8], help="Switch fanout number")
parser.add_argument('--duration', dest='duration', type=int, default=60, help="Duration (sec) for each iperf traffic generation")
parser.add_argument('--dir', dest='output_dir', help="Directory to store outputs")
parser.add_argument('--cpu', dest='cpu', type=float, default=1.0, help='Total CPU to allocate to hosts')
parser.add_argument('--fnum', dest='flow_num', type=int, default=10, help='number of traffic')
parser.add_argument('--miceIndex', dest='miceIndex', type=float, default=0.7, help='miceIndex')
args = parser.parse_args()
class Fattree(Topo):
"""
Class of Fattree Topology.
"""
CoreSwitchList = []
AggSwitchList = []
EdgeSwitchList = []
HostList = []
def __init__(self, k, density):
self.pod = k
self.density = density
self.iCoreLayerSwitch = (k/2)**2
self.iAggLayerSwitch = k*k/2
self.iEdgeLayerSwitch = k*k/2
self.iHost = self.iEdgeLayerSwitch * density
# Init Topo
Topo.__init__(self)
def createNodes(self):
self.createCoreLayerSwitch(self.iCoreLayerSwitch)
self.createAggLayerSwitch(self.iAggLayerSwitch)
self.createEdgeLayerSwitch(self.iEdgeLayerSwitch)
self.createHost(self.iHost)
# Create Switch and Host
def _addSwitch(self, number, level, switch_list):
"""
Create switches.
"""
for i in xrange(1, number+1):
PREFIX = str(level) + "00"
if i >= 10:
PREFIX = str(level) + "0"
switch_list.append(self.addSwitch(PREFIX + str(i)))
def createCoreLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 1, self.CoreSwitchList)
def createAggLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 2, self.AggSwitchList)
def createEdgeLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 3, self.EdgeSwitchList)
def createHost(self, NUMBER):
"""
Create hosts.
"""
for i in xrange(1, NUMBER+1):
if i >= 100:
PREFIX = "h"
elif i >= 10:
PREFIX = "h0"
else:
PREFIX = "h00"
self.HostList.append(self.addHost(PREFIX + str(i), cpu=1.0/NUMBER))
def createLinks(self, bw_c2a=1000, bw_a2e=500, bw_e2h=250):
"""
Add network links.
"""
# Core to Agg
end = self.pod/2
for x in xrange(0, self.iAggLayerSwitch, end):
for i in xrange(0, end):
for j in xrange(0, end):
self.addLink(
self.CoreSwitchList[i*end+j],
self.AggSwitchList[x+i],
bw=bw_c2a, max_queue_size=1000) # use_htb=False
# Agg to Edge
for x in xrange(0, self.iAggLayerSwitch, end):
for i in xrange(0, end):
for j in xrange(0, end):
self.addLink(
self.AggSwitchList[x+i], self.EdgeSwitchList[x+j],
bw=bw_a2e, max_queue_size=1000) # use_htb=False
# Edge to Host
for x in xrange(0, self.iEdgeLayerSwitch):
for i in xrange(0, self.density):
self.addLink(
self.EdgeSwitchList[x],
self.HostList[self.density * x + i],
bw=bw_e2h, max_queue_size=1000) # use_htb=False
def set_ovs_protocol_13(self,):
"""
Set the OpenFlow version for switches.
"""
self._set_ovs_protocol_13(self.CoreSwitchList)
self._set_ovs_protocol_13(self.AggSwitchList)
self._set_ovs_protocol_13(self.EdgeSwitchList)
def _set_ovs_protocol_13(self, sw_list):
for sw in sw_list:
cmd = "sudo ovs-vsctl set bridge %s protocols=OpenFlow13" % sw
os.system(cmd)
def set_host_ip(net, topo):
hostlist = []
for k in xrange(len(topo.HostList)):
hostlist.append(net.get(topo.HostList[k]))
i = 1
j = 1
for host in hostlist:
host.setIP("10.%d.0.%d" % (i, j))
j += 1
if j == topo.density+1:
j = 1
i += 1
def create_subnetList(topo, num):
"""
Create the subnet list of the certain Pod.
"""
subnetList = []
remainder = num % (topo.pod/2)
if topo.pod == 4:
if remainder == 0:
subnetList = [num-1, num]
elif remainder == 1:
subnetList = [num, num+1]
else:
pass
elif topo.pod == 8:
if remainder == 0:
subnetList = [num-3, num-2, num-1, num]
elif remainder == 1:
subnetList = [num, num+1, num+2, num+3]
elif remainder == 2:
subnetList = [num-1, num, num+1, num+2]
elif remainder == 3:
subnetList = [num-2, num-1, num, num+1]
else:
pass
else:
pass
return subnetList
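# Worked example for create_subnetList() above (comment only, added for
# clarity): with topo.pod == 4, num values 1 and 2 both return [1, 2], while
# 3 and 4 both return [3, 4], so the two aggregation switches of a pod share
# that pod's two host subnets.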
def install_proactive(net, topo):
"""
Install proactive flow entries for switches.
"""
# Edge Switch
for sw in topo.EdgeSwitchList:
num = int(sw[-2:])
# Downstream.
for i in xrange(1, topo.density + 1):
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=40,arp, \
nw_dst=10.%d.0.%d,actions=output:%d'" % (sw, num, i, topo.pod / 2 + i)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,ip, \
nw_dst=10.%d.0.%d,actions=output:%d'" % (sw, num, i, topo.pod / 2 + i)
os.system(cmd)
# Upstream
if topo.pod == 4:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2'" % sw
elif topo.pod == 8:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2,\
bucket=output:3,bucket=output:4'" % sw
else:
pass
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,arp,actions=group:1'" % sw
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,ip,nw_proto=1,actions=group:1'" % sw
os.system(cmd)
# Aggregate Switch
for sw in topo.AggSwitchList:
num = int(sw[-2:])
subnetList = create_subnetList(topo, num)
k = 1
for i in subnetList:
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=40,arp, \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, topo.pod / 2 + k)
os.system(cmd)
# cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
# 'table=0,idle_timeout=0,hard_timeout=0,priority=10,mpls, \
# mpls_label=%d, actions=pop_mpls:0x0800,output:%d'" % (sw, topo.pod / 2 + k, topo.pod / 2 + k)
# os.system(cmd)
# cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
# 'table=0,idle_timeout=0,hard_timeout=0,priority=10,mpls, \
# mpls_label=%d,actions=pop_mpls:0x8847,output:%d'" % (sw, k, k)
# os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=40,ip,nw_proto=1 \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, topo.pod / 2 + k)
os.system(cmd)
k += 1
# Upstream
if topo.pod == 4:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2'" % sw
elif topo.pod == 8:
cmd = "ovs-ofctl add-group %s -O OpenFlow13 \
'group_id=1,type=select,bucket=output:1,bucket=output:2,\
bucket=output:3,bucket=output:4'" % sw
else:
pass
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,arp,actions=group:1'" % sw
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,priority=10,ip,nw_proto=1 actions=group:1'" % sw
os.system(cmd)
# Core Switch
for sw in topo.CoreSwitchList:
j = 1
k = 1
for i in xrange(1, len(topo.EdgeSwitchList)+1):
# cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
# 'table=0,idle_timeout=0,hard_timeout=0,priority=10,mpls, \
# mpls_label=%d,actions=pop_mpls:0x8847,output:%d'" % (sw, j, j)
# os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,arp, \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, j)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,ip,nw_proto=1 \
nw_dst=10.%d.0.0/16, actions=output:%d'" % (sw, i, j)
os.system(cmd)
k += 1
if k == topo.pod/2 + 1:
j += 1
k = 1
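# Example of one expanded rule (illustrative, for edge switch '3001' in a pod=4,
# density=2 run; host 10.1.0.1 is reached through port pod/2 + 1 = 3). The incidental
# whitespace introduced by the line continuations above is omitted here for readability:
#   ovs-ofctl add-flow 3001 -O OpenFlow13 \
#     'table=0,idle_timeout=0,hard_timeout=0,priority=40,arp,nw_dst=10.1.0.1,actions=output:3'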
def iperfTest(net, topo):
"""
Start iperf test.
"""
h001, h015, h016 = net.get(
topo.HostList[0], topo.HostList[14], topo.HostList[15])
# iperf Server
h001.popen('iperf -s -u -i 1 > iperf_server_differentPod_result', shell=True)
# iperf Server
h015.popen('iperf -s -u -i 1 > iperf_server_samePod_result', shell=True)
# iperf Client
h016.cmdPrint('iperf -c ' + h001.IP() + ' -u -t 10 -i 1 -b 10m')
h016.cmdPrint('iperf -c ' + h015.IP() + ' -u -t 10 -i 1 -b 10m')
def monitor_devs_ng(fname="./txrate.txt", interval_sec=0.1):
"""
Use the bwm-ng tool to collect per-interface transmit-rate statistics.
bwm-ng mode: rate; the sampling interval is interval_sec seconds
(passed to bwm-ng in milliseconds via its -t option).
"""
cmd = "sleep 1; bwm-ng -t %s -o csv -u bits -T rate -C ',' > %s" % (interval_sec * 1000, fname)
Popen(cmd, shell=True).wait()
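# Typical usage, as done in traffic_generation() below: run the collector in a
# separate process for the lifetime of the experiment.
#   monitor = Process(target=monitor_devs_ng, args=('%s/bwmng.txt' % args.output_dir, 1.0))
#   monitor.start()
#   ...  # generate traffic
#   monitor.terminate()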
def traffic_generation1(net,flows_peers,ping_peers):
"""
Generate traffic and measure the performance of the network.
"""
# 1. Start iperf (elephant flows).
# Start the servers.
serversList = set([peer[1] for peer in flows_peers])
for server in serversList:
# filename = server[1:]
server = net.get(server)
# server.cmd("iperf -s > %s/%s &" % (args.output_dir, 'server'+filename+'.txt'))
server.cmd("iperf -s > /dev/null &") # Its statistics is useless, just throw away.
time.sleep(3)
# Start the clients.
for src, dest in flows_peers:
time.sleep(1)
server = net.get(dest)
client = net.get(src)
client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), 3000))
pingTest(net,ping_peers)
time.sleep(5)
monitor = Process(target=monitor_devs_ng, args=('%s/bwmng.txt' % args.output_dir, 1.0))
monitor.start()
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), 2500) ) # Its statistics is useless, just throw away. 1990 just means a great
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), random.randint(10,60)))
# time.sleep(1)
# monitor = Process(target = monitor_devs_ng, args = ('%s/bwmng.txt' % args.output_dir, 1.0))
# Let the traffic stabilize, then run the experiment for the configured duration.
time.sleep(args.duration + 5)
monitor.terminate()
def traffic_generation(net,flows_peers,monitor1,monitor):
"""
Generate traffic and measure the performance of the network.
"""
# 1. Start iperf (elephant flows).
# Start the servers.
serversList = set([peer[1] for peer in flows_peers])
for server in serversList:
# filename = server[1:]
server = net.get(server)
# server.cmd("iperf -s > /%s/%s &" % (args.output_dir, 'server'+str(count)+'.txt'))
server.cmd("iperf -s >> /dev/null &") # Its statistics is useless, just throw away.
time.sleep(3)
# Start the clients.
monitor = Process(target=monitor_devs_ng, args=('%s/bwmng.txt' % args.output_dir, 1.0))
monitor1.start()
count=0
for src, dest in flows_peers:
count+=1
time.sleep(1)
server = net.get(dest)
client = net.get(src)
Thread(target=iperfC,args=(client,server.IP(),3000)).start()
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), 60)) # Its statistics is useless, just throw away. 1990 just means a great
# client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), random.randint(10,60)))
# time.sleep(1)
# monitor = Process(target = monitor_devs_ng, args = ('%s/bwmng.txt' % args.output_dir, 1.0))
# Wait for the traffic to stabilize before starting the bandwidth monitor,
# then run the experiment for the configured duration.
time.sleep(60)
monitor.start()
time.sleep(args.duration + 5)
monitor.terminate()
monitor1.terminate()
def iperfC(client,ip,time):
client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (ip, time))
def pingTest(net,flows_peers):
for src,dst in flows_peers:
server=net.get(dst)
client=net.get(src)
# client.cmd('ping %s -c %d > %s/pingTest/ping_%s_%s_%d.txt &'%(server.IP(),60,args.output_dir,src,dst,count))
client.cmd('ping -c %d -i 0.1 -n -q %s>> %s/%s &' % (args.duration*10,server.IP(), args.output_dir,'successive_packets.txt'))
def removeOldFiles():
cmd="sudo rm -f NormalroutingResult/*"
os.system(cmd)
cmd="sudo rm -f SRroutingResult/*"
os.system(cmd)
cmd="sudo rm -f resultSolve/count_text.txt"
os.system(cmd)
def createTopo(pod, density, ip="192.168.16.138", port=6653, bw_c2a=100, bw_a2e=100, bw_e2h=100):
"""
Create network topology and run the Mininet.
"""
topo = Fattree(pod, density)
topo.createNodes()
topo.createLinks(bw_c2a=bw_c2a, bw_a2e=bw_a2e, bw_e2h=bw_e2h)
# Start Mininet.
CONTROLLER_IP = ip
CONTROLLER_PORT = port
net = Mininet(topo=topo, link=TCLink, controller=None, autoSetMacs=True)
net.addController(
'controller', controller=RemoteController,
ip=CONTROLLER_IP, port=CONTROLLER_PORT)
net.start()
# Set OVS's protocol as OF13.
topo.set_ovs_protocol_13()
# Set hosts IP addresses.
set_host_ip(net, topo)
# Install proactive flow entries
install_proactive(net, topo)
# Start the Ryu controller.
k_paths = args.k ** 2 / 4
fanout = args.k
# Controller_Ryu = Popen("ryu-manager --observe-links Normalrouting.py --k_paths=%d --weight=hop --fanout=%d --dir=%s --miceIndex=%f" %(k_paths, fanout,args.output_dir[2:],args.miceIndex), shell=True, preexec_fn=os.setsid)
Controller_Ryu = Popen("ryu-manager --observe-links Rerouting.py --k_paths=%d --weight=hop --fanout=%d" % (k_paths, fanout),shell=True, preexec_fn=os.setsid)
time.sleep(45)
# t.traffic_generation(net, iperf_peers, ping_peers)
# t.first_icmp_delay(net,peers1.iperf_peers)
CLI(net)
closeable(net,Controller_Ryu)
def closeable(net,Controller_Ryu):
os.system('killall ping')
os.system('killall iperf')
# CLI(net)
os.killpg(Controller_Ryu.pid, signal.SIGKILL)
net.stop()
if __name__ == '__main__':
setLogLevel('info')
if os.getuid() != 0:
logging.warning("You are NOT root; this script must be run as root.")
else:
# removeOldFiles()
# logging.debug("remove old result files")
# time.sleep(3)
createTopo(4, 2)
# createTopo(8, 4)
| 38.75
| 226
| 0.589689
|
17016be12ffacf50274dcef0241f55934c066938
| 352,457
|
py
|
Python
|
runtime/Python3/test/parser/cparser.py
|
lwehmeier/antlr4
|
f7f1b253125a7dba8ffdc65bf7985b3b591adfc8
|
[
"BSD-3-Clause"
] | 3
|
2020-05-19T05:16:31.000Z
|
2020-07-10T02:52:20.000Z
|
runtime/Python3/test/parser/cparser.py
|
lwehmeier/antlr4
|
f7f1b253125a7dba8ffdc65bf7985b3b591adfc8
|
[
"BSD-3-Clause"
] | 2
|
2019-02-16T05:28:36.000Z
|
2020-06-05T02:27:25.000Z
|
runtime/Python3/test/parser/cparser.py
|
lwehmeier/antlr4
|
f7f1b253125a7dba8ffdc65bf7985b3b591adfc8
|
[
"BSD-3-Clause"
] | 7
|
2016-11-05T23:59:44.000Z
|
2019-12-12T18:21:51.000Z
|
# Generated from C.bnf by ANTLR 4.5.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3s")
buf.write("\u04e9\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4")
buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64")
buf.write("\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t")
buf.write(";\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\t")
buf.write("D\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\tL\4M\t")
buf.write("M\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\tU\3\2")
buf.write("\3\2\3\2\6\2\u00ae\n\2\r\2\16\2\u00af\3\2\3\2\3\2\3\2")
buf.write("\3\2\3\2\5\2\u00b8\n\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2")
buf.write("\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\5\2\u00cc\n\2")
buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\7")
buf.write("\4\u00db\n\4\f\4\16\4\u00de\13\4\3\5\3\5\3\5\3\5\3\5\3")
buf.write("\5\3\5\5\5\u00e7\n\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3")
buf.write("\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6")
buf.write("\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6\u010b")
buf.write("\n\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6\u0115\n\6\3\6")
buf.write("\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\7\6\u0122\n\6")
buf.write("\f\6\16\6\u0125\13\6\3\7\3\7\3\7\3\7\3\7\3\7\7\7\u012d")
buf.write("\n\7\f\7\16\7\u0130\13\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3")
buf.write("\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b")
buf.write("\3\b\5\b\u0148\n\b\3\t\3\t\3\n\3\n\3\n\3\n\3\n\3\n\3\n")
buf.write("\3\n\3\n\3\n\3\n\3\n\5\n\u0158\n\n\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\7\13\u0166\n")
buf.write("\13\f\13\16\13\u0169\13\13\3\f\3\f\3\f\3\f\3\f\3\f\3\f")
buf.write("\3\f\3\f\7\f\u0174\n\f\f\f\16\f\u0177\13\f\3\r\3\r\3\r")
buf.write("\3\r\3\r\3\r\3\r\3\r\3\r\7\r\u0182\n\r\f\r\16\r\u0185")
buf.write("\13\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16")
buf.write("\3\16\3\16\3\16\3\16\3\16\7\16\u0196\n\16\f\16\16\16\u0199")
buf.write("\13\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\7")
buf.write("\17\u01a4\n\17\f\17\16\17\u01a7\13\17\3\20\3\20\3\20\3")
buf.write("\20\3\20\3\20\7\20\u01af\n\20\f\20\16\20\u01b2\13\20\3")
buf.write("\21\3\21\3\21\3\21\3\21\3\21\7\21\u01ba\n\21\f\21\16\21")
buf.write("\u01bd\13\21\3\22\3\22\3\22\3\22\3\22\3\22\7\22\u01c5")
buf.write("\n\22\f\22\16\22\u01c8\13\22\3\23\3\23\3\23\3\23\3\23")
buf.write("\3\23\7\23\u01d0\n\23\f\23\16\23\u01d3\13\23\3\24\3\24")
buf.write("\3\24\3\24\3\24\3\24\7\24\u01db\n\24\f\24\16\24\u01de")
buf.write("\13\24\3\25\3\25\3\25\3\25\3\25\3\25\5\25\u01e6\n\25\3")
buf.write("\26\3\26\3\26\3\26\3\26\5\26\u01ed\n\26\3\27\3\27\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\7\30\u01f7\n\30\f\30\16\30\u01fa")
buf.write("\13\30\3\31\3\31\3\32\3\32\5\32\u0200\n\32\3\32\3\32\3")
buf.write("\32\5\32\u0205\n\32\3\33\6\33\u0208\n\33\r\33\16\33\u0209")
buf.write("\3\34\6\34\u020d\n\34\r\34\16\34\u020e\3\35\3\35\3\35")
buf.write("\3\35\3\35\5\35\u0216\n\35\3\36\3\36\3\36\3\36\3\36\3")
buf.write("\36\7\36\u021e\n\36\f\36\16\36\u0221\13\36\3\37\3\37\3")
buf.write("\37\3\37\3\37\5\37\u0228\n\37\3 \3 \3!\3!\3!\3!\3!\3!")
buf.write("\3!\3!\3!\3!\3!\3!\3!\3!\5!\u023a\n!\3\"\3\"\5\"\u023e")
buf.write("\n\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u0247\n\"\3#\3#\3")
buf.write("$\3$\3$\3$\3$\7$\u0250\n$\f$\16$\u0253\13$\3%\3%\5%\u0257")
buf.write("\n%\3%\3%\3%\5%\u025c\n%\3&\3&\5&\u0260\n&\3&\3&\5&\u0264")
buf.write("\n&\5&\u0266\n&\3\'\3\'\3\'\3\'\3\'\3\'\7\'\u026e\n\'")
buf.write("\f\'\16\'\u0271\13\'\3(\3(\5(\u0275\n(\3(\3(\5(\u0279")
buf.write("\n(\3)\3)\5)\u027d\n)\3)\3)\3)\3)\3)\3)\5)\u0285\n)\3")
buf.write(")\3)\3)\3)\3)\3)\3)\5)\u028e\n)\3*\3*\3*\3*\3*\3*\7*\u0296")
buf.write("\n*\f*\16*\u0299\13*\3+\3+\3+\3+\3+\5+\u02a0\n+\3,\3,")
buf.write("\3-\3-\3-\3-\3-\3.\3.\3/\3/\3/\3/\3/\3/\5/\u02b1\n/\3")
buf.write("\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60\5\60")
buf.write("\u02bd\n\60\3\61\5\61\u02c0\n\61\3\61\3\61\7\61\u02c4")
buf.write("\n\61\f\61\16\61\u02c7\13\61\3\62\3\62\3\62\3\62\3\62")
buf.write("\3\62\5\62\u02cf\n\62\3\62\3\62\3\62\5\62\u02d4\n\62\3")
buf.write("\62\5\62\u02d7\n\62\3\62\3\62\3\62\3\62\3\62\5\62\u02de")
buf.write("\n\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62")
buf.write("\3\62\3\62\3\62\5\62\u02ed\n\62\3\62\3\62\3\62\3\62\3")
buf.write("\62\3\62\3\62\3\62\3\62\3\62\5\62\u02f9\n\62\3\62\7\62")
buf.write("\u02fc\n\62\f\62\16\62\u02ff\13\62\3\63\3\63\3\63\6\63")
buf.write("\u0304\n\63\r\63\16\63\u0305\3\63\3\63\5\63\u030a\n\63")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65\3\65\7\65")
buf.write("\u0316\n\65\f\65\16\65\u0319\13\65\3\65\5\65\u031c\n\65")
buf.write("\3\66\3\66\3\66\5\66\u0321\n\66\3\66\5\66\u0324\n\66\3")
buf.write("\66\5\66\u0327\n\66\3\67\3\67\3\67\3\67\3\67\7\67\u032e")
buf.write("\n\67\f\67\16\67\u0331\13\67\38\38\58\u0335\n8\38\38\5")
buf.write("8\u0339\n8\38\38\38\58\u033e\n8\38\38\58\u0342\n8\38\5")
buf.write("8\u0345\n8\39\39\39\39\39\79\u034c\n9\f9\169\u034f\13")
buf.write("9\3:\3:\3:\3:\3:\5:\u0356\n:\3;\3;\3;\3;\3;\3;\7;\u035e")
buf.write("\n;\f;\16;\u0361\13;\3<\3<\3<\3<\3<\5<\u0368\n<\5<\u036a")
buf.write("\n<\3=\3=\3=\3=\3=\3=\7=\u0372\n=\f=\16=\u0375\13=\3>")
buf.write("\3>\5>\u0379\n>\3?\3?\5?\u037d\n?\3?\3?\7?\u0381\n?\f")
buf.write("?\16?\u0384\13?\5?\u0386\n?\3@\3@\3@\3@\3@\7@\u038d\n")
buf.write("@\f@\16@\u0390\13@\3@\3@\5@\u0394\n@\3@\5@\u0397\n@\3")
buf.write("@\3@\3@\3@\5@\u039d\n@\3@\3@\3@\3@\3@\3@\3@\3@\3@\3@\3")
buf.write("@\3@\3@\3@\5@\u03ad\n@\3@\3@\7@\u03b1\n@\f@\16@\u03b4")
buf.write("\13@\5@\u03b6\n@\3@\3@\3@\5@\u03bb\n@\3@\5@\u03be\n@\3")
buf.write("@\3@\3@\3@\3@\5@\u03c5\n@\3@\3@\3@\3@\3@\3@\3@\3@\3@\3")
buf.write("@\3@\3@\3@\3@\3@\3@\3@\5@\u03d8\n@\3@\3@\7@\u03dc\n@\f")
buf.write("@\16@\u03df\13@\7@\u03e1\n@\f@\16@\u03e4\13@\3A\3A\3B")
buf.write("\3B\3B\3B\3B\3B\3B\3B\3B\3B\5B\u03f2\nB\3C\3C\5C\u03f6")
buf.write("\nC\3C\3C\3C\3C\3C\5C\u03fd\nC\3C\7C\u0400\nC\fC\16C\u0403")
buf.write("\13C\3D\3D\3D\3E\3E\3E\3E\3E\7E\u040d\nE\fE\16E\u0410")
buf.write("\13E\3F\3F\3F\3F\3F\3F\5F\u0418\nF\3G\3G\3G\3G\3G\6G\u041f")
buf.write("\nG\rG\16G\u0420\3G\3G\3G\3H\3H\3H\3H\3H\3H\3H\3H\3H\3")
buf.write("H\3H\3H\7H\u0432\nH\fH\16H\u0435\13H\5H\u0437\nH\3H\3")
buf.write("H\3H\3H\7H\u043d\nH\fH\16H\u0440\13H\5H\u0442\nH\7H\u0444")
buf.write("\nH\fH\16H\u0447\13H\3H\3H\5H\u044b\nH\3I\3I\3I\3I\3I")
buf.write("\3I\3I\3I\3I\3I\3I\5I\u0458\nI\3J\3J\5J\u045c\nJ\3J\3")
buf.write("J\3K\3K\3K\3K\3K\7K\u0465\nK\fK\16K\u0468\13K\3L\3L\5")
buf.write("L\u046c\nL\3M\5M\u046f\nM\3M\3M\3N\3N\3N\3N\3N\3N\3N\5")
buf.write("N\u047a\nN\3N\3N\3N\3N\3N\3N\5N\u0482\nN\3O\3O\3O\3O\3")
buf.write("O\3O\3O\3O\3O\3O\3O\3O\3O\3O\3O\3O\3O\5O\u0495\nO\3O\3")
buf.write("O\5O\u0499\nO\3O\3O\5O\u049d\nO\3O\3O\3O\3O\3O\3O\5O\u04a5")
buf.write("\nO\3O\3O\5O\u04a9\nO\3O\3O\3O\5O\u04ae\nO\3P\3P\3P\3")
buf.write("P\3P\3P\3P\3P\3P\5P\u04b9\nP\3P\3P\3P\3P\3P\5P\u04c0\n")
buf.write("P\3Q\5Q\u04c3\nQ\3Q\3Q\3R\3R\3R\3R\3R\7R\u04cc\nR\fR\16")
buf.write("R\u04cf\13R\3S\3S\3S\5S\u04d4\nS\3T\5T\u04d7\nT\3T\3T")
buf.write("\5T\u04db\nT\3T\3T\3U\3U\3U\3U\3U\7U\u04e4\nU\fU\16U\u04e7")
buf.write("\13U\3U\2\36\6\n\f\24\26\30\32\34\36 \"$&.:FLRbptx~\u0084")
buf.write("\u0088\u0094\u00a2\u00a8V\2\4\6\b\n\f\16\20\22\24\26\30")
buf.write("\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPRTVXZ\\^`b")
buf.write("dfhjlnprtvxz|~\u0080\u0082\u0084\u0086\u0088\u008a\u008c")
buf.write("\u008e\u0090\u0092\u0094\u0096\u0098\u009a\u009c\u009e")
buf.write("\u00a0\u00a2\u00a4\u00a6\u00a8\2\16\7\2IIKKMMPPUV\3\2")
buf.write("[e\b\2\21\21\34\34$$**--<<\n\2\6\b\24\24\31\31\35\35\"")
buf.write("#\'(/\60\66\67\3\2\6\b\4\2++..\6\2\25\25%%\61\61\65\65")
buf.write("\5\2\n\13!!::\4\2=>ZZ\3\2=>\4\2\r\r\17\17\4\2\20\20\61")
buf.write("\61\u0559\2\u00cb\3\2\2\2\4\u00cd\3\2\2\2\6\u00d4\3\2")
buf.write("\2\2\b\u00e6\3\2\2\2\n\u010a\3\2\2\2\f\u0126\3\2\2\2\16")
buf.write("\u0147\3\2\2\2\20\u0149\3\2\2\2\22\u0157\3\2\2\2\24\u0159")
buf.write("\3\2\2\2\26\u016a\3\2\2\2\30\u0178\3\2\2\2\32\u0186\3")
buf.write("\2\2\2\34\u019a\3\2\2\2\36\u01a8\3\2\2\2 \u01b3\3\2\2")
buf.write("\2\"\u01be\3\2\2\2$\u01c9\3\2\2\2&\u01d4\3\2\2\2(\u01df")
buf.write("\3\2\2\2*\u01ec\3\2\2\2,\u01ee\3\2\2\2.\u01f0\3\2\2\2")
buf.write("\60\u01fb\3\2\2\2\62\u0204\3\2\2\2\64\u0207\3\2\2\2\66")
buf.write("\u020c\3\2\2\28\u0215\3\2\2\2:\u0217\3\2\2\2<\u0227\3")
buf.write("\2\2\2>\u0229\3\2\2\2@\u0239\3\2\2\2B\u0246\3\2\2\2D\u0248")
buf.write("\3\2\2\2F\u024a\3\2\2\2H\u025b\3\2\2\2J\u0265\3\2\2\2")
buf.write("L\u0267\3\2\2\2N\u0278\3\2\2\2P\u028d\3\2\2\2R\u028f\3")
buf.write("\2\2\2T\u029f\3\2\2\2V\u02a1\3\2\2\2X\u02a3\3\2\2\2Z\u02a8")
buf.write("\3\2\2\2\\\u02b0\3\2\2\2^\u02bc\3\2\2\2`\u02bf\3\2\2\2")
buf.write("b\u02ce\3\2\2\2d\u0309\3\2\2\2f\u030b\3\2\2\2h\u031b\3")
buf.write("\2\2\2j\u0326\3\2\2\2l\u032f\3\2\2\2n\u0344\3\2\2\2p\u0346")
buf.write("\3\2\2\2r\u0355\3\2\2\2t\u0357\3\2\2\2v\u0369\3\2\2\2")
buf.write("x\u036b\3\2\2\2z\u0376\3\2\2\2|\u0385\3\2\2\2~\u03b5\3")
buf.write("\2\2\2\u0080\u03e5\3\2\2\2\u0082\u03f1\3\2\2\2\u0084\u03f3")
buf.write("\3\2\2\2\u0086\u0404\3\2\2\2\u0088\u0407\3\2\2\2\u008a")
buf.write("\u0417\3\2\2\2\u008c\u0419\3\2\2\2\u008e\u044a\3\2\2\2")
buf.write("\u0090\u0457\3\2\2\2\u0092\u0459\3\2\2\2\u0094\u045f\3")
buf.write("\2\2\2\u0096\u046b\3\2\2\2\u0098\u046e\3\2\2\2\u009a\u0481")
buf.write("\3\2\2\2\u009c\u04ad\3\2\2\2\u009e\u04bf\3\2\2\2\u00a0")
buf.write("\u04c2\3\2\2\2\u00a2\u04c6\3\2\2\2\u00a4\u04d3\3\2\2\2")
buf.write("\u00a6\u04d6\3\2\2\2\u00a8\u04de\3\2\2\2\u00aa\u00cc\7")
buf.write("k\2\2\u00ab\u00cc\7l\2\2\u00ac\u00ae\7m\2\2\u00ad\u00ac")
buf.write("\3\2\2\2\u00ae\u00af\3\2\2\2\u00af\u00ad\3\2\2\2\u00af")
buf.write("\u00b0\3\2\2\2\u00b0\u00cc\3\2\2\2\u00b1\u00b2\7=\2\2")
buf.write("\u00b2\u00b3\5.\30\2\u00b3\u00b4\7>\2\2\u00b4\u00cc\3")
buf.write("\2\2\2\u00b5\u00cc\5\4\3\2\u00b6\u00b8\7\3\2\2\u00b7\u00b6")
buf.write("\3\2\2\2\u00b7\u00b8\3\2\2\2\u00b8\u00b9\3\2\2\2\u00b9")
buf.write("\u00ba\7=\2\2\u00ba\u00bb\5\u0092J\2\u00bb\u00bc\7>\2")
buf.write("\2\u00bc\u00cc\3\2\2\2\u00bd\u00be\7\4\2\2\u00be\u00bf")
buf.write("\7=\2\2\u00bf\u00c0\5\16\b\2\u00c0\u00c1\7Z\2\2\u00c1")
buf.write("\u00c2\5z>\2\u00c2\u00c3\7>\2\2\u00c3\u00cc\3\2\2\2\u00c4")
buf.write("\u00c5\7\5\2\2\u00c5\u00c6\7=\2\2\u00c6\u00c7\5z>\2\u00c7")
buf.write("\u00c8\7Z\2\2\u00c8\u00c9\5\16\b\2\u00c9\u00ca\7>\2\2")
buf.write("\u00ca\u00cc\3\2\2\2\u00cb\u00aa\3\2\2\2\u00cb\u00ab\3")
buf.write("\2\2\2\u00cb\u00ad\3\2\2\2\u00cb\u00b1\3\2\2\2\u00cb\u00b5")
buf.write("\3\2\2\2\u00cb\u00b7\3\2\2\2\u00cb\u00bd\3\2\2\2\u00cb")
buf.write("\u00c4\3\2\2\2\u00cc\3\3\2\2\2\u00cd\u00ce\78\2\2\u00ce")
buf.write("\u00cf\7=\2\2\u00cf\u00d0\5*\26\2\u00d0\u00d1\7Z\2\2\u00d1")
buf.write("\u00d2\5\6\4\2\u00d2\u00d3\7>\2\2\u00d3\5\3\2\2\2\u00d4")
buf.write("\u00d5\b\4\1\2\u00d5\u00d6\5\b\5\2\u00d6\u00dc\3\2\2\2")
buf.write("\u00d7\u00d8\f\3\2\2\u00d8\u00d9\7Z\2\2\u00d9\u00db\5")
buf.write("\b\5\2\u00da\u00d7\3\2\2\2\u00db\u00de\3\2\2\2\u00dc\u00da")
buf.write("\3\2\2\2\u00dc\u00dd\3\2\2\2\u00dd\7\3\2\2\2\u00de\u00dc")
buf.write("\3\2\2\2\u00df\u00e0\5z>\2\u00e0\u00e1\7X\2\2\u00e1\u00e2")
buf.write("\5*\26\2\u00e2\u00e7\3\2\2\2\u00e3\u00e4\7\27\2\2\u00e4")
buf.write("\u00e5\7X\2\2\u00e5\u00e7\5*\26\2\u00e6\u00df\3\2\2\2")
buf.write("\u00e6\u00e3\3\2\2\2\u00e7\t\3\2\2\2\u00e8\u00e9\b\6\1")
buf.write("\2\u00e9\u010b\5\2\2\2\u00ea\u00eb\7=\2\2\u00eb\u00ec")
buf.write("\5z>\2\u00ec\u00ed\7>\2\2\u00ed\u00ee\7A\2\2\u00ee\u00ef")
buf.write("\5\u0084C\2\u00ef\u00f0\7B\2\2\u00f0\u010b\3\2\2\2\u00f1")
buf.write("\u00f2\7=\2\2\u00f2\u00f3\5z>\2\u00f3\u00f4\7>\2\2\u00f4")
buf.write("\u00f5\7A\2\2\u00f5\u00f6\5\u0084C\2\u00f6\u00f7\7Z\2")
buf.write("\2\u00f7\u00f8\7B\2\2\u00f8\u010b\3\2\2\2\u00f9\u00fa")
buf.write("\7\3\2\2\u00fa\u00fb\7=\2\2\u00fb\u00fc\5z>\2\u00fc\u00fd")
buf.write("\7>\2\2\u00fd\u00fe\7A\2\2\u00fe\u00ff\5\u0084C\2\u00ff")
buf.write("\u0100\7B\2\2\u0100\u010b\3\2\2\2\u0101\u0102\7\3\2\2")
buf.write("\u0102\u0103\7=\2\2\u0103\u0104\5z>\2\u0104\u0105\7>\2")
buf.write("\2\u0105\u0106\7A\2\2\u0106\u0107\5\u0084C\2\u0107\u0108")
buf.write("\7Z\2\2\u0108\u0109\7B\2\2\u0109\u010b\3\2\2\2\u010a\u00e8")
buf.write("\3\2\2\2\u010a\u00ea\3\2\2\2\u010a\u00f1\3\2\2\2\u010a")
buf.write("\u00f9\3\2\2\2\u010a\u0101\3\2\2\2\u010b\u0123\3\2\2\2")
buf.write("\u010c\u010d\f\f\2\2\u010d\u010e\7?\2\2\u010e\u010f\5")
buf.write(".\30\2\u010f\u0110\7@\2\2\u0110\u0122\3\2\2\2\u0111\u0112")
buf.write("\f\13\2\2\u0112\u0114\7=\2\2\u0113\u0115\5\f\7\2\u0114")
buf.write("\u0113\3\2\2\2\u0114\u0115\3\2\2\2\u0115\u0116\3\2\2\2")
buf.write("\u0116\u0122\7>\2\2\u0117\u0118\f\n\2\2\u0118\u0119\7")
buf.write("i\2\2\u0119\u0122\7k\2\2\u011a\u011b\f\t\2\2\u011b\u011c")
buf.write("\7h\2\2\u011c\u0122\7k\2\2\u011d\u011e\f\b\2\2\u011e\u0122")
buf.write("\7J\2\2\u011f\u0120\f\7\2\2\u0120\u0122\7L\2\2\u0121\u010c")
buf.write("\3\2\2\2\u0121\u0111\3\2\2\2\u0121\u0117\3\2\2\2\u0121")
buf.write("\u011a\3\2\2\2\u0121\u011d\3\2\2\2\u0121\u011f\3\2\2\2")
buf.write("\u0122\u0125\3\2\2\2\u0123\u0121\3\2\2\2\u0123\u0124\3")
buf.write("\2\2\2\u0124\13\3\2\2\2\u0125\u0123\3\2\2\2\u0126\u0127")
buf.write("\b\7\1\2\u0127\u0128\5*\26\2\u0128\u012e\3\2\2\2\u0129")
buf.write("\u012a\f\3\2\2\u012a\u012b\7Z\2\2\u012b\u012d\5*\26\2")
buf.write("\u012c\u0129\3\2\2\2\u012d\u0130\3\2\2\2\u012e\u012c\3")
buf.write("\2\2\2\u012e\u012f\3\2\2\2\u012f\r\3\2\2\2\u0130\u012e")
buf.write("\3\2\2\2\u0131\u0148\5\n\6\2\u0132\u0133\7J\2\2\u0133")
buf.write("\u0148\5\16\b\2\u0134\u0135\7L\2\2\u0135\u0148\5\16\b")
buf.write("\2\u0136\u0137\5\20\t\2\u0137\u0138\5\22\n\2\u0138\u0148")
buf.write("\3\2\2\2\u0139\u013a\7)\2\2\u013a\u0148\5\16\b\2\u013b")
buf.write("\u013c\7)\2\2\u013c\u013d\7=\2\2\u013d\u013e\5z>\2\u013e")
buf.write("\u013f\7>\2\2\u013f\u0148\3\2\2\2\u0140\u0141\7\64\2\2")
buf.write("\u0141\u0142\7=\2\2\u0142\u0143\5z>\2\u0143\u0144\7>\2")
buf.write("\2\u0144\u0148\3\2\2\2\u0145\u0146\7R\2\2\u0146\u0148")
buf.write("\7k\2\2\u0147\u0131\3\2\2\2\u0147\u0132\3\2\2\2\u0147")
buf.write("\u0134\3\2\2\2\u0147\u0136\3\2\2\2\u0147\u0139\3\2\2\2")
buf.write("\u0147\u013b\3\2\2\2\u0147\u0140\3\2\2\2\u0147\u0145\3")
buf.write("\2\2\2\u0148\17\3\2\2\2\u0149\u014a\t\2\2\2\u014a\21\3")
buf.write("\2\2\2\u014b\u0158\5\16\b\2\u014c\u014d\7=\2\2\u014d\u014e")
buf.write("\5z>\2\u014e\u014f\7>\2\2\u014f\u0150\5\22\n\2\u0150\u0158")
buf.write("\3\2\2\2\u0151\u0152\7\3\2\2\u0152\u0153\7=\2\2\u0153")
buf.write("\u0154\5z>\2\u0154\u0155\7>\2\2\u0155\u0156\5\22\n\2\u0156")
buf.write("\u0158\3\2\2\2\u0157\u014b\3\2\2\2\u0157\u014c\3\2\2\2")
buf.write("\u0157\u0151\3\2\2\2\u0158\23\3\2\2\2\u0159\u015a\b\13")
buf.write("\1\2\u015a\u015b\5\22\n\2\u015b\u0167\3\2\2\2\u015c\u015d")
buf.write("\f\5\2\2\u015d\u015e\7M\2\2\u015e\u0166\5\22\n\2\u015f")
buf.write("\u0160\f\4\2\2\u0160\u0161\7N\2\2\u0161\u0166\5\22\n\2")
buf.write("\u0162\u0163\f\3\2\2\u0163\u0164\7O\2\2\u0164\u0166\5")
buf.write("\22\n\2\u0165\u015c\3\2\2\2\u0165\u015f\3\2\2\2\u0165")
buf.write("\u0162\3\2\2\2\u0166\u0169\3\2\2\2\u0167\u0165\3\2\2\2")
buf.write("\u0167\u0168\3\2\2\2\u0168\25\3\2\2\2\u0169\u0167\3\2")
buf.write("\2\2\u016a\u016b\b\f\1\2\u016b\u016c\5\24\13\2\u016c\u0175")
buf.write("\3\2\2\2\u016d\u016e\f\4\2\2\u016e\u016f\7I\2\2\u016f")
buf.write("\u0174\5\24\13\2\u0170\u0171\f\3\2\2\u0171\u0172\7K\2")
buf.write("\2\u0172\u0174\5\24\13\2\u0173\u016d\3\2\2\2\u0173\u0170")
buf.write("\3\2\2\2\u0174\u0177\3\2\2\2\u0175\u0173\3\2\2\2\u0175")
buf.write("\u0176\3\2\2\2\u0176\27\3\2\2\2\u0177\u0175\3\2\2\2\u0178")
buf.write("\u0179\b\r\1\2\u0179\u017a\5\26\f\2\u017a\u0183\3\2\2")
buf.write("\2\u017b\u017c\f\4\2\2\u017c\u017d\7G\2\2\u017d\u0182")
buf.write("\5\26\f\2\u017e\u017f\f\3\2\2\u017f\u0180\7H\2\2\u0180")
buf.write("\u0182\5\26\f\2\u0181\u017b\3\2\2\2\u0181\u017e\3\2\2")
buf.write("\2\u0182\u0185\3\2\2\2\u0183\u0181\3\2\2\2\u0183\u0184")
buf.write("\3\2\2\2\u0184\31\3\2\2\2\u0185\u0183\3\2\2\2\u0186\u0187")
buf.write("\b\16\1\2\u0187\u0188\5\30\r\2\u0188\u0197\3\2\2\2\u0189")
buf.write("\u018a\f\6\2\2\u018a\u018b\7C\2\2\u018b\u0196\5\30\r\2")
buf.write("\u018c\u018d\f\5\2\2\u018d\u018e\7E\2\2\u018e\u0196\5")
buf.write("\30\r\2\u018f\u0190\f\4\2\2\u0190\u0191\7D\2\2\u0191\u0196")
buf.write("\5\30\r\2\u0192\u0193\f\3\2\2\u0193\u0194\7F\2\2\u0194")
buf.write("\u0196\5\30\r\2\u0195\u0189\3\2\2\2\u0195\u018c\3\2\2")
buf.write("\2\u0195\u018f\3\2\2\2\u0195\u0192\3\2\2\2\u0196\u0199")
buf.write("\3\2\2\2\u0197\u0195\3\2\2\2\u0197\u0198\3\2\2\2\u0198")
buf.write("\33\3\2\2\2\u0199\u0197\3\2\2\2\u019a\u019b\b\17\1\2\u019b")
buf.write("\u019c\5\32\16\2\u019c\u01a5\3\2\2\2\u019d\u019e\f\4\2")
buf.write("\2\u019e\u019f\7f\2\2\u019f\u01a4\5\32\16\2\u01a0\u01a1")
buf.write("\f\3\2\2\u01a1\u01a2\7g\2\2\u01a2\u01a4\5\32\16\2\u01a3")
buf.write("\u019d\3\2\2\2\u01a3\u01a0\3\2\2\2\u01a4\u01a7\3\2\2\2")
buf.write("\u01a5\u01a3\3\2\2\2\u01a5\u01a6\3\2\2\2\u01a6\35\3\2")
buf.write("\2\2\u01a7\u01a5\3\2\2\2\u01a8\u01a9\b\20\1\2\u01a9\u01aa")
buf.write("\5\34\17\2\u01aa\u01b0\3\2\2\2\u01ab\u01ac\f\3\2\2\u01ac")
buf.write("\u01ad\7P\2\2\u01ad\u01af\5\34\17\2\u01ae\u01ab\3\2\2")
buf.write("\2\u01af\u01b2\3\2\2\2\u01b0\u01ae\3\2\2\2\u01b0\u01b1")
buf.write("\3\2\2\2\u01b1\37\3\2\2\2\u01b2\u01b0\3\2\2\2\u01b3\u01b4")
buf.write("\b\21\1\2\u01b4\u01b5\5\36\20\2\u01b5\u01bb\3\2\2\2\u01b6")
buf.write("\u01b7\f\3\2\2\u01b7\u01b8\7T\2\2\u01b8\u01ba\5\36\20")
buf.write("\2\u01b9\u01b6\3\2\2\2\u01ba\u01bd\3\2\2\2\u01bb\u01b9")
buf.write("\3\2\2\2\u01bb\u01bc\3\2\2\2\u01bc!\3\2\2\2\u01bd\u01bb")
buf.write("\3\2\2\2\u01be\u01bf\b\22\1\2\u01bf\u01c0\5 \21\2\u01c0")
buf.write("\u01c6\3\2\2\2\u01c1\u01c2\f\3\2\2\u01c2\u01c3\7Q\2\2")
buf.write("\u01c3\u01c5\5 \21\2\u01c4\u01c1\3\2\2\2\u01c5\u01c8\3")
buf.write("\2\2\2\u01c6\u01c4\3\2\2\2\u01c6\u01c7\3\2\2\2\u01c7#")
buf.write("\3\2\2\2\u01c8\u01c6\3\2\2\2\u01c9\u01ca\b\23\1\2\u01ca")
buf.write("\u01cb\5\"\22\2\u01cb\u01d1\3\2\2\2\u01cc\u01cd\f\3\2")
buf.write("\2\u01cd\u01ce\7R\2\2\u01ce\u01d0\5\"\22\2\u01cf\u01cc")
buf.write("\3\2\2\2\u01d0\u01d3\3\2\2\2\u01d1\u01cf\3\2\2\2\u01d1")
buf.write("\u01d2\3\2\2\2\u01d2%\3\2\2\2\u01d3\u01d1\3\2\2\2\u01d4")
buf.write("\u01d5\b\24\1\2\u01d5\u01d6\5$\23\2\u01d6\u01dc\3\2\2")
buf.write("\2\u01d7\u01d8\f\3\2\2\u01d8\u01d9\7S\2\2\u01d9\u01db")
buf.write("\5$\23\2\u01da\u01d7\3\2\2\2\u01db\u01de\3\2\2\2\u01dc")
buf.write("\u01da\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd\'\3\2\2\2\u01de")
buf.write("\u01dc\3\2\2\2\u01df\u01e5\5&\24\2\u01e0\u01e1\7W\2\2")
buf.write("\u01e1\u01e2\5.\30\2\u01e2\u01e3\7X\2\2\u01e3\u01e4\5")
buf.write("(\25\2\u01e4\u01e6\3\2\2\2\u01e5\u01e0\3\2\2\2\u01e5\u01e6")
buf.write("\3\2\2\2\u01e6)\3\2\2\2\u01e7\u01ed\5(\25\2\u01e8\u01e9")
buf.write("\5\16\b\2\u01e9\u01ea\5,\27\2\u01ea\u01eb\5*\26\2\u01eb")
buf.write("\u01ed\3\2\2\2\u01ec\u01e7\3\2\2\2\u01ec\u01e8\3\2\2\2")
buf.write("\u01ed+\3\2\2\2\u01ee\u01ef\t\3\2\2\u01ef-\3\2\2\2\u01f0")
buf.write("\u01f1\b\30\1\2\u01f1\u01f2\5*\26\2\u01f2\u01f8\3\2\2")
buf.write("\2\u01f3\u01f4\f\3\2\2\u01f4\u01f5\7Z\2\2\u01f5\u01f7")
buf.write("\5*\26\2\u01f6\u01f3\3\2\2\2\u01f7\u01fa\3\2\2\2\u01f8")
buf.write("\u01f6\3\2\2\2\u01f8\u01f9\3\2\2\2\u01f9/\3\2\2\2\u01fa")
buf.write("\u01f8\3\2\2\2\u01fb\u01fc\5(\25\2\u01fc\61\3\2\2\2\u01fd")
buf.write("\u01ff\5\64\33\2\u01fe\u0200\5:\36\2\u01ff\u01fe\3\2\2")
buf.write("\2\u01ff\u0200\3\2\2\2\u0200\u0201\3\2\2\2\u0201\u0202")
buf.write("\7Y\2\2\u0202\u0205\3\2\2\2\u0203\u0205\5\u008cG\2\u0204")
buf.write("\u01fd\3\2\2\2\u0204\u0203\3\2\2\2\u0205\63\3\2\2\2\u0206")
buf.write("\u0208\58\35\2\u0207\u0206\3\2\2\2\u0208\u0209\3\2\2\2")
buf.write("\u0209\u0207\3\2\2\2\u0209\u020a\3\2\2\2\u020a\65\3\2")
buf.write("\2\2\u020b\u020d\58\35\2\u020c\u020b\3\2\2\2\u020d\u020e")
buf.write("\3\2\2\2\u020e\u020c\3\2\2\2\u020e\u020f\3\2\2\2\u020f")
buf.write("\67\3\2\2\2\u0210\u0216\5> \2\u0211\u0216\5@!\2\u0212")
buf.write("\u0216\5Z.\2\u0213\u0216\5\\/\2\u0214\u0216\5^\60\2\u0215")
buf.write("\u0210\3\2\2\2\u0215\u0211\3\2\2\2\u0215\u0212\3\2\2\2")
buf.write("\u0215\u0213\3\2\2\2\u0215\u0214\3\2\2\2\u02169\3\2\2")
buf.write("\2\u0217\u0218\b\36\1\2\u0218\u0219\5<\37\2\u0219\u021f")
buf.write("\3\2\2\2\u021a\u021b\f\3\2\2\u021b\u021c\7Z\2\2\u021c")
buf.write("\u021e\5<\37\2\u021d\u021a\3\2\2\2\u021e\u0221\3\2\2\2")
buf.write("\u021f\u021d\3\2\2\2\u021f\u0220\3\2\2\2\u0220;\3\2\2")
buf.write("\2\u0221\u021f\3\2\2\2\u0222\u0228\5`\61\2\u0223\u0224")
buf.write("\5`\61\2\u0224\u0225\7[\2\2\u0225\u0226\5\u0082B\2\u0226")
buf.write("\u0228\3\2\2\2\u0227\u0222\3\2\2\2\u0227\u0223\3\2\2\2")
buf.write("\u0228=\3\2\2\2\u0229\u022a\t\4\2\2\u022a?\3\2\2\2\u022b")
buf.write("\u023a\t\5\2\2\u022c\u022d\7\3\2\2\u022d\u022e\7=\2\2")
buf.write("\u022e\u022f\t\6\2\2\u022f\u023a\7>\2\2\u0230\u023a\5")
buf.write("X-\2\u0231\u023a\5B\"\2\u0232\u023a\5P)\2\u0233\u023a")
buf.write("\5\u0080A\2\u0234\u0235\7\t\2\2\u0235\u0236\7=\2\2\u0236")
buf.write("\u0237\5\60\31\2\u0237\u0238\7>\2\2\u0238\u023a\3\2\2")
buf.write("\2\u0239\u022b\3\2\2\2\u0239\u022c\3\2\2\2\u0239\u0230")
buf.write("\3\2\2\2\u0239\u0231\3\2\2\2\u0239\u0232\3\2\2\2\u0239")
buf.write("\u0233\3\2\2\2\u0239\u0234\3\2\2\2\u023aA\3\2\2\2\u023b")
buf.write("\u023d\5D#\2\u023c\u023e\7k\2\2\u023d\u023c\3\2\2\2\u023d")
buf.write("\u023e\3\2\2\2\u023e\u023f\3\2\2\2\u023f\u0240\7A\2\2")
buf.write("\u0240\u0241\5F$\2\u0241\u0242\7B\2\2\u0242\u0247\3\2")
buf.write("\2\2\u0243\u0244\5D#\2\u0244\u0245\7k\2\2\u0245\u0247")
buf.write("\3\2\2\2\u0246\u023b\3\2\2\2\u0246\u0243\3\2\2\2\u0247")
buf.write("C\3\2\2\2\u0248\u0249\t\7\2\2\u0249E\3\2\2\2\u024a\u024b")
buf.write("\b$\1\2\u024b\u024c\5H%\2\u024c\u0251\3\2\2\2\u024d\u024e")
buf.write("\f\3\2\2\u024e\u0250\5H%\2\u024f\u024d\3\2\2\2\u0250\u0253")
buf.write("\3\2\2\2\u0251\u024f\3\2\2\2\u0251\u0252\3\2\2\2\u0252")
buf.write("G\3\2\2\2\u0253\u0251\3\2\2\2\u0254\u0256\5J&\2\u0255")
buf.write("\u0257\5L\'\2\u0256\u0255\3\2\2\2\u0256\u0257\3\2\2\2")
buf.write("\u0257\u0258\3\2\2\2\u0258\u0259\7Y\2\2\u0259\u025c\3")
buf.write("\2\2\2\u025a\u025c\5\u008cG\2\u025b\u0254\3\2\2\2\u025b")
buf.write("\u025a\3\2\2\2\u025cI\3\2\2\2\u025d\u025f\5@!\2\u025e")
buf.write("\u0260\5J&\2\u025f\u025e\3\2\2\2\u025f\u0260\3\2\2\2\u0260")
buf.write("\u0266\3\2\2\2\u0261\u0263\5Z.\2\u0262\u0264\5J&\2\u0263")
buf.write("\u0262\3\2\2\2\u0263\u0264\3\2\2\2\u0264\u0266\3\2\2\2")
buf.write("\u0265\u025d\3\2\2\2\u0265\u0261\3\2\2\2\u0266K\3\2\2")
buf.write("\2\u0267\u0268\b\'\1\2\u0268\u0269\5N(\2\u0269\u026f\3")
buf.write("\2\2\2\u026a\u026b\f\3\2\2\u026b\u026c\7Z\2\2\u026c\u026e")
buf.write("\5N(\2\u026d\u026a\3\2\2\2\u026e\u0271\3\2\2\2\u026f\u026d")
buf.write("\3\2\2\2\u026f\u0270\3\2\2\2\u0270M\3\2\2\2\u0271\u026f")
buf.write("\3\2\2\2\u0272\u0279\5`\61\2\u0273\u0275\5`\61\2\u0274")
buf.write("\u0273\3\2\2\2\u0274\u0275\3\2\2\2\u0275\u0276\3\2\2\2")
buf.write("\u0276\u0277\7X\2\2\u0277\u0279\5\60\31\2\u0278\u0272")
buf.write("\3\2\2\2\u0278\u0274\3\2\2\2\u0279O\3\2\2\2\u027a\u027c")
buf.write("\7\33\2\2\u027b\u027d\7k\2\2\u027c\u027b\3\2\2\2\u027c")
buf.write("\u027d\3\2\2\2\u027d\u027e\3\2\2\2\u027e\u027f\7A\2\2")
buf.write("\u027f\u0280\5R*\2\u0280\u0281\7B\2\2\u0281\u028e\3\2")
buf.write("\2\2\u0282\u0284\7\33\2\2\u0283\u0285\7k\2\2\u0284\u0283")
buf.write("\3\2\2\2\u0284\u0285\3\2\2\2\u0285\u0286\3\2\2\2\u0286")
buf.write("\u0287\7A\2\2\u0287\u0288\5R*\2\u0288\u0289\7Z\2\2\u0289")
buf.write("\u028a\7B\2\2\u028a\u028e\3\2\2\2\u028b\u028c\7\33\2\2")
buf.write("\u028c\u028e\7k\2\2\u028d\u027a\3\2\2\2\u028d\u0282\3")
buf.write("\2\2\2\u028d\u028b\3\2\2\2\u028eQ\3\2\2\2\u028f\u0290")
buf.write("\b*\1\2\u0290\u0291\5T+\2\u0291\u0297\3\2\2\2\u0292\u0293")
buf.write("\f\3\2\2\u0293\u0294\7Z\2\2\u0294\u0296\5T+\2\u0295\u0292")
buf.write("\3\2\2\2\u0296\u0299\3\2\2\2\u0297\u0295\3\2\2\2\u0297")
buf.write("\u0298\3\2\2\2\u0298S\3\2\2\2\u0299\u0297\3\2\2\2\u029a")
buf.write("\u02a0\5V,\2\u029b\u029c\5V,\2\u029c\u029d\7[\2\2\u029d")
buf.write("\u029e\5\60\31\2\u029e\u02a0\3\2\2\2\u029f\u029a\3\2\2")
buf.write("\2\u029f\u029b\3\2\2\2\u02a0U\3\2\2\2\u02a1\u02a2\7k\2")
buf.write("\2\u02a2W\3\2\2\2\u02a3\u02a4\7\65\2\2\u02a4\u02a5\7=")
buf.write("\2\2\u02a5\u02a6\5z>\2\u02a6\u02a7\7>\2\2\u02a7Y\3\2\2")
buf.write("\2\u02a8\u02a9\t\b\2\2\u02a9[\3\2\2\2\u02aa\u02b1\t\t")
buf.write("\2\2\u02ab\u02b1\5f\64\2\u02ac\u02ad\7\f\2\2\u02ad\u02ae")
buf.write("\7=\2\2\u02ae\u02af\7k\2\2\u02af\u02b1\7>\2\2\u02b0\u02aa")
buf.write("\3\2\2\2\u02b0\u02ab\3\2\2\2\u02b0\u02ac\3\2\2\2\u02b1")
buf.write("]\3\2\2\2\u02b2\u02b3\7\63\2\2\u02b3\u02b4\7=\2\2\u02b4")
buf.write("\u02b5\5z>\2\u02b5\u02b6\7>\2\2\u02b6\u02bd\3\2\2\2\u02b7")
buf.write("\u02b8\7\63\2\2\u02b8\u02b9\7=\2\2\u02b9\u02ba\5\60\31")
buf.write("\2\u02ba\u02bb\7>\2\2\u02bb\u02bd\3\2\2\2\u02bc\u02b2")
buf.write("\3\2\2\2\u02bc\u02b7\3\2\2\2\u02bd_\3\2\2\2\u02be\u02c0")
buf.write("\5n8\2\u02bf\u02be\3\2\2\2\u02bf\u02c0\3\2\2\2\u02c0\u02c1")
buf.write("\3\2\2\2\u02c1\u02c5\5b\62\2\u02c2\u02c4\5d\63\2\u02c3")
buf.write("\u02c2\3\2\2\2\u02c4\u02c7\3\2\2\2\u02c5\u02c3\3\2\2\2")
buf.write("\u02c5\u02c6\3\2\2\2\u02c6a\3\2\2\2\u02c7\u02c5\3\2\2")
buf.write("\2\u02c8\u02c9\b\62\1\2\u02c9\u02cf\7k\2\2\u02ca\u02cb")
buf.write("\7=\2\2\u02cb\u02cc\5`\61\2\u02cc\u02cd\7>\2\2\u02cd\u02cf")
buf.write("\3\2\2\2\u02ce\u02c8\3\2\2\2\u02ce\u02ca\3\2\2\2\u02cf")
buf.write("\u02fd\3\2\2\2\u02d0\u02d1\f\b\2\2\u02d1\u02d3\7?\2\2")
buf.write("\u02d2\u02d4\5p9\2\u02d3\u02d2\3\2\2\2\u02d3\u02d4\3\2")
buf.write("\2\2\u02d4\u02d6\3\2\2\2\u02d5\u02d7\5*\26\2\u02d6\u02d5")
buf.write("\3\2\2\2\u02d6\u02d7\3\2\2\2\u02d7\u02d8\3\2\2\2\u02d8")
buf.write("\u02fc\7@\2\2\u02d9\u02da\f\7\2\2\u02da\u02db\7?\2\2\u02db")
buf.write("\u02dd\7*\2\2\u02dc\u02de\5p9\2\u02dd\u02dc\3\2\2\2\u02dd")
buf.write("\u02de\3\2\2\2\u02de\u02df\3\2\2\2\u02df\u02e0\5*\26\2")
buf.write("\u02e0\u02e1\7@\2\2\u02e1\u02fc\3\2\2\2\u02e2\u02e3\f")
buf.write("\6\2\2\u02e3\u02e4\7?\2\2\u02e4\u02e5\5p9\2\u02e5\u02e6")
buf.write("\7*\2\2\u02e6\u02e7\5*\26\2\u02e7\u02e8\7@\2\2\u02e8\u02fc")
buf.write("\3\2\2\2\u02e9\u02ea\f\5\2\2\u02ea\u02ec\7?\2\2\u02eb")
buf.write("\u02ed\5p9\2\u02ec\u02eb\3\2\2\2\u02ec\u02ed\3\2\2\2\u02ed")
buf.write("\u02ee\3\2\2\2\u02ee\u02ef\7M\2\2\u02ef\u02fc\7@\2\2\u02f0")
buf.write("\u02f1\f\4\2\2\u02f1\u02f2\7=\2\2\u02f2\u02f3\5r:\2\u02f3")
buf.write("\u02f4\7>\2\2\u02f4\u02fc\3\2\2\2\u02f5\u02f6\f\3\2\2")
buf.write("\u02f6\u02f8\7=\2\2\u02f7\u02f9\5x=\2\u02f8\u02f7\3\2")
buf.write("\2\2\u02f8\u02f9\3\2\2\2\u02f9\u02fa\3\2\2\2\u02fa\u02fc")
buf.write("\7>\2\2\u02fb\u02d0\3\2\2\2\u02fb\u02d9\3\2\2\2\u02fb")
buf.write("\u02e2\3\2\2\2\u02fb\u02e9\3\2\2\2\u02fb\u02f0\3\2\2\2")
buf.write("\u02fb\u02f5\3\2\2\2\u02fc\u02ff\3\2\2\2\u02fd\u02fb\3")
buf.write("\2\2\2\u02fd\u02fe\3\2\2\2\u02fec\3\2\2\2\u02ff\u02fd")
buf.write("\3\2\2\2\u0300\u0301\7\r\2\2\u0301\u0303\7=\2\2\u0302")
buf.write("\u0304\7m\2\2\u0303\u0302\3\2\2\2\u0304\u0305\3\2\2\2")
buf.write("\u0305\u0303\3\2\2\2\u0305\u0306\3\2\2\2\u0306\u0307\3")
buf.write("\2\2\2\u0307\u030a\7>\2\2\u0308\u030a\5f\64\2\u0309\u0300")
buf.write("\3\2\2\2\u0309\u0308\3\2\2\2\u030ae\3\2\2\2\u030b\u030c")
buf.write("\7\16\2\2\u030c\u030d\7=\2\2\u030d\u030e\7=\2\2\u030e")
buf.write("\u030f\5h\65\2\u030f\u0310\7>\2\2\u0310\u0311\7>\2\2\u0311")
buf.write("g\3\2\2\2\u0312\u0317\5j\66\2\u0313\u0314\7Z\2\2\u0314")
buf.write("\u0316\5j\66\2\u0315\u0313\3\2\2\2\u0316\u0319\3\2\2\2")
buf.write("\u0317\u0315\3\2\2\2\u0317\u0318\3\2\2\2\u0318\u031c\3")
buf.write("\2\2\2\u0319\u0317\3\2\2\2\u031a\u031c\3\2\2\2\u031b\u0312")
buf.write("\3\2\2\2\u031b\u031a\3\2\2\2\u031ci\3\2\2\2\u031d\u0323")
buf.write("\n\n\2\2\u031e\u0320\7=\2\2\u031f\u0321\5\f\7\2\u0320")
buf.write("\u031f\3\2\2\2\u0320\u0321\3\2\2\2\u0321\u0322\3\2\2\2")
buf.write("\u0322\u0324\7>\2\2\u0323\u031e\3\2\2\2\u0323\u0324\3")
buf.write("\2\2\2\u0324\u0327\3\2\2\2\u0325\u0327\3\2\2\2\u0326\u031d")
buf.write("\3\2\2\2\u0326\u0325\3\2\2\2\u0327k\3\2\2\2\u0328\u032e")
buf.write("\n\13\2\2\u0329\u032a\7=\2\2\u032a\u032b\5l\67\2\u032b")
buf.write("\u032c\7>\2\2\u032c\u032e\3\2\2\2\u032d\u0328\3\2\2\2")
buf.write("\u032d\u0329\3\2\2\2\u032e\u0331\3\2\2\2\u032f\u032d\3")
buf.write("\2\2\2\u032f\u0330\3\2\2\2\u0330m\3\2\2\2\u0331\u032f")
buf.write("\3\2\2\2\u0332\u0334\7M\2\2\u0333\u0335\5p9\2\u0334\u0333")
buf.write("\3\2\2\2\u0334\u0335\3\2\2\2\u0335\u0345\3\2\2\2\u0336")
buf.write("\u0338\7M\2\2\u0337\u0339\5p9\2\u0338\u0337\3\2\2\2\u0338")
buf.write("\u0339\3\2\2\2\u0339\u033a\3\2\2\2\u033a\u0345\5n8\2\u033b")
buf.write("\u033d\7T\2\2\u033c\u033e\5p9\2\u033d\u033c\3\2\2\2\u033d")
buf.write("\u033e\3\2\2\2\u033e\u0345\3\2\2\2\u033f\u0341\7T\2\2")
buf.write("\u0340\u0342\5p9\2\u0341\u0340\3\2\2\2\u0341\u0342\3\2")
buf.write("\2\2\u0342\u0343\3\2\2\2\u0343\u0345\5n8\2\u0344\u0332")
buf.write("\3\2\2\2\u0344\u0336\3\2\2\2\u0344\u033b\3\2\2\2\u0344")
buf.write("\u033f\3\2\2\2\u0345o\3\2\2\2\u0346\u0347\b9\1\2\u0347")
buf.write("\u0348\5Z.\2\u0348\u034d\3\2\2\2\u0349\u034a\f\3\2\2\u034a")
buf.write("\u034c\5Z.\2\u034b\u0349\3\2\2\2\u034c\u034f\3\2\2\2\u034d")
buf.write("\u034b\3\2\2\2\u034d\u034e\3\2\2\2\u034eq\3\2\2\2\u034f")
buf.write("\u034d\3\2\2\2\u0350\u0356\5t;\2\u0351\u0352\5t;\2\u0352")
buf.write("\u0353\7Z\2\2\u0353\u0354\7j\2\2\u0354\u0356\3\2\2\2\u0355")
buf.write("\u0350\3\2\2\2\u0355\u0351\3\2\2\2\u0356s\3\2\2\2\u0357")
buf.write("\u0358\b;\1\2\u0358\u0359\5v<\2\u0359\u035f\3\2\2\2\u035a")
buf.write("\u035b\f\3\2\2\u035b\u035c\7Z\2\2\u035c\u035e\5v<\2\u035d")
buf.write("\u035a\3\2\2\2\u035e\u0361\3\2\2\2\u035f\u035d\3\2\2\2")
buf.write("\u035f\u0360\3\2\2\2\u0360u\3\2\2\2\u0361\u035f\3\2\2")
buf.write("\2\u0362\u0363\5\64\33\2\u0363\u0364\5`\61\2\u0364\u036a")
buf.write("\3\2\2\2\u0365\u0367\5\66\34\2\u0366\u0368\5|?\2\u0367")
buf.write("\u0366\3\2\2\2\u0367\u0368\3\2\2\2\u0368\u036a\3\2\2\2")
buf.write("\u0369\u0362\3\2\2\2\u0369\u0365\3\2\2\2\u036aw\3\2\2")
buf.write("\2\u036b\u036c\b=\1\2\u036c\u036d\7k\2\2\u036d\u0373\3")
buf.write("\2\2\2\u036e\u036f\f\3\2\2\u036f\u0370\7Z\2\2\u0370\u0372")
buf.write("\7k\2\2\u0371\u036e\3\2\2\2\u0372\u0375\3\2\2\2\u0373")
buf.write("\u0371\3\2\2\2\u0373\u0374\3\2\2\2\u0374y\3\2\2\2\u0375")
buf.write("\u0373\3\2\2\2\u0376\u0378\5J&\2\u0377\u0379\5|?\2\u0378")
buf.write("\u0377\3\2\2\2\u0378\u0379\3\2\2\2\u0379{\3\2\2\2\u037a")
buf.write("\u0386\5n8\2\u037b\u037d\5n8\2\u037c\u037b\3\2\2\2\u037c")
buf.write("\u037d\3\2\2\2\u037d\u037e\3\2\2\2\u037e\u0382\5~@\2\u037f")
buf.write("\u0381\5d\63\2\u0380\u037f\3\2\2\2\u0381\u0384\3\2\2\2")
buf.write("\u0382\u0380\3\2\2\2\u0382\u0383\3\2\2\2\u0383\u0386\3")
buf.write("\2\2\2\u0384\u0382\3\2\2\2\u0385\u037a\3\2\2\2\u0385\u037c")
buf.write("\3\2\2\2\u0386}\3\2\2\2\u0387\u0388\b@\1\2\u0388\u0389")
buf.write("\7=\2\2\u0389\u038a\5|?\2\u038a\u038e\7>\2\2\u038b\u038d")
buf.write("\5d\63\2\u038c\u038b\3\2\2\2\u038d\u0390\3\2\2\2\u038e")
buf.write("\u038c\3\2\2\2\u038e\u038f\3\2\2\2\u038f\u03b6\3\2\2\2")
buf.write("\u0390\u038e\3\2\2\2\u0391\u0393\7?\2\2\u0392\u0394\5")
buf.write("p9\2\u0393\u0392\3\2\2\2\u0393\u0394\3\2\2\2\u0394\u0396")
buf.write("\3\2\2\2\u0395\u0397\5*\26\2\u0396\u0395\3\2\2\2\u0396")
buf.write("\u0397\3\2\2\2\u0397\u0398\3\2\2\2\u0398\u03b6\7@\2\2")
buf.write("\u0399\u039a\7?\2\2\u039a\u039c\7*\2\2\u039b\u039d\5p")
buf.write("9\2\u039c\u039b\3\2\2\2\u039c\u039d\3\2\2\2\u039d\u039e")
buf.write("\3\2\2\2\u039e\u039f\5*\26\2\u039f\u03a0\7@\2\2\u03a0")
buf.write("\u03b6\3\2\2\2\u03a1\u03a2\7?\2\2\u03a2\u03a3\5p9\2\u03a3")
buf.write("\u03a4\7*\2\2\u03a4\u03a5\5*\26\2\u03a5\u03a6\7@\2\2\u03a6")
buf.write("\u03b6\3\2\2\2\u03a7\u03a8\7?\2\2\u03a8\u03a9\7M\2\2\u03a9")
buf.write("\u03b6\7@\2\2\u03aa\u03ac\7=\2\2\u03ab\u03ad\5r:\2\u03ac")
buf.write("\u03ab\3\2\2\2\u03ac\u03ad\3\2\2\2\u03ad\u03ae\3\2\2\2")
buf.write("\u03ae\u03b2\7>\2\2\u03af\u03b1\5d\63\2\u03b0\u03af\3")
buf.write("\2\2\2\u03b1\u03b4\3\2\2\2\u03b2\u03b0\3\2\2\2\u03b2\u03b3")
buf.write("\3\2\2\2\u03b3\u03b6\3\2\2\2\u03b4\u03b2\3\2\2\2\u03b5")
buf.write("\u0387\3\2\2\2\u03b5\u0391\3\2\2\2\u03b5\u0399\3\2\2\2")
buf.write("\u03b5\u03a1\3\2\2\2\u03b5\u03a7\3\2\2\2\u03b5\u03aa\3")
buf.write("\2\2\2\u03b6\u03e2\3\2\2\2\u03b7\u03b8\f\7\2\2\u03b8\u03ba")
buf.write("\7?\2\2\u03b9\u03bb\5p9\2\u03ba\u03b9\3\2\2\2\u03ba\u03bb")
buf.write("\3\2\2\2\u03bb\u03bd\3\2\2\2\u03bc\u03be\5*\26\2\u03bd")
buf.write("\u03bc\3\2\2\2\u03bd\u03be\3\2\2\2\u03be\u03bf\3\2\2\2")
buf.write("\u03bf\u03e1\7@\2\2\u03c0\u03c1\f\6\2\2\u03c1\u03c2\7")
buf.write("?\2\2\u03c2\u03c4\7*\2\2\u03c3\u03c5\5p9\2\u03c4\u03c3")
buf.write("\3\2\2\2\u03c4\u03c5\3\2\2\2\u03c5\u03c6\3\2\2\2\u03c6")
buf.write("\u03c7\5*\26\2\u03c7\u03c8\7@\2\2\u03c8\u03e1\3\2\2\2")
buf.write("\u03c9\u03ca\f\5\2\2\u03ca\u03cb\7?\2\2\u03cb\u03cc\5")
buf.write("p9\2\u03cc\u03cd\7*\2\2\u03cd\u03ce\5*\26\2\u03ce\u03cf")
buf.write("\7@\2\2\u03cf\u03e1\3\2\2\2\u03d0\u03d1\f\4\2\2\u03d1")
buf.write("\u03d2\7?\2\2\u03d2\u03d3\7M\2\2\u03d3\u03e1\7@\2\2\u03d4")
buf.write("\u03d5\f\3\2\2\u03d5\u03d7\7=\2\2\u03d6\u03d8\5r:\2\u03d7")
buf.write("\u03d6\3\2\2\2\u03d7\u03d8\3\2\2\2\u03d8\u03d9\3\2\2\2")
buf.write("\u03d9\u03dd\7>\2\2\u03da\u03dc\5d\63\2\u03db\u03da\3")
buf.write("\2\2\2\u03dc\u03df\3\2\2\2\u03dd\u03db\3\2\2\2\u03dd\u03de")
buf.write("\3\2\2\2\u03de\u03e1\3\2\2\2\u03df\u03dd\3\2\2\2\u03e0")
buf.write("\u03b7\3\2\2\2\u03e0\u03c0\3\2\2\2\u03e0\u03c9\3\2\2\2")
buf.write("\u03e0\u03d0\3\2\2\2\u03e0\u03d4\3\2\2\2\u03e1\u03e4\3")
buf.write("\2\2\2\u03e2\u03e0\3\2\2\2\u03e2\u03e3\3\2\2\2\u03e3\177")
buf.write("\3\2\2\2\u03e4\u03e2\3\2\2\2\u03e5\u03e6\7k\2\2\u03e6")
buf.write("\u0081\3\2\2\2\u03e7\u03f2\5*\26\2\u03e8\u03e9\7A\2\2")
buf.write("\u03e9\u03ea\5\u0084C\2\u03ea\u03eb\7B\2\2\u03eb\u03f2")
buf.write("\3\2\2\2\u03ec\u03ed\7A\2\2\u03ed\u03ee\5\u0084C\2\u03ee")
buf.write("\u03ef\7Z\2\2\u03ef\u03f0\7B\2\2\u03f0\u03f2\3\2\2\2\u03f1")
buf.write("\u03e7\3\2\2\2\u03f1\u03e8\3\2\2\2\u03f1\u03ec\3\2\2\2")
buf.write("\u03f2\u0083\3\2\2\2\u03f3\u03f5\bC\1\2\u03f4\u03f6\5")
buf.write("\u0086D\2\u03f5\u03f4\3\2\2\2\u03f5\u03f6\3\2\2\2\u03f6")
buf.write("\u03f7\3\2\2\2\u03f7\u03f8\5\u0082B\2\u03f8\u0401\3\2")
buf.write("\2\2\u03f9\u03fa\f\3\2\2\u03fa\u03fc\7Z\2\2\u03fb\u03fd")
buf.write("\5\u0086D\2\u03fc\u03fb\3\2\2\2\u03fc\u03fd\3\2\2\2\u03fd")
buf.write("\u03fe\3\2\2\2\u03fe\u0400\5\u0082B\2\u03ff\u03f9\3\2")
buf.write("\2\2\u0400\u0403\3\2\2\2\u0401\u03ff\3\2\2\2\u0401\u0402")
buf.write("\3\2\2\2\u0402\u0085\3\2\2\2\u0403\u0401\3\2\2\2\u0404")
buf.write("\u0405\5\u0088E\2\u0405\u0406\7[\2\2\u0406\u0087\3\2\2")
buf.write("\2\u0407\u0408\bE\1\2\u0408\u0409\5\u008aF\2\u0409\u040e")
buf.write("\3\2\2\2\u040a\u040b\f\3\2\2\u040b\u040d\5\u008aF\2\u040c")
buf.write("\u040a\3\2\2\2\u040d\u0410\3\2\2\2\u040e\u040c\3\2\2\2")
buf.write("\u040e\u040f\3\2\2\2\u040f\u0089\3\2\2\2\u0410\u040e\3")
buf.write("\2\2\2\u0411\u0412\7?\2\2\u0412\u0413\5\60\31\2\u0413")
buf.write("\u0414\7@\2\2\u0414\u0418\3\2\2\2\u0415\u0416\7i\2\2\u0416")
buf.write("\u0418\7k\2\2\u0417\u0411\3\2\2\2\u0417\u0415\3\2\2\2")
buf.write("\u0418\u008b\3\2\2\2\u0419\u041a\7;\2\2\u041a\u041b\7")
buf.write("=\2\2\u041b\u041c\5\60\31\2\u041c\u041e\7Z\2\2\u041d\u041f")
buf.write("\7m\2\2\u041e\u041d\3\2\2\2\u041f\u0420\3\2\2\2\u0420")
buf.write("\u041e\3\2\2\2\u0420\u0421\3\2\2\2\u0421\u0422\3\2\2\2")
buf.write("\u0422\u0423\7>\2\2\u0423\u0424\7Y\2\2\u0424\u008d\3\2")
buf.write("\2\2\u0425\u044b\5\u0090I\2\u0426\u044b\5\u0092J\2\u0427")
buf.write("\u044b\5\u0098M\2\u0428\u044b\5\u009aN\2\u0429\u044b\5")
buf.write("\u009cO\2\u042a\u044b\5\u009eP\2\u042b\u042c\t\f\2\2\u042c")
buf.write("\u042d\t\r\2\2\u042d\u0436\7=\2\2\u042e\u0433\5&\24\2")
buf.write("\u042f\u0430\7Z\2\2\u0430\u0432\5&\24\2\u0431\u042f\3")
buf.write("\2\2\2\u0432\u0435\3\2\2\2\u0433\u0431\3\2\2\2\u0433\u0434")
buf.write("\3\2\2\2\u0434\u0437\3\2\2\2\u0435\u0433\3\2\2\2\u0436")
buf.write("\u042e\3\2\2\2\u0436\u0437\3\2\2\2\u0437\u0445\3\2\2\2")
buf.write("\u0438\u0441\7X\2\2\u0439\u043e\5&\24\2\u043a\u043b\7")
buf.write("Z\2\2\u043b\u043d\5&\24\2\u043c\u043a\3\2\2\2\u043d\u0440")
buf.write("\3\2\2\2\u043e\u043c\3\2\2\2\u043e\u043f\3\2\2\2\u043f")
buf.write("\u0442\3\2\2\2\u0440\u043e\3\2\2\2\u0441\u0439\3\2\2\2")
buf.write("\u0441\u0442\3\2\2\2\u0442\u0444\3\2\2\2\u0443\u0438\3")
buf.write("\2\2\2\u0444\u0447\3\2\2\2\u0445\u0443\3\2\2\2\u0445\u0446")
buf.write("\3\2\2\2\u0446\u0448\3\2\2\2\u0447\u0445\3\2\2\2\u0448")
buf.write("\u0449\7>\2\2\u0449\u044b\7Y\2\2\u044a\u0425\3\2\2\2\u044a")
buf.write("\u0426\3\2\2\2\u044a\u0427\3\2\2\2\u044a\u0428\3\2\2\2")
buf.write("\u044a\u0429\3\2\2\2\u044a\u042a\3\2\2\2\u044a\u042b\3")
buf.write("\2\2\2\u044b\u008f\3\2\2\2\u044c\u044d\7k\2\2\u044d\u044e")
buf.write("\7X\2\2\u044e\u0458\5\u008eH\2\u044f\u0450\7\23\2\2\u0450")
buf.write("\u0451\5\60\31\2\u0451\u0452\7X\2\2\u0452\u0453\5\u008e")
buf.write("H\2\u0453\u0458\3\2\2\2\u0454\u0455\7\27\2\2\u0455\u0456")
buf.write("\7X\2\2\u0456\u0458\5\u008eH\2\u0457\u044c\3\2\2\2\u0457")
buf.write("\u044f\3\2\2\2\u0457\u0454\3\2\2\2\u0458\u0091\3\2\2\2")
buf.write("\u0459\u045b\7A\2\2\u045a\u045c\5\u0094K\2\u045b\u045a")
buf.write("\3\2\2\2\u045b\u045c\3\2\2\2\u045c\u045d\3\2\2\2\u045d")
buf.write("\u045e\7B\2\2\u045e\u0093\3\2\2\2\u045f\u0460\bK\1\2\u0460")
buf.write("\u0461\5\u0096L\2\u0461\u0466\3\2\2\2\u0462\u0463\f\3")
buf.write("\2\2\u0463\u0465\5\u0096L\2\u0464\u0462\3\2\2\2\u0465")
buf.write("\u0468\3\2\2\2\u0466\u0464\3\2\2\2\u0466\u0467\3\2\2\2")
buf.write("\u0467\u0095\3\2\2\2\u0468\u0466\3\2\2\2\u0469\u046c\5")
buf.write("\62\32\2\u046a\u046c\5\u008eH\2\u046b\u0469\3\2\2\2\u046b")
buf.write("\u046a\3\2\2\2\u046c\u0097\3\2\2\2\u046d\u046f\5.\30\2")
buf.write("\u046e\u046d\3\2\2\2\u046e\u046f\3\2\2\2\u046f\u0470\3")
buf.write("\2\2\2\u0470\u0471\7Y\2\2\u0471\u0099\3\2\2\2\u0472\u0473")
buf.write("\7 \2\2\u0473\u0474\7=\2\2\u0474\u0475\5.\30\2\u0475\u0476")
buf.write("\7>\2\2\u0476\u0479\5\u008eH\2\u0477\u0478\7\32\2\2\u0478")
buf.write("\u047a\5\u008eH\2\u0479\u0477\3\2\2\2\u0479\u047a\3\2")
buf.write("\2\2\u047a\u0482\3\2\2\2\u047b\u047c\7,\2\2\u047c\u047d")
buf.write("\7=\2\2\u047d\u047e\5.\30\2\u047e\u047f\7>\2\2\u047f\u0480")
buf.write("\5\u008eH\2\u0480\u0482\3\2\2\2\u0481\u0472\3\2\2\2\u0481")
buf.write("\u047b\3\2\2\2\u0482\u009b\3\2\2\2\u0483\u0484\7\62\2")
buf.write("\2\u0484\u0485\7=\2\2\u0485\u0486\5.\30\2\u0486\u0487")
buf.write("\7>\2\2\u0487\u0488\5\u008eH\2\u0488\u04ae\3\2\2\2\u0489")
buf.write("\u048a\7\30\2\2\u048a\u048b\5\u008eH\2\u048b\u048c\7\62")
buf.write("\2\2\u048c\u048d\7=\2\2\u048d\u048e\5.\30\2\u048e\u048f")
buf.write("\7>\2\2\u048f\u0490\7Y\2\2\u0490\u04ae\3\2\2\2\u0491\u0492")
buf.write("\7\36\2\2\u0492\u0494\7=\2\2\u0493\u0495\5.\30\2\u0494")
buf.write("\u0493\3\2\2\2\u0494\u0495\3\2\2\2\u0495\u0496\3\2\2\2")
buf.write("\u0496\u0498\7Y\2\2\u0497\u0499\5.\30\2\u0498\u0497\3")
buf.write("\2\2\2\u0498\u0499\3\2\2\2\u0499\u049a\3\2\2\2\u049a\u049c")
buf.write("\7Y\2\2\u049b\u049d\5.\30\2\u049c\u049b\3\2\2\2\u049c")
buf.write("\u049d\3\2\2\2\u049d\u049e\3\2\2\2\u049e\u049f\7>\2\2")
buf.write("\u049f\u04ae\5\u008eH\2\u04a0\u04a1\7\36\2\2\u04a1\u04a2")
buf.write("\7=\2\2\u04a2\u04a4\5\62\32\2\u04a3\u04a5\5.\30\2\u04a4")
buf.write("\u04a3\3\2\2\2\u04a4\u04a5\3\2\2\2\u04a5\u04a6\3\2\2\2")
buf.write("\u04a6\u04a8\7Y\2\2\u04a7\u04a9\5.\30\2\u04a8\u04a7\3")
buf.write("\2\2\2\u04a8\u04a9\3\2\2\2\u04a9\u04aa\3\2\2\2\u04aa\u04ab")
buf.write("\7>\2\2\u04ab\u04ac\5\u008eH\2\u04ac\u04ae\3\2\2\2\u04ad")
buf.write("\u0483\3\2\2\2\u04ad\u0489\3\2\2\2\u04ad\u0491\3\2\2\2")
buf.write("\u04ad\u04a0\3\2\2\2\u04ae\u009d\3\2\2\2\u04af\u04b0\7")
buf.write("\37\2\2\u04b0\u04b1\7k\2\2\u04b1\u04c0\7Y\2\2\u04b2\u04b3")
buf.write("\7\26\2\2\u04b3\u04c0\7Y\2\2\u04b4\u04b5\7\22\2\2\u04b5")
buf.write("\u04c0\7Y\2\2\u04b6\u04b8\7&\2\2\u04b7\u04b9\5.\30\2\u04b8")
buf.write("\u04b7\3\2\2\2\u04b8\u04b9\3\2\2\2\u04b9\u04ba\3\2\2\2")
buf.write("\u04ba\u04c0\7Y\2\2\u04bb\u04bc\7\37\2\2\u04bc\u04bd\5")
buf.write("\16\b\2\u04bd\u04be\7Y\2\2\u04be\u04c0\3\2\2\2\u04bf\u04af")
buf.write("\3\2\2\2\u04bf\u04b2\3\2\2\2\u04bf\u04b4\3\2\2\2\u04bf")
buf.write("\u04b6\3\2\2\2\u04bf\u04bb\3\2\2\2\u04c0\u009f\3\2\2\2")
buf.write("\u04c1\u04c3\5\u00a2R\2\u04c2\u04c1\3\2\2\2\u04c2\u04c3")
buf.write("\3\2\2\2\u04c3\u04c4\3\2\2\2\u04c4\u04c5\7\2\2\3\u04c5")
buf.write("\u00a1\3\2\2\2\u04c6\u04c7\bR\1\2\u04c7\u04c8\5\u00a4")
buf.write("S\2\u04c8\u04cd\3\2\2\2\u04c9\u04ca\f\3\2\2\u04ca\u04cc")
buf.write("\5\u00a4S\2\u04cb\u04c9\3\2\2\2\u04cc\u04cf\3\2\2\2\u04cd")
buf.write("\u04cb\3\2\2\2\u04cd\u04ce\3\2\2\2\u04ce\u00a3\3\2\2\2")
buf.write("\u04cf\u04cd\3\2\2\2\u04d0\u04d4\5\u00a6T\2\u04d1\u04d4")
buf.write("\5\62\32\2\u04d2\u04d4\7Y\2\2\u04d3\u04d0\3\2\2\2\u04d3")
buf.write("\u04d1\3\2\2\2\u04d3\u04d2\3\2\2\2\u04d4\u00a5\3\2\2\2")
buf.write("\u04d5\u04d7\5\64\33\2\u04d6\u04d5\3\2\2\2\u04d6\u04d7")
buf.write("\3\2\2\2\u04d7\u04d8\3\2\2\2\u04d8\u04da\5`\61\2\u04d9")
buf.write("\u04db\5\u00a8U\2\u04da\u04d9\3\2\2\2\u04da\u04db\3\2")
buf.write("\2\2\u04db\u04dc\3\2\2\2\u04dc\u04dd\5\u0092J\2\u04dd")
buf.write("\u00a7\3\2\2\2\u04de\u04df\bU\1\2\u04df\u04e0\5\62\32")
buf.write("\2\u04e0\u04e5\3\2\2\2\u04e1\u04e2\f\3\2\2\u04e2\u04e4")
buf.write("\5\62\32\2\u04e3\u04e1\3\2\2\2\u04e4\u04e7\3\2\2\2\u04e5")
buf.write("\u04e3\3\2\2\2\u04e5\u04e6\3\2\2\2\u04e6\u00a9\3\2\2\2")
buf.write("\u04e7\u04e5\3\2\2\2\u008c\u00af\u00b7\u00cb\u00dc\u00e6")
buf.write("\u010a\u0114\u0121\u0123\u012e\u0147\u0157\u0165\u0167")
buf.write("\u0173\u0175\u0181\u0183\u0195\u0197\u01a3\u01a5\u01b0")
buf.write("\u01bb\u01c6\u01d1\u01dc\u01e5\u01ec\u01f8\u01ff\u0204")
buf.write("\u0209\u020e\u0215\u021f\u0227\u0239\u023d\u0246\u0251")
buf.write("\u0256\u025b\u025f\u0263\u0265\u026f\u0274\u0278\u027c")
buf.write("\u0284\u028d\u0297\u029f\u02b0\u02bc\u02bf\u02c5\u02ce")
buf.write("\u02d3\u02d6\u02dd\u02ec\u02f8\u02fb\u02fd\u0305\u0309")
buf.write("\u0317\u031b\u0320\u0323\u0326\u032d\u032f\u0334\u0338")
buf.write("\u033d\u0341\u0344\u034d\u0355\u035f\u0367\u0369\u0373")
buf.write("\u0378\u037c\u0382\u0385\u038e\u0393\u0396\u039c\u03ac")
buf.write("\u03b2\u03b5\u03ba\u03bd\u03c4\u03d7\u03dd\u03e0\u03e2")
buf.write("\u03f1\u03f5\u03fc\u0401\u040e\u0417\u0420\u0433\u0436")
buf.write("\u043e\u0441\u0445\u044a\u0457\u045b\u0466\u046b\u046e")
buf.write("\u0479\u0481\u0494\u0498\u049c\u04a4\u04a8\u04ad\u04b8")
buf.write("\u04bf\u04c2\u04cd\u04d3\u04d6\u04da\u04e5")
return buf.getvalue()
class CParser ( Parser ):
grammarFileName = "C.bnf"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'__extension__'", "'__builtin_va_arg'",
"'__builtin_offsetof'", "'__m128'", "'__m128d'", "'__m128i'",
"'__typeof__'", "'__inline__'", "'__stdcall'", "'__declspec'",
"'__asm'", "'__attribute__'", "'__asm__'", "'__volatile__'",
"'auto'", "'break'", "'case'", "'char'", "'const'",
"'continue'", "'default'", "'do'", "'double'", "'else'",
"'enum'", "'extern'", "'float'", "'for'", "'goto'",
"'if'", "'inline'", "'int'", "'long'", "'register'",
"'restrict'", "'return'", "'short'", "'signed'", "'sizeof'",
"'static'", "'struct'", "'switch'", "'typedef'", "'union'",
"'unsigned'", "'void'", "'volatile'", "'while'", "'_Alignas'",
"'_Alignof'", "'_Atomic'", "'_Bool'", "'_Complex'",
"'_Generic'", "'_Imaginary'", "'_Noreturn'", "'_Static_assert'",
"'_Thread_local'", "'('", "')'", "'['", "']'", "'{'",
"'}'", "'<'", "'<='", "'>'", "'>='", "'<<'", "'>>'",
"'+'", "'++'", "'-'", "'--'", "'*'", "'/'", "'%'",
"'&'", "'|'", "'&&'", "'||'", "'^'", "'!'", "'~'",
"'?'", "':'", "';'", "','", "'='", "'*='", "'/='",
"'%='", "'+='", "'-='", "'<<='", "'>>='", "'&='", "'^='",
"'|='", "'=='", "'!='", "'->'", "'.'", "'...'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "Auto", "Break",
"Case", "Char", "Const", "Continue", "Default", "Do",
"Double", "Else", "Enum", "Extern", "Float", "For",
"Goto", "If", "Inline", "Int", "Long", "Register",
"Restrict", "Return", "Short", "Signed", "Sizeof",
"Static", "Struct", "Switch", "Typedef", "Union",
"Unsigned", "Void", "Volatile", "While", "Alignas",
"Alignof", "Atomic", "Bool", "Complex", "Generic",
"Imaginary", "Noreturn", "StaticAssert", "ThreadLocal",
"LeftParen", "RightParen", "LeftBracket", "RightBracket",
"LeftBrace", "RightBrace", "Less", "LessEqual", "Greater",
"GreaterEqual", "LeftShift", "RightShift", "Plus",
"PlusPlus", "Minus", "MinusMinus", "Star", "Div",
"Mod", "And", "Or", "AndAnd", "OrOr", "Caret", "Not",
"Tilde", "Question", "Colon", "Semi", "Comma", "Assign",
"StarAssign", "DivAssign", "ModAssign", "PlusAssign",
"MinusAssign", "LeftShiftAssign", "RightShiftAssign",
"AndAssign", "XorAssign", "OrAssign", "Equal", "NotEqual",
"Arrow", "Dot", "Ellipsis", "Identifier", "Constant",
"StringLiteral", "LineDirective", "PragmaDirective",
"Whitespace", "Newline", "BlockComment", "LineComment" ]
RULE_primaryExpression = 0
RULE_genericSelection = 1
RULE_genericAssocList = 2
RULE_genericAssociation = 3
RULE_postfixExpression = 4
RULE_argumentExpressionList = 5
RULE_unaryExpression = 6
RULE_unaryOperator = 7
RULE_castExpression = 8
RULE_multiplicativeExpression = 9
RULE_additiveExpression = 10
RULE_shiftExpression = 11
RULE_relationalExpression = 12
RULE_equalityExpression = 13
RULE_andExpression = 14
RULE_exclusiveOrExpression = 15
RULE_inclusiveOrExpression = 16
RULE_logicalAndExpression = 17
RULE_logicalOrExpression = 18
RULE_conditionalExpression = 19
RULE_assignmentExpression = 20
RULE_assignmentOperator = 21
RULE_expression = 22
RULE_constantExpression = 23
RULE_declaration = 24
RULE_declarationSpecifiers = 25
RULE_declarationSpecifiers2 = 26
RULE_declarationSpecifier = 27
RULE_initDeclaratorList = 28
RULE_initDeclarator = 29
RULE_storageClassSpecifier = 30
RULE_typeSpecifier = 31
RULE_structOrUnionSpecifier = 32
RULE_structOrUnion = 33
RULE_structDeclarationList = 34
RULE_structDeclaration = 35
RULE_specifierQualifierList = 36
RULE_structDeclaratorList = 37
RULE_structDeclarator = 38
RULE_enumSpecifier = 39
RULE_enumeratorList = 40
RULE_enumerator = 41
RULE_enumerationConstant = 42
RULE_atomicTypeSpecifier = 43
RULE_typeQualifier = 44
RULE_functionSpecifier = 45
RULE_alignmentSpecifier = 46
RULE_declarator = 47
RULE_directDeclarator = 48
RULE_gccDeclaratorExtension = 49
RULE_gccAttributeSpecifier = 50
RULE_gccAttributeList = 51
RULE_gccAttribute = 52
RULE_nestedParenthesesBlock = 53
RULE_pointer = 54
RULE_typeQualifierList = 55
RULE_parameterTypeList = 56
RULE_parameterList = 57
RULE_parameterDeclaration = 58
RULE_identifierList = 59
RULE_typeName = 60
RULE_abstractDeclarator = 61
RULE_directAbstractDeclarator = 62
RULE_typedefName = 63
RULE_initializer = 64
RULE_initializerList = 65
RULE_designation = 66
RULE_designatorList = 67
RULE_designator = 68
RULE_staticAssertDeclaration = 69
RULE_statement = 70
RULE_labeledStatement = 71
RULE_compoundStatement = 72
RULE_blockItemList = 73
RULE_blockItem = 74
RULE_expressionStatement = 75
RULE_selectionStatement = 76
RULE_iterationStatement = 77
RULE_jumpStatement = 78
RULE_compilationUnit = 79
RULE_translationUnit = 80
RULE_externalDeclaration = 81
RULE_functionDefinition = 82
RULE_declarationList = 83
ruleNames = [ "primaryExpression", "genericSelection", "genericAssocList",
"genericAssociation", "postfixExpression", "argumentExpressionList",
"unaryExpression", "unaryOperator", "castExpression",
"multiplicativeExpression", "additiveExpression", "shiftExpression",
"relationalExpression", "equalityExpression", "andExpression",
"exclusiveOrExpression", "inclusiveOrExpression", "logicalAndExpression",
"logicalOrExpression", "conditionalExpression", "assignmentExpression",
"assignmentOperator", "expression", "constantExpression",
"declaration", "declarationSpecifiers", "declarationSpecifiers2",
"declarationSpecifier", "initDeclaratorList", "initDeclarator",
"storageClassSpecifier", "typeSpecifier", "structOrUnionSpecifier",
"structOrUnion", "structDeclarationList", "structDeclaration",
"specifierQualifierList", "structDeclaratorList", "structDeclarator",
"enumSpecifier", "enumeratorList", "enumerator", "enumerationConstant",
"atomicTypeSpecifier", "typeQualifier", "functionSpecifier",
"alignmentSpecifier", "declarator", "directDeclarator",
"gccDeclaratorExtension", "gccAttributeSpecifier", "gccAttributeList",
"gccAttribute", "nestedParenthesesBlock", "pointer",
"typeQualifierList", "parameterTypeList", "parameterList",
"parameterDeclaration", "identifierList", "typeName",
"abstractDeclarator", "directAbstractDeclarator", "typedefName",
"initializer", "initializerList", "designation", "designatorList",
"designator", "staticAssertDeclaration", "statement",
"labeledStatement", "compoundStatement", "blockItemList",
"blockItem", "expressionStatement", "selectionStatement",
"iterationStatement", "jumpStatement", "compilationUnit",
"translationUnit", "externalDeclaration", "functionDefinition",
"declarationList" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
Auto=15
Break=16
Case=17
Char=18
Const=19
Continue=20
Default=21
Do=22
Double=23
Else=24
Enum=25
Extern=26
Float=27
For=28
Goto=29
If=30
Inline=31
Int=32
Long=33
Register=34
Restrict=35
Return=36
Short=37
Signed=38
Sizeof=39
Static=40
Struct=41
Switch=42
Typedef=43
Union=44
Unsigned=45
Void=46
Volatile=47
While=48
Alignas=49
Alignof=50
Atomic=51
Bool=52
Complex=53
Generic=54
Imaginary=55
Noreturn=56
StaticAssert=57
ThreadLocal=58
LeftParen=59
RightParen=60
LeftBracket=61
RightBracket=62
LeftBrace=63
RightBrace=64
Less=65
LessEqual=66
Greater=67
GreaterEqual=68
LeftShift=69
RightShift=70
Plus=71
PlusPlus=72
Minus=73
MinusMinus=74
Star=75
Div=76
Mod=77
And=78
Or=79
AndAnd=80
OrOr=81
Caret=82
Not=83
Tilde=84
Question=85
Colon=86
Semi=87
Comma=88
Assign=89
StarAssign=90
DivAssign=91
ModAssign=92
PlusAssign=93
MinusAssign=94
LeftShiftAssign=95
RightShiftAssign=96
AndAssign=97
XorAssign=98
OrAssign=99
Equal=100
NotEqual=101
Arrow=102
Dot=103
Ellipsis=104
Identifier=105
Constant=106
StringLiteral=107
LineDirective=108
PragmaDirective=109
Whitespace=110
Newline=111
BlockComment=112
LineComment=113
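# Token-type constants (mirroring the ones generated for the lexer) so that
# the rule methods below can refer to tokens as CParser.<Name> instead of raw
# integers.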
def __init__(self, input:TokenStream):
super().__init__(input)
self.checkVersion("4.8")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
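# Every grammar rule below follows the same generated pattern: a
# ParserRuleContext subclass exposing typed accessors for the rule's children
# plus listener/visitor hooks, followed by the parse method that actually
# recognizes the rule.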
class PrimaryExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def Constant(self):
return self.getToken(CParser.Constant, 0)
def StringLiteral(self, i:int=None):
if i is None:
return self.getTokens(CParser.StringLiteral)
else:
return self.getToken(CParser.StringLiteral, i)
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def genericSelection(self):
return self.getTypedRuleContext(CParser.GenericSelectionContext,0)
def compoundStatement(self):
return self.getTypedRuleContext(CParser.CompoundStatementContext,0)
def unaryExpression(self):
return self.getTypedRuleContext(CParser.UnaryExpressionContext,0)
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def getRuleIndex(self):
return CParser.RULE_primaryExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrimaryExpression" ):
listener.enterPrimaryExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrimaryExpression" ):
listener.exitPrimaryExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPrimaryExpression" ):
return visitor.visitPrimaryExpression(self)
else:
return visitor.visitChildren(self)
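# primaryExpression dispatches over eight alternatives via adaptivePredict:
# identifier, constant, one-or-more string literals, a parenthesized
# expression, a generic selection, and three branches using the implicit
# tokens T__0..T__2, which (judging by their argument shapes) appear to be the
# GCC '__extension__' statement-expression, '__builtin_va_arg', and
# '__builtin_offsetof' literals from the grammar.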
def primaryExpression(self):
localctx = CParser.PrimaryExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_primaryExpression)
self._la = 0 # Token type
try:
self.state = 201
la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 168
self.match(CParser.Identifier)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 169
self.match(CParser.Constant)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 171
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 170
self.match(CParser.StringLiteral)
else:
raise NoViableAltException(self)
self.state = 173
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,0,self._ctx)
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 175
self.match(CParser.LeftParen)
self.state = 176
self.expression(0)
self.state = 177
self.match(CParser.RightParen)
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 179
self.genericSelection()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 181
_la = self._input.LA(1)
if _la==CParser.T__0:
self.state = 180
self.match(CParser.T__0)
self.state = 183
self.match(CParser.LeftParen)
self.state = 184
self.compoundStatement()
self.state = 185
self.match(CParser.RightParen)
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 187
self.match(CParser.T__1)
self.state = 188
self.match(CParser.LeftParen)
self.state = 189
self.unaryExpression()
self.state = 190
self.match(CParser.Comma)
self.state = 191
self.typeName()
self.state = 192
self.match(CParser.RightParen)
pass
elif la_ == 8:
self.enterOuterAlt(localctx, 8)
self.state = 194
self.match(CParser.T__2)
self.state = 195
self.match(CParser.LeftParen)
self.state = 196
self.typeName()
self.state = 197
self.match(CParser.Comma)
self.state = 198
self.unaryExpression()
self.state = 199
self.match(CParser.RightParen)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GenericSelectionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def genericAssocList(self):
return self.getTypedRuleContext(CParser.GenericAssocListContext,0)
def getRuleIndex(self):
return CParser.RULE_genericSelection
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGenericSelection" ):
listener.enterGenericSelection(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGenericSelection" ):
listener.exitGenericSelection(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGenericSelection" ):
return visitor.visitGenericSelection(self)
else:
return visitor.visitChildren(self)
def genericSelection(self):
localctx = CParser.GenericSelectionContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_genericSelection)
try:
self.enterOuterAlt(localctx, 1)
self.state = 203
self.match(CParser.Generic)
self.state = 204
self.match(CParser.LeftParen)
self.state = 205
self.assignmentExpression()
self.state = 206
self.match(CParser.Comma)
self.state = 207
self.genericAssocList(0)
self.state = 208
self.match(CParser.RightParen)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GenericAssocListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def genericAssociation(self):
return self.getTypedRuleContext(CParser.GenericAssociationContext,0)
def genericAssocList(self):
return self.getTypedRuleContext(CParser.GenericAssocListContext,0)
def getRuleIndex(self):
return CParser.RULE_genericAssocList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGenericAssocList" ):
listener.enterGenericAssocList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGenericAssocList" ):
listener.exitGenericAssocList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGenericAssocList" ):
return visitor.visitGenericAssocList(self)
else:
return visitor.visitChildren(self)
def genericAssocList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.GenericAssocListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 4
self.enterRecursionRule(localctx, 4, self.RULE_genericAssocList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 211
self.genericAssociation()
self._ctx.stop = self._input.LT(-1)
self.state = 218
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,3,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.GenericAssocListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_genericAssocList)
self.state = 213
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 214
self.match(CParser.Comma)
self.state = 215
self.genericAssociation()
self.state = 220
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,3,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
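# genericAssocList is written left-recursively in the grammar
# (list ',' association); ANTLR rewrites such rules into a loop guarded by
# precpred() precedence predicates, using enterRecursionRule /
# pushNewRecursionContext / unrollRecursionContexts.  The same shape recurs in
# postfixExpression and in all the binary-operator expression rules below.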
class GenericAssociationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_genericAssociation
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGenericAssociation" ):
listener.enterGenericAssociation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGenericAssociation" ):
listener.exitGenericAssociation(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGenericAssociation" ):
return visitor.visitGenericAssociation(self)
else:
return visitor.visitChildren(self)
def genericAssociation(self):
localctx = CParser.GenericAssociationContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_genericAssociation)
try:
self.state = 228
token = self._input.LA(1)
if token in [CParser.T__0, CParser.T__3, CParser.T__4, CParser.T__5, CParser.T__6, CParser.Char, CParser.Const, CParser.Double, CParser.Enum, CParser.Float, CParser.Int, CParser.Long, CParser.Restrict, CParser.Short, CParser.Signed, CParser.Struct, CParser.Union, CParser.Unsigned, CParser.Void, CParser.Volatile, CParser.Atomic, CParser.Bool, CParser.Complex, CParser.Identifier]:
self.enterOuterAlt(localctx, 1)
self.state = 221
self.typeName()
self.state = 222
self.match(CParser.Colon)
self.state = 223
self.assignmentExpression()
elif token in [CParser.Default]:
self.enterOuterAlt(localctx, 2)
self.state = 225
self.match(CParser.Default)
self.state = 226
self.match(CParser.Colon)
self.state = 227
self.assignmentExpression()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PostfixExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def primaryExpression(self):
return self.getTypedRuleContext(CParser.PrimaryExpressionContext,0)
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def initializerList(self):
return self.getTypedRuleContext(CParser.InitializerListContext,0)
def postfixExpression(self):
return self.getTypedRuleContext(CParser.PostfixExpressionContext,0)
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def argumentExpressionList(self):
return self.getTypedRuleContext(CParser.ArgumentExpressionListContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_postfixExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPostfixExpression" ):
listener.enterPostfixExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPostfixExpression" ):
listener.exitPostfixExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPostfixExpression" ):
return visitor.visitPostfixExpression(self)
else:
return visitor.visitChildren(self)
def postfixExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.PostfixExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 8
self.enterRecursionRule(localctx, 8, self.RULE_postfixExpression, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 264
la_ = self._interp.adaptivePredict(self._input,5,self._ctx)
if la_ == 1:
self.state = 231
self.primaryExpression()
pass
elif la_ == 2:
self.state = 232
self.match(CParser.LeftParen)
self.state = 233
self.typeName()
self.state = 234
self.match(CParser.RightParen)
self.state = 235
self.match(CParser.LeftBrace)
self.state = 236
self.initializerList(0)
self.state = 237
self.match(CParser.RightBrace)
pass
elif la_ == 3:
self.state = 239
self.match(CParser.LeftParen)
self.state = 240
self.typeName()
self.state = 241
self.match(CParser.RightParen)
self.state = 242
self.match(CParser.LeftBrace)
self.state = 243
self.initializerList(0)
self.state = 244
self.match(CParser.Comma)
self.state = 245
self.match(CParser.RightBrace)
pass
elif la_ == 4:
self.state = 247
self.match(CParser.T__0)
self.state = 248
self.match(CParser.LeftParen)
self.state = 249
self.typeName()
self.state = 250
self.match(CParser.RightParen)
self.state = 251
self.match(CParser.LeftBrace)
self.state = 252
self.initializerList(0)
self.state = 253
self.match(CParser.RightBrace)
pass
elif la_ == 5:
self.state = 255
self.match(CParser.T__0)
self.state = 256
self.match(CParser.LeftParen)
self.state = 257
self.typeName()
self.state = 258
self.match(CParser.RightParen)
self.state = 259
self.match(CParser.LeftBrace)
self.state = 260
self.initializerList(0)
self.state = 261
self.match(CParser.Comma)
self.state = 262
self.match(CParser.RightBrace)
pass
self._ctx.stop = self._input.LT(-1)
self.state = 289
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,8,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 287
la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
if la_ == 1:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 266
if not self.precpred(self._ctx, 10):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 10)")
self.state = 267
self.match(CParser.LeftBracket)
self.state = 268
self.expression(0)
self.state = 269
self.match(CParser.RightBracket)
pass
elif la_ == 2:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 271
if not self.precpred(self._ctx, 9):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 9)")
self.state = 272
self.match(CParser.LeftParen)
self.state = 274
_la = self._input.LA(1)
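# Compiled token-set membership test: the call's argument list is optional,
# so argumentExpressionList is parsed only when the lookahead token can start
# an expression (the bit masks below encode that FIRST set).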
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 273
self.argumentExpressionList(0)
self.state = 276
self.match(CParser.RightParen)
pass
elif la_ == 3:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 277
if not self.precpred(self._ctx, 8):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 8)")
self.state = 278
self.match(CParser.Dot)
self.state = 279
self.match(CParser.Identifier)
pass
elif la_ == 4:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 280
if not self.precpred(self._ctx, 7):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 7)")
self.state = 281
self.match(CParser.Arrow)
self.state = 282
self.match(CParser.Identifier)
pass
elif la_ == 5:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 283
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 6)")
self.state = 284
self.match(CParser.PlusPlus)
pass
elif la_ == 6:
localctx = CParser.PostfixExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
self.state = 285
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 286
self.match(CParser.MinusMinus)
pass
self.state = 291
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,8,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ArgumentExpressionListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def argumentExpressionList(self):
return self.getTypedRuleContext(CParser.ArgumentExpressionListContext,0)
def getRuleIndex(self):
return CParser.RULE_argumentExpressionList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArgumentExpressionList" ):
listener.enterArgumentExpressionList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArgumentExpressionList" ):
listener.exitArgumentExpressionList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitArgumentExpressionList" ):
return visitor.visitArgumentExpressionList(self)
else:
return visitor.visitChildren(self)
def argumentExpressionList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.ArgumentExpressionListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 10
self.enterRecursionRule(localctx, 10, self.RULE_argumentExpressionList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 293
self.assignmentExpression()
self._ctx.stop = self._input.LT(-1)
self.state = 300
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,9,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.ArgumentExpressionListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_argumentExpressionList)
self.state = 295
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 296
self.match(CParser.Comma)
self.state = 297
self.assignmentExpression()
self.state = 302
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,9,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class UnaryExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def postfixExpression(self):
return self.getTypedRuleContext(CParser.PostfixExpressionContext,0)
def unaryExpression(self):
return self.getTypedRuleContext(CParser.UnaryExpressionContext,0)
def unaryOperator(self):
return self.getTypedRuleContext(CParser.UnaryOperatorContext,0)
def castExpression(self):
return self.getTypedRuleContext(CParser.CastExpressionContext,0)
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_unaryExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnaryExpression" ):
listener.enterUnaryExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnaryExpression" ):
listener.exitUnaryExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitUnaryExpression" ):
return visitor.visitUnaryExpression(self)
else:
return visitor.visitChildren(self)
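# unaryExpression covers postfix expressions, prefix ++/--, unary-operator
# cast expressions, sizeof/_Alignof forms, and (alternative 8, '&&'
# Identifier) what looks like the GCC address-of-label extension.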
def unaryExpression(self):
localctx = CParser.UnaryExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_unaryExpression)
try:
self.state = 325
la_ = self._interp.adaptivePredict(self._input,10,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 303
self.postfixExpression(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 304
self.match(CParser.PlusPlus)
self.state = 305
self.unaryExpression()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 306
self.match(CParser.MinusMinus)
self.state = 307
self.unaryExpression()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 308
self.unaryOperator()
self.state = 309
self.castExpression()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 311
self.match(CParser.Sizeof)
self.state = 312
self.unaryExpression()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 313
self.match(CParser.Sizeof)
self.state = 314
self.match(CParser.LeftParen)
self.state = 315
self.typeName()
self.state = 316
self.match(CParser.RightParen)
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 318
self.match(CParser.Alignof)
self.state = 319
self.match(CParser.LeftParen)
self.state = 320
self.typeName()
self.state = 321
self.match(CParser.RightParen)
pass
elif la_ == 8:
self.enterOuterAlt(localctx, 8)
self.state = 323
self.match(CParser.AndAnd)
self.state = 324
self.match(CParser.Identifier)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class UnaryOperatorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return CParser.RULE_unaryOperator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnaryOperator" ):
listener.enterUnaryOperator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnaryOperator" ):
listener.exitUnaryOperator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitUnaryOperator" ):
return visitor.visitUnaryOperator(self)
else:
return visitor.visitChildren(self)
def unaryOperator(self):
localctx = CParser.UnaryOperatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_unaryOperator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 327
_la = self._input.LA(1)
if not(((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CastExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def unaryExpression(self):
return self.getTypedRuleContext(CParser.UnaryExpressionContext,0)
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def castExpression(self):
return self.getTypedRuleContext(CParser.CastExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_castExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCastExpression" ):
listener.enterCastExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCastExpression" ):
listener.exitCastExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCastExpression" ):
return visitor.visitCastExpression(self)
else:
return visitor.visitChildren(self)
def castExpression(self):
localctx = CParser.CastExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_castExpression)
try:
self.state = 341
la_ = self._interp.adaptivePredict(self._input,11,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 329
self.unaryExpression()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 330
self.match(CParser.LeftParen)
self.state = 331
self.typeName()
self.state = 332
self.match(CParser.RightParen)
self.state = 333
self.castExpression()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 335
self.match(CParser.T__0)
self.state = 336
self.match(CParser.LeftParen)
self.state = 337
self.typeName()
self.state = 338
self.match(CParser.RightParen)
self.state = 339
self.castExpression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
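# The rules from multiplicativeExpression down to conditionalExpression form
# C's operator-precedence ladder: each level parses the next tighter level and
# then loops on its own operators (* / % , then + -, then << >>, then the
# relational, equality, &, ^, |, &&, || levels, and finally ?:).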
class MultiplicativeExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def castExpression(self):
return self.getTypedRuleContext(CParser.CastExpressionContext,0)
def multiplicativeExpression(self):
return self.getTypedRuleContext(CParser.MultiplicativeExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_multiplicativeExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMultiplicativeExpression" ):
listener.enterMultiplicativeExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMultiplicativeExpression" ):
listener.exitMultiplicativeExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitMultiplicativeExpression" ):
return visitor.visitMultiplicativeExpression(self)
else:
return visitor.visitChildren(self)
def multiplicativeExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.MultiplicativeExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 18
self.enterRecursionRule(localctx, 18, self.RULE_multiplicativeExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 344
self.castExpression()
self._ctx.stop = self._input.LT(-1)
self.state = 357
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,13,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 355
la_ = self._interp.adaptivePredict(self._input,12,self._ctx)
if la_ == 1:
localctx = CParser.MultiplicativeExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplicativeExpression)
self.state = 346
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 347
self.match(CParser.Star)
self.state = 348
self.castExpression()
pass
elif la_ == 2:
localctx = CParser.MultiplicativeExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplicativeExpression)
self.state = 349
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 350
self.match(CParser.Div)
self.state = 351
self.castExpression()
pass
elif la_ == 3:
localctx = CParser.MultiplicativeExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplicativeExpression)
self.state = 352
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 353
self.match(CParser.Mod)
self.state = 354
self.castExpression()
pass
self.state = 359
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,13,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class AdditiveExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def multiplicativeExpression(self):
return self.getTypedRuleContext(CParser.MultiplicativeExpressionContext,0)
def additiveExpression(self):
return self.getTypedRuleContext(CParser.AdditiveExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_additiveExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAdditiveExpression" ):
listener.enterAdditiveExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAdditiveExpression" ):
listener.exitAdditiveExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAdditiveExpression" ):
return visitor.visitAdditiveExpression(self)
else:
return visitor.visitChildren(self)
def additiveExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.AdditiveExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 20
self.enterRecursionRule(localctx, 20, self.RULE_additiveExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 361
self.multiplicativeExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 371
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,15,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 369
la_ = self._interp.adaptivePredict(self._input,14,self._ctx)
if la_ == 1:
localctx = CParser.AdditiveExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_additiveExpression)
self.state = 363
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 364
self.match(CParser.Plus)
self.state = 365
self.multiplicativeExpression(0)
pass
elif la_ == 2:
localctx = CParser.AdditiveExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_additiveExpression)
self.state = 366
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 367
self.match(CParser.Minus)
self.state = 368
self.multiplicativeExpression(0)
pass
self.state = 373
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,15,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ShiftExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def additiveExpression(self):
return self.getTypedRuleContext(CParser.AdditiveExpressionContext,0)
def shiftExpression(self):
return self.getTypedRuleContext(CParser.ShiftExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_shiftExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterShiftExpression" ):
listener.enterShiftExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitShiftExpression" ):
listener.exitShiftExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitShiftExpression" ):
return visitor.visitShiftExpression(self)
else:
return visitor.visitChildren(self)
def shiftExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.ShiftExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 22
self.enterRecursionRule(localctx, 22, self.RULE_shiftExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 375
self.additiveExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 385
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,17,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 383
la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
if la_ == 1:
localctx = CParser.ShiftExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_shiftExpression)
self.state = 377
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 378
self.match(CParser.LeftShift)
self.state = 379
self.additiveExpression(0)
pass
elif la_ == 2:
localctx = CParser.ShiftExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_shiftExpression)
self.state = 380
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 381
self.match(CParser.RightShift)
self.state = 382
self.additiveExpression(0)
pass
self.state = 387
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,17,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class RelationalExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def shiftExpression(self):
return self.getTypedRuleContext(CParser.ShiftExpressionContext,0)
def relationalExpression(self):
return self.getTypedRuleContext(CParser.RelationalExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_relationalExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRelationalExpression" ):
listener.enterRelationalExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRelationalExpression" ):
listener.exitRelationalExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRelationalExpression" ):
return visitor.visitRelationalExpression(self)
else:
return visitor.visitChildren(self)
def relationalExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.RelationalExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 24
self.enterRecursionRule(localctx, 24, self.RULE_relationalExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 389
self.shiftExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 405
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,19,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 403
la_ = self._interp.adaptivePredict(self._input,18,self._ctx)
if la_ == 1:
localctx = CParser.RelationalExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
self.state = 391
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 392
self.match(CParser.Less)
self.state = 393
self.shiftExpression(0)
pass
elif la_ == 2:
localctx = CParser.RelationalExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
self.state = 394
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 395
self.match(CParser.Greater)
self.state = 396
self.shiftExpression(0)
pass
elif la_ == 3:
localctx = CParser.RelationalExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
self.state = 397
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 398
self.match(CParser.LessEqual)
self.state = 399
self.shiftExpression(0)
pass
elif la_ == 4:
localctx = CParser.RelationalExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
self.state = 400
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 401
self.match(CParser.GreaterEqual)
self.state = 402
self.shiftExpression(0)
pass
self.state = 407
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,19,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class EqualityExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def relationalExpression(self):
return self.getTypedRuleContext(CParser.RelationalExpressionContext,0)
def equalityExpression(self):
return self.getTypedRuleContext(CParser.EqualityExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_equalityExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEqualityExpression" ):
listener.enterEqualityExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEqualityExpression" ):
listener.exitEqualityExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEqualityExpression" ):
return visitor.visitEqualityExpression(self)
else:
return visitor.visitChildren(self)
def equalityExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.EqualityExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 26
self.enterRecursionRule(localctx, 26, self.RULE_equalityExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 409
self.relationalExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 419
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 417
la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
if la_ == 1:
localctx = CParser.EqualityExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_equalityExpression)
self.state = 411
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 412
self.match(CParser.Equal)
self.state = 413
self.relationalExpression(0)
pass
elif la_ == 2:
localctx = CParser.EqualityExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_equalityExpression)
self.state = 414
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 415
self.match(CParser.NotEqual)
self.state = 416
self.relationalExpression(0)
pass
self.state = 421
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class AndExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def equalityExpression(self):
return self.getTypedRuleContext(CParser.EqualityExpressionContext,0)
def andExpression(self):
return self.getTypedRuleContext(CParser.AndExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_andExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAndExpression" ):
listener.enterAndExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAndExpression" ):
listener.exitAndExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAndExpression" ):
return visitor.visitAndExpression(self)
else:
return visitor.visitChildren(self)
def andExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.AndExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 28
self.enterRecursionRule(localctx, 28, self.RULE_andExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 423
self.equalityExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 430
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,22,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.AndExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_andExpression)
self.state = 425
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 426
self.match(CParser.And)
self.state = 427
self.equalityExpression(0)
self.state = 432
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,22,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ExclusiveOrExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def andExpression(self):
return self.getTypedRuleContext(CParser.AndExpressionContext,0)
def exclusiveOrExpression(self):
return self.getTypedRuleContext(CParser.ExclusiveOrExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_exclusiveOrExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExclusiveOrExpression" ):
listener.enterExclusiveOrExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExclusiveOrExpression" ):
listener.exitExclusiveOrExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExclusiveOrExpression" ):
return visitor.visitExclusiveOrExpression(self)
else:
return visitor.visitChildren(self)
def exclusiveOrExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.ExclusiveOrExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 30
self.enterRecursionRule(localctx, 30, self.RULE_exclusiveOrExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 434
self.andExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 441
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,23,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.ExclusiveOrExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_exclusiveOrExpression)
self.state = 436
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 437
self.match(CParser.Caret)
self.state = 438
self.andExpression(0)
self.state = 443
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,23,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class InclusiveOrExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def exclusiveOrExpression(self):
return self.getTypedRuleContext(CParser.ExclusiveOrExpressionContext,0)
def inclusiveOrExpression(self):
return self.getTypedRuleContext(CParser.InclusiveOrExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_inclusiveOrExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInclusiveOrExpression" ):
listener.enterInclusiveOrExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInclusiveOrExpression" ):
listener.exitInclusiveOrExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInclusiveOrExpression" ):
return visitor.visitInclusiveOrExpression(self)
else:
return visitor.visitChildren(self)
def inclusiveOrExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.InclusiveOrExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 32
self.enterRecursionRule(localctx, 32, self.RULE_inclusiveOrExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 445
self.exclusiveOrExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 452
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,24,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.InclusiveOrExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_inclusiveOrExpression)
self.state = 447
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 448
self.match(CParser.Or)
self.state = 449
self.exclusiveOrExpression(0)
self.state = 454
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,24,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class LogicalAndExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def inclusiveOrExpression(self):
return self.getTypedRuleContext(CParser.InclusiveOrExpressionContext,0)
def logicalAndExpression(self):
return self.getTypedRuleContext(CParser.LogicalAndExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_logicalAndExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLogicalAndExpression" ):
listener.enterLogicalAndExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLogicalAndExpression" ):
listener.exitLogicalAndExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLogicalAndExpression" ):
return visitor.visitLogicalAndExpression(self)
else:
return visitor.visitChildren(self)
def logicalAndExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.LogicalAndExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 34
self.enterRecursionRule(localctx, 34, self.RULE_logicalAndExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 456
self.inclusiveOrExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 463
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,25,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.LogicalAndExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_logicalAndExpression)
self.state = 458
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 459
self.match(CParser.AndAnd)
self.state = 460
self.inclusiveOrExpression(0)
self.state = 465
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,25,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class LogicalOrExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def logicalAndExpression(self):
return self.getTypedRuleContext(CParser.LogicalAndExpressionContext,0)
def logicalOrExpression(self):
return self.getTypedRuleContext(CParser.LogicalOrExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_logicalOrExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLogicalOrExpression" ):
listener.enterLogicalOrExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLogicalOrExpression" ):
listener.exitLogicalOrExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLogicalOrExpression" ):
return visitor.visitLogicalOrExpression(self)
else:
return visitor.visitChildren(self)
def logicalOrExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.LogicalOrExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 36
self.enterRecursionRule(localctx, 36, self.RULE_logicalOrExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 467
self.logicalAndExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 474
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,26,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.LogicalOrExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_logicalOrExpression)
self.state = 469
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 470
self.match(CParser.OrOr)
self.state = 471
self.logicalAndExpression(0)
self.state = 476
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,26,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ConditionalExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def logicalOrExpression(self):
return self.getTypedRuleContext(CParser.LogicalOrExpressionContext,0)
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def conditionalExpression(self):
return self.getTypedRuleContext(CParser.ConditionalExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_conditionalExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterConditionalExpression" ):
listener.enterConditionalExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitConditionalExpression" ):
listener.exitConditionalExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitConditionalExpression" ):
return visitor.visitConditionalExpression(self)
else:
return visitor.visitChildren(self)
def conditionalExpression(self):
localctx = CParser.ConditionalExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_conditionalExpression)
try:
self.enterOuterAlt(localctx, 1)
self.state = 477
self.logicalOrExpression(0)
self.state = 483
la_ = self._interp.adaptivePredict(self._input,27,self._ctx)
if la_ == 1:
self.state = 478
self.match(CParser.Question)
self.state = 479
self.expression(0)
self.state = 480
self.match(CParser.Colon)
self.state = 481
self.conditionalExpression()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AssignmentExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def conditionalExpression(self):
return self.getTypedRuleContext(CParser.ConditionalExpressionContext,0)
def unaryExpression(self):
return self.getTypedRuleContext(CParser.UnaryExpressionContext,0)
def assignmentOperator(self):
return self.getTypedRuleContext(CParser.AssignmentOperatorContext,0)
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_assignmentExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssignmentExpression" ):
listener.enterAssignmentExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssignmentExpression" ):
listener.exitAssignmentExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAssignmentExpression" ):
return visitor.visitAssignmentExpression(self)
else:
return visitor.visitChildren(self)
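    # assignmentExpression: conditionalExpression | unaryExpression assignmentOperator assignmentExpression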
def assignmentExpression(self):
localctx = CParser.AssignmentExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 40, self.RULE_assignmentExpression)
try:
self.state = 490
la_ = self._interp.adaptivePredict(self._input,28,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 485
self.conditionalExpression()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 486
self.unaryExpression()
self.state = 487
self.assignmentOperator()
self.state = 488
self.assignmentExpression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AssignmentOperatorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return CParser.RULE_assignmentOperator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssignmentOperator" ):
listener.enterAssignmentOperator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssignmentOperator" ):
listener.exitAssignmentOperator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAssignmentOperator" ):
return visitor.visitAssignmentOperator(self)
else:
return visitor.visitChildren(self)
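    # assignmentOperator: one of the *Assign tokens (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)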
def assignmentOperator(self):
localctx = CParser.AssignmentOperatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_assignmentOperator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 492
_la = self._input.LA(1)
if not(((((_la - 89)) & ~0x3f) == 0 and ((1 << (_la - 89)) & ((1 << (CParser.Assign - 89)) | (1 << (CParser.StarAssign - 89)) | (1 << (CParser.DivAssign - 89)) | (1 << (CParser.ModAssign - 89)) | (1 << (CParser.PlusAssign - 89)) | (1 << (CParser.MinusAssign - 89)) | (1 << (CParser.LeftShiftAssign - 89)) | (1 << (CParser.RightShiftAssign - 89)) | (1 << (CParser.AndAssign - 89)) | (1 << (CParser.XorAssign - 89)) | (1 << (CParser.OrAssign - 89)))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_expression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpression" ):
listener.enterExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpression" ):
listener.exitExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpression" ):
return visitor.visitExpression(self)
else:
return visitor.visitChildren(self)
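    # expression: assignmentExpression (',' assignmentExpression)*  (left-recursive comma expression)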
def expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.ExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 44
self.enterRecursionRule(localctx, 44, self.RULE_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 495
self.assignmentExpression()
self._ctx.stop = self._input.LT(-1)
self.state = 502
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,29,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.ExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 497
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 498
self.match(CParser.Comma)
self.state = 499
self.assignmentExpression()
self.state = 504
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,29,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ConstantExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def conditionalExpression(self):
return self.getTypedRuleContext(CParser.ConditionalExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_constantExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterConstantExpression" ):
listener.enterConstantExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitConstantExpression" ):
listener.exitConstantExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitConstantExpression" ):
return visitor.visitConstantExpression(self)
else:
return visitor.visitChildren(self)
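    # constantExpression: conditionalExpression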
def constantExpression(self):
localctx = CParser.ConstantExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_constantExpression)
try:
self.enterOuterAlt(localctx, 1)
self.state = 505
self.conditionalExpression()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarationSpecifiers(self):
return self.getTypedRuleContext(CParser.DeclarationSpecifiersContext,0)
def initDeclaratorList(self):
return self.getTypedRuleContext(CParser.InitDeclaratorListContext,0)
def staticAssertDeclaration(self):
return self.getTypedRuleContext(CParser.StaticAssertDeclarationContext,0)
def getRuleIndex(self):
return CParser.RULE_declaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclaration" ):
listener.enterDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclaration" ):
listener.exitDeclaration(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclaration" ):
return visitor.visitDeclaration(self)
else:
return visitor.visitChildren(self)
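    # declaration: declarationSpecifiers initDeclaratorList? ';' | staticAssertDeclaration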
def declaration(self):
localctx = CParser.DeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_declaration)
self._la = 0 # Token type
try:
self.state = 514
token = self._input.LA(1)
if token in [CParser.T__0, CParser.T__3, CParser.T__4, CParser.T__5, CParser.T__6, CParser.T__7, CParser.T__8, CParser.T__9, CParser.T__11, CParser.Auto, CParser.Char, CParser.Const, CParser.Double, CParser.Enum, CParser.Extern, CParser.Float, CParser.Inline, CParser.Int, CParser.Long, CParser.Register, CParser.Restrict, CParser.Short, CParser.Signed, CParser.Static, CParser.Struct, CParser.Typedef, CParser.Union, CParser.Unsigned, CParser.Void, CParser.Volatile, CParser.Alignas, CParser.Atomic, CParser.Bool, CParser.Complex, CParser.Noreturn, CParser.ThreadLocal, CParser.Identifier]:
self.enterOuterAlt(localctx, 1)
self.state = 507
self.declarationSpecifiers()
self.state = 509
_la = self._input.LA(1)
if ((((_la - 59)) & ~0x3f) == 0 and ((1 << (_la - 59)) & ((1 << (CParser.LeftParen - 59)) | (1 << (CParser.Star - 59)) | (1 << (CParser.Caret - 59)) | (1 << (CParser.Identifier - 59)))) != 0):
self.state = 508
self.initDeclaratorList(0)
self.state = 511
self.match(CParser.Semi)
elif token in [CParser.StaticAssert]:
self.enterOuterAlt(localctx, 2)
self.state = 513
self.staticAssertDeclaration()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclarationSpecifiersContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarationSpecifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.DeclarationSpecifierContext)
else:
return self.getTypedRuleContext(CParser.DeclarationSpecifierContext,i)
def getRuleIndex(self):
return CParser.RULE_declarationSpecifiers
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarationSpecifiers" ):
listener.enterDeclarationSpecifiers(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarationSpecifiers" ):
listener.exitDeclarationSpecifiers(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclarationSpecifiers" ):
return visitor.visitDeclarationSpecifiers(self)
else:
return visitor.visitChildren(self)
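    # declarationSpecifiers: declarationSpecifier+  (greedy loop driven by adaptivePredict)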
def declarationSpecifiers(self):
localctx = CParser.DeclarationSpecifiersContext(self, self._ctx, self.state)
self.enterRule(localctx, 50, self.RULE_declarationSpecifiers)
try:
self.enterOuterAlt(localctx, 1)
self.state = 517
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 516
self.declarationSpecifier()
else:
raise NoViableAltException(self)
self.state = 519
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,32,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclarationSpecifiers2Context(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarationSpecifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.DeclarationSpecifierContext)
else:
return self.getTypedRuleContext(CParser.DeclarationSpecifierContext,i)
def getRuleIndex(self):
return CParser.RULE_declarationSpecifiers2
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarationSpecifiers2" ):
listener.enterDeclarationSpecifiers2(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarationSpecifiers2" ):
listener.exitDeclarationSpecifiers2(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclarationSpecifiers2" ):
return visitor.visitDeclarationSpecifiers2(self)
else:
return visitor.visitChildren(self)
def declarationSpecifiers2(self):
localctx = CParser.DeclarationSpecifiers2Context(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_declarationSpecifiers2)
try:
self.enterOuterAlt(localctx, 1)
self.state = 522
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 521
self.declarationSpecifier()
else:
raise NoViableAltException(self)
self.state = 524
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,33,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclarationSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def storageClassSpecifier(self):
return self.getTypedRuleContext(CParser.StorageClassSpecifierContext,0)
def typeSpecifier(self):
return self.getTypedRuleContext(CParser.TypeSpecifierContext,0)
def typeQualifier(self):
return self.getTypedRuleContext(CParser.TypeQualifierContext,0)
def functionSpecifier(self):
return self.getTypedRuleContext(CParser.FunctionSpecifierContext,0)
def alignmentSpecifier(self):
return self.getTypedRuleContext(CParser.AlignmentSpecifierContext,0)
def getRuleIndex(self):
return CParser.RULE_declarationSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarationSpecifier" ):
listener.enterDeclarationSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarationSpecifier" ):
listener.exitDeclarationSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclarationSpecifier" ):
return visitor.visitDeclarationSpecifier(self)
else:
return visitor.visitChildren(self)
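    # declarationSpecifier: storageClassSpecifier | typeSpecifier | typeQualifier | functionSpecifier | alignmentSpecifier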
def declarationSpecifier(self):
localctx = CParser.DeclarationSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 54, self.RULE_declarationSpecifier)
try:
self.state = 531
la_ = self._interp.adaptivePredict(self._input,34,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 526
self.storageClassSpecifier()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 527
self.typeSpecifier()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 528
self.typeQualifier()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 529
self.functionSpecifier()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 530
self.alignmentSpecifier()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class InitDeclaratorListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def initDeclarator(self):
return self.getTypedRuleContext(CParser.InitDeclaratorContext,0)
def initDeclaratorList(self):
return self.getTypedRuleContext(CParser.InitDeclaratorListContext,0)
def getRuleIndex(self):
return CParser.RULE_initDeclaratorList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInitDeclaratorList" ):
listener.enterInitDeclaratorList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInitDeclaratorList" ):
listener.exitInitDeclaratorList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInitDeclaratorList" ):
return visitor.visitInitDeclaratorList(self)
else:
return visitor.visitChildren(self)
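    # initDeclaratorList: initDeclarator (',' initDeclarator)*  (left-recursive)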
def initDeclaratorList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.InitDeclaratorListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 56
self.enterRecursionRule(localctx, 56, self.RULE_initDeclaratorList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 534
self.initDeclarator()
self._ctx.stop = self._input.LT(-1)
self.state = 541
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,35,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.InitDeclaratorListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_initDeclaratorList)
self.state = 536
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 537
self.match(CParser.Comma)
self.state = 538
self.initDeclarator()
self.state = 543
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,35,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class InitDeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarator(self):
return self.getTypedRuleContext(CParser.DeclaratorContext,0)
def initializer(self):
return self.getTypedRuleContext(CParser.InitializerContext,0)
def getRuleIndex(self):
return CParser.RULE_initDeclarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInitDeclarator" ):
listener.enterInitDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInitDeclarator" ):
listener.exitInitDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInitDeclarator" ):
return visitor.visitInitDeclarator(self)
else:
return visitor.visitChildren(self)
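    # initDeclarator: declarator | declarator '=' initializer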
def initDeclarator(self):
localctx = CParser.InitDeclaratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 58, self.RULE_initDeclarator)
try:
self.state = 549
la_ = self._interp.adaptivePredict(self._input,36,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 544
self.declarator()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 545
self.declarator()
self.state = 546
self.match(CParser.Assign)
self.state = 547
self.initializer()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StorageClassSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return CParser.RULE_storageClassSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStorageClassSpecifier" ):
listener.enterStorageClassSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStorageClassSpecifier" ):
listener.exitStorageClassSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStorageClassSpecifier" ):
return visitor.visitStorageClassSpecifier(self)
else:
return visitor.visitChildren(self)
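    # storageClassSpecifier: a single one of the Typedef, Extern, Static, ThreadLocal, Auto or Register tokens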
def storageClassSpecifier(self):
localctx = CParser.StorageClassSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 60, self.RULE_storageClassSpecifier)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 551
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Auto) | (1 << CParser.Extern) | (1 << CParser.Register) | (1 << CParser.Static) | (1 << CParser.Typedef) | (1 << CParser.ThreadLocal))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def atomicTypeSpecifier(self):
return self.getTypedRuleContext(CParser.AtomicTypeSpecifierContext,0)
def structOrUnionSpecifier(self):
return self.getTypedRuleContext(CParser.StructOrUnionSpecifierContext,0)
def enumSpecifier(self):
return self.getTypedRuleContext(CParser.EnumSpecifierContext,0)
def typedefName(self):
return self.getTypedRuleContext(CParser.TypedefNameContext,0)
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_typeSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeSpecifier" ):
listener.enterTypeSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeSpecifier" ):
listener.exitTypeSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeSpecifier" ):
return visitor.visitTypeSpecifier(self)
else:
return visitor.visitChildren(self)
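    # typeSpecifier: a built-in type keyword (Void, Char, Short, Int, Long, Float, Double, Signed,
    # Unsigned, Bool, Complex, plus a few unnamed extension tokens), the extension form
    # T__0 '(' T__3|T__4|T__5 ')', an atomicTypeSpecifier, a structOrUnionSpecifier, an enumSpecifier,
    # a typedefName, or T__6 '(' constantExpression ')'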
def typeSpecifier(self):
localctx = CParser.TypeSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 62, self.RULE_typeSpecifier)
self._la = 0 # Token type
try:
self.state = 567
token = self._input.LA(1)
if token in [CParser.T__3, CParser.T__4, CParser.T__5, CParser.Char, CParser.Double, CParser.Float, CParser.Int, CParser.Long, CParser.Short, CParser.Signed, CParser.Unsigned, CParser.Void, CParser.Bool, CParser.Complex]:
self.enterOuterAlt(localctx, 1)
self.state = 553
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.Char) | (1 << CParser.Double) | (1 << CParser.Float) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Bool) | (1 << CParser.Complex))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
elif token in [CParser.T__0]:
self.enterOuterAlt(localctx, 2)
self.state = 554
self.match(CParser.T__0)
self.state = 555
self.match(CParser.LeftParen)
self.state = 556
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 557
self.match(CParser.RightParen)
elif token in [CParser.Atomic]:
self.enterOuterAlt(localctx, 3)
self.state = 558
self.atomicTypeSpecifier()
elif token in [CParser.Struct, CParser.Union]:
self.enterOuterAlt(localctx, 4)
self.state = 559
self.structOrUnionSpecifier()
elif token in [CParser.Enum]:
self.enterOuterAlt(localctx, 5)
self.state = 560
self.enumSpecifier()
elif token in [CParser.Identifier]:
self.enterOuterAlt(localctx, 6)
self.state = 561
self.typedefName()
elif token in [CParser.T__6]:
self.enterOuterAlt(localctx, 7)
self.state = 562
self.match(CParser.T__6)
self.state = 563
self.match(CParser.LeftParen)
self.state = 564
self.constantExpression()
self.state = 565
self.match(CParser.RightParen)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StructOrUnionSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def structOrUnion(self):
return self.getTypedRuleContext(CParser.StructOrUnionContext,0)
def structDeclarationList(self):
return self.getTypedRuleContext(CParser.StructDeclarationListContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_structOrUnionSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructOrUnionSpecifier" ):
listener.enterStructOrUnionSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructOrUnionSpecifier" ):
listener.exitStructOrUnionSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructOrUnionSpecifier" ):
return visitor.visitStructOrUnionSpecifier(self)
else:
return visitor.visitChildren(self)
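    # structOrUnionSpecifier: structOrUnion Identifier? '{' structDeclarationList '}' | structOrUnion Identifier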
def structOrUnionSpecifier(self):
localctx = CParser.StructOrUnionSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 64, self.RULE_structOrUnionSpecifier)
self._la = 0 # Token type
try:
self.state = 580
la_ = self._interp.adaptivePredict(self._input,39,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 569
self.structOrUnion()
self.state = 571
_la = self._input.LA(1)
if _la==CParser.Identifier:
self.state = 570
self.match(CParser.Identifier)
self.state = 573
self.match(CParser.LeftBrace)
self.state = 574
self.structDeclarationList(0)
self.state = 575
self.match(CParser.RightBrace)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 577
self.structOrUnion()
self.state = 578
self.match(CParser.Identifier)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StructOrUnionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return CParser.RULE_structOrUnion
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructOrUnion" ):
listener.enterStructOrUnion(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructOrUnion" ):
listener.exitStructOrUnion(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructOrUnion" ):
return visitor.visitStructOrUnion(self)
else:
return visitor.visitChildren(self)
def structOrUnion(self):
localctx = CParser.StructOrUnionContext(self, self._ctx, self.state)
self.enterRule(localctx, 66, self.RULE_structOrUnion)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 582
_la = self._input.LA(1)
if not(_la==CParser.Struct or _la==CParser.Union):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StructDeclarationListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def structDeclaration(self):
return self.getTypedRuleContext(CParser.StructDeclarationContext,0)
def structDeclarationList(self):
return self.getTypedRuleContext(CParser.StructDeclarationListContext,0)
def getRuleIndex(self):
return CParser.RULE_structDeclarationList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructDeclarationList" ):
listener.enterStructDeclarationList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructDeclarationList" ):
listener.exitStructDeclarationList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructDeclarationList" ):
return visitor.visitStructDeclarationList(self)
else:
return visitor.visitChildren(self)
def structDeclarationList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.StructDeclarationListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 68
self.enterRecursionRule(localctx, 68, self.RULE_structDeclarationList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 585
self.structDeclaration()
self._ctx.stop = self._input.LT(-1)
self.state = 591
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,40,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.StructDeclarationListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_structDeclarationList)
self.state = 587
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 588
self.structDeclaration()
self.state = 593
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,40,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class StructDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def specifierQualifierList(self):
return self.getTypedRuleContext(CParser.SpecifierQualifierListContext,0)
def structDeclaratorList(self):
return self.getTypedRuleContext(CParser.StructDeclaratorListContext,0)
def staticAssertDeclaration(self):
return self.getTypedRuleContext(CParser.StaticAssertDeclarationContext,0)
def getRuleIndex(self):
return CParser.RULE_structDeclaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructDeclaration" ):
listener.enterStructDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructDeclaration" ):
listener.exitStructDeclaration(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructDeclaration" ):
return visitor.visitStructDeclaration(self)
else:
return visitor.visitChildren(self)
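    # structDeclaration: specifierQualifierList structDeclaratorList? ';' | staticAssertDeclaration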
def structDeclaration(self):
localctx = CParser.StructDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 70, self.RULE_structDeclaration)
self._la = 0 # Token type
try:
self.state = 601
token = self._input.LA(1)
if token in [CParser.T__0, CParser.T__3, CParser.T__4, CParser.T__5, CParser.T__6, CParser.Char, CParser.Const, CParser.Double, CParser.Enum, CParser.Float, CParser.Int, CParser.Long, CParser.Restrict, CParser.Short, CParser.Signed, CParser.Struct, CParser.Union, CParser.Unsigned, CParser.Void, CParser.Volatile, CParser.Atomic, CParser.Bool, CParser.Complex, CParser.Identifier]:
self.enterOuterAlt(localctx, 1)
self.state = 594
self.specifierQualifierList()
self.state = 596
_la = self._input.LA(1)
if ((((_la - 59)) & ~0x3f) == 0 and ((1 << (_la - 59)) & ((1 << (CParser.LeftParen - 59)) | (1 << (CParser.Star - 59)) | (1 << (CParser.Caret - 59)) | (1 << (CParser.Colon - 59)) | (1 << (CParser.Identifier - 59)))) != 0):
self.state = 595
self.structDeclaratorList(0)
self.state = 598
self.match(CParser.Semi)
elif token in [CParser.StaticAssert]:
self.enterOuterAlt(localctx, 2)
self.state = 600
self.staticAssertDeclaration()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SpecifierQualifierListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeSpecifier(self):
return self.getTypedRuleContext(CParser.TypeSpecifierContext,0)
def specifierQualifierList(self):
return self.getTypedRuleContext(CParser.SpecifierQualifierListContext,0)
def typeQualifier(self):
return self.getTypedRuleContext(CParser.TypeQualifierContext,0)
def getRuleIndex(self):
return CParser.RULE_specifierQualifierList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSpecifierQualifierList" ):
listener.enterSpecifierQualifierList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSpecifierQualifierList" ):
listener.exitSpecifierQualifierList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSpecifierQualifierList" ):
return visitor.visitSpecifierQualifierList(self)
else:
return visitor.visitChildren(self)
def specifierQualifierList(self):
localctx = CParser.SpecifierQualifierListContext(self, self._ctx, self.state)
self.enterRule(localctx, 72, self.RULE_specifierQualifierList)
try:
self.state = 611
la_ = self._interp.adaptivePredict(self._input,45,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 603
self.typeSpecifier()
self.state = 605
la_ = self._interp.adaptivePredict(self._input,43,self._ctx)
if la_ == 1:
self.state = 604
self.specifierQualifierList()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 607
self.typeQualifier()
self.state = 609
la_ = self._interp.adaptivePredict(self._input,44,self._ctx)
if la_ == 1:
self.state = 608
self.specifierQualifierList()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StructDeclaratorListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def structDeclarator(self):
return self.getTypedRuleContext(CParser.StructDeclaratorContext,0)
def structDeclaratorList(self):
return self.getTypedRuleContext(CParser.StructDeclaratorListContext,0)
def getRuleIndex(self):
return CParser.RULE_structDeclaratorList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructDeclaratorList" ):
listener.enterStructDeclaratorList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructDeclaratorList" ):
listener.exitStructDeclaratorList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructDeclaratorList" ):
return visitor.visitStructDeclaratorList(self)
else:
return visitor.visitChildren(self)
def structDeclaratorList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.StructDeclaratorListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 74
self.enterRecursionRule(localctx, 74, self.RULE_structDeclaratorList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 614
self.structDeclarator()
self._ctx.stop = self._input.LT(-1)
self.state = 621
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,46,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.StructDeclaratorListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_structDeclaratorList)
self.state = 616
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 617
self.match(CParser.Comma)
self.state = 618
self.structDeclarator()
self.state = 623
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,46,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class StructDeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarator(self):
return self.getTypedRuleContext(CParser.DeclaratorContext,0)
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_structDeclarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStructDeclarator" ):
listener.enterStructDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStructDeclarator" ):
listener.exitStructDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStructDeclarator" ):
return visitor.visitStructDeclarator(self)
else:
return visitor.visitChildren(self)
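    # structDeclarator: declarator | declarator? ':' constantExpression  (bit-field form)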
def structDeclarator(self):
localctx = CParser.StructDeclaratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 76, self.RULE_structDeclarator)
self._la = 0 # Token type
try:
self.state = 630
la_ = self._interp.adaptivePredict(self._input,48,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 624
self.declarator()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 626
_la = self._input.LA(1)
if ((((_la - 59)) & ~0x3f) == 0 and ((1 << (_la - 59)) & ((1 << (CParser.LeftParen - 59)) | (1 << (CParser.Star - 59)) | (1 << (CParser.Caret - 59)) | (1 << (CParser.Identifier - 59)))) != 0):
self.state = 625
self.declarator()
self.state = 628
self.match(CParser.Colon)
self.state = 629
self.constantExpression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EnumSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def enumeratorList(self):
return self.getTypedRuleContext(CParser.EnumeratorListContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_enumSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEnumSpecifier" ):
listener.enterEnumSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEnumSpecifier" ):
listener.exitEnumSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEnumSpecifier" ):
return visitor.visitEnumSpecifier(self)
else:
return visitor.visitChildren(self)
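    # enumSpecifier: Enum Identifier? '{' enumeratorList '}' | Enum Identifier? '{' enumeratorList ',' '}' | Enum Identifier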
def enumSpecifier(self):
localctx = CParser.EnumSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 78, self.RULE_enumSpecifier)
self._la = 0 # Token type
try:
self.state = 651
la_ = self._interp.adaptivePredict(self._input,51,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 632
self.match(CParser.Enum)
self.state = 634
_la = self._input.LA(1)
if _la==CParser.Identifier:
self.state = 633
self.match(CParser.Identifier)
self.state = 636
self.match(CParser.LeftBrace)
self.state = 637
self.enumeratorList(0)
self.state = 638
self.match(CParser.RightBrace)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 640
self.match(CParser.Enum)
self.state = 642
_la = self._input.LA(1)
if _la==CParser.Identifier:
self.state = 641
self.match(CParser.Identifier)
self.state = 644
self.match(CParser.LeftBrace)
self.state = 645
self.enumeratorList(0)
self.state = 646
self.match(CParser.Comma)
self.state = 647
self.match(CParser.RightBrace)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 649
self.match(CParser.Enum)
self.state = 650
self.match(CParser.Identifier)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EnumeratorListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def enumerator(self):
return self.getTypedRuleContext(CParser.EnumeratorContext,0)
def enumeratorList(self):
return self.getTypedRuleContext(CParser.EnumeratorListContext,0)
def getRuleIndex(self):
return CParser.RULE_enumeratorList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEnumeratorList" ):
listener.enterEnumeratorList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEnumeratorList" ):
listener.exitEnumeratorList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEnumeratorList" ):
return visitor.visitEnumeratorList(self)
else:
return visitor.visitChildren(self)
def enumeratorList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.EnumeratorListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 80
self.enterRecursionRule(localctx, 80, self.RULE_enumeratorList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 654
self.enumerator()
self._ctx.stop = self._input.LT(-1)
self.state = 661
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,52,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.EnumeratorListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_enumeratorList)
self.state = 656
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 657
self.match(CParser.Comma)
self.state = 658
self.enumerator()
self.state = 663
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,52,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class EnumeratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def enumerationConstant(self):
return self.getTypedRuleContext(CParser.EnumerationConstantContext,0)
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_enumerator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEnumerator" ):
listener.enterEnumerator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEnumerator" ):
listener.exitEnumerator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEnumerator" ):
return visitor.visitEnumerator(self)
else:
return visitor.visitChildren(self)
def enumerator(self):
localctx = CParser.EnumeratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 82, self.RULE_enumerator)
try:
self.state = 669
la_ = self._interp.adaptivePredict(self._input,53,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 664
self.enumerationConstant()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 665
self.enumerationConstant()
self.state = 666
self.match(CParser.Assign)
self.state = 667
self.constantExpression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EnumerationConstantContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_enumerationConstant
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEnumerationConstant" ):
listener.enterEnumerationConstant(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEnumerationConstant" ):
listener.exitEnumerationConstant(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEnumerationConstant" ):
return visitor.visitEnumerationConstant(self)
else:
return visitor.visitChildren(self)
def enumerationConstant(self):
localctx = CParser.EnumerationConstantContext(self, self._ctx, self.state)
self.enterRule(localctx, 84, self.RULE_enumerationConstant)
try:
self.enterOuterAlt(localctx, 1)
self.state = 671
self.match(CParser.Identifier)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AtomicTypeSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def getRuleIndex(self):
return CParser.RULE_atomicTypeSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomicTypeSpecifier" ):
listener.enterAtomicTypeSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomicTypeSpecifier" ):
listener.exitAtomicTypeSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomicTypeSpecifier" ):
return visitor.visitAtomicTypeSpecifier(self)
else:
return visitor.visitChildren(self)
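    # atomicTypeSpecifier: Atomic '(' typeName ')'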
def atomicTypeSpecifier(self):
localctx = CParser.AtomicTypeSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 86, self.RULE_atomicTypeSpecifier)
try:
self.enterOuterAlt(localctx, 1)
self.state = 673
self.match(CParser.Atomic)
self.state = 674
self.match(CParser.LeftParen)
self.state = 675
self.typeName()
self.state = 676
self.match(CParser.RightParen)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeQualifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return CParser.RULE_typeQualifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeQualifier" ):
listener.enterTypeQualifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeQualifier" ):
listener.exitTypeQualifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeQualifier" ):
return visitor.visitTypeQualifier(self)
else:
return visitor.visitChildren(self)
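    # typeQualifier: a single one of the Const, Restrict, Volatile or Atomic tokens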
def typeQualifier(self):
localctx = CParser.TypeQualifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 88, self.RULE_typeQualifier)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 678
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FunctionSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def gccAttributeSpecifier(self):
return self.getTypedRuleContext(CParser.GccAttributeSpecifierContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_functionSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFunctionSpecifier" ):
listener.enterFunctionSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFunctionSpecifier" ):
listener.exitFunctionSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFunctionSpecifier" ):
return visitor.visitFunctionSpecifier(self)
else:
return visitor.visitChildren(self)
def functionSpecifier(self):
localctx = CParser.FunctionSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 90, self.RULE_functionSpecifier)
self._la = 0 # Token type
try:
self.state = 686
token = self._input.LA(1)
if token in [CParser.T__7, CParser.T__8, CParser.Inline, CParser.Noreturn]:
self.enterOuterAlt(localctx, 1)
self.state = 680
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.Inline) | (1 << CParser.Noreturn))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
elif token in [CParser.T__11]:
self.enterOuterAlt(localctx, 2)
self.state = 681
self.gccAttributeSpecifier()
elif token in [CParser.T__9]:
self.enterOuterAlt(localctx, 3)
self.state = 682
self.match(CParser.T__9)
self.state = 683
self.match(CParser.LeftParen)
self.state = 684
self.match(CParser.Identifier)
self.state = 685
self.match(CParser.RightParen)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AlignmentSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeName(self):
return self.getTypedRuleContext(CParser.TypeNameContext,0)
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_alignmentSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAlignmentSpecifier" ):
listener.enterAlignmentSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAlignmentSpecifier" ):
listener.exitAlignmentSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAlignmentSpecifier" ):
return visitor.visitAlignmentSpecifier(self)
else:
return visitor.visitChildren(self)
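    # alignmentSpecifier: Alignas '(' typeName ')' | Alignas '(' constantExpression ')'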
def alignmentSpecifier(self):
localctx = CParser.AlignmentSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 92, self.RULE_alignmentSpecifier)
try:
self.state = 698
la_ = self._interp.adaptivePredict(self._input,55,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 688
self.match(CParser.Alignas)
self.state = 689
self.match(CParser.LeftParen)
self.state = 690
self.typeName()
self.state = 691
self.match(CParser.RightParen)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 693
self.match(CParser.Alignas)
self.state = 694
self.match(CParser.LeftParen)
self.state = 695
self.constantExpression()
self.state = 696
self.match(CParser.RightParen)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def directDeclarator(self):
return self.getTypedRuleContext(CParser.DirectDeclaratorContext,0)
def pointer(self):
return self.getTypedRuleContext(CParser.PointerContext,0)
def gccDeclaratorExtension(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.GccDeclaratorExtensionContext)
else:
return self.getTypedRuleContext(CParser.GccDeclaratorExtensionContext,i)
def getRuleIndex(self):
return CParser.RULE_declarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarator" ):
listener.enterDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarator" ):
listener.exitDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclarator" ):
return visitor.visitDeclarator(self)
else:
return visitor.visitChildren(self)
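    # declarator: pointer? directDeclarator gccDeclaratorExtension*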
def declarator(self):
localctx = CParser.DeclaratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 94, self.RULE_declarator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 701
_la = self._input.LA(1)
if _la==CParser.Star or _la==CParser.Caret:
self.state = 700
self.pointer()
self.state = 703
self.directDeclarator(0)
self.state = 707
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,57,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 704
self.gccDeclaratorExtension()
self.state = 709
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,57,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DirectDeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def declarator(self):
return self.getTypedRuleContext(CParser.DeclaratorContext,0)
def directDeclarator(self):
return self.getTypedRuleContext(CParser.DirectDeclaratorContext,0)
def typeQualifierList(self):
return self.getTypedRuleContext(CParser.TypeQualifierListContext,0)
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def parameterTypeList(self):
return self.getTypedRuleContext(CParser.ParameterTypeListContext,0)
def identifierList(self):
return self.getTypedRuleContext(CParser.IdentifierListContext,0)
def getRuleIndex(self):
return CParser.RULE_directDeclarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDirectDeclarator" ):
listener.enterDirectDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDirectDeclarator" ):
listener.exitDirectDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDirectDeclarator" ):
return visitor.visitDirectDeclarator(self)
else:
return visitor.visitChildren(self)
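    # directDeclarator (left-recursive): an Identifier or '(' declarator ')' base, followed by any number of
    # array suffixes ('[' ... ']' with optional typeQualifierList, Static, assignmentExpression or '*') or
    # function suffixes ('(' parameterTypeList ')' or '(' identifierList? ')')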
def directDeclarator(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.DirectDeclaratorContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 96
self.enterRecursionRule(localctx, 96, self.RULE_directDeclarator, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 716
token = self._input.LA(1)
if token in [CParser.Identifier]:
self.state = 711
self.match(CParser.Identifier)
elif token in [CParser.LeftParen]:
self.state = 712
self.match(CParser.LeftParen)
self.state = 713
self.declarator()
self.state = 714
self.match(CParser.RightParen)
else:
raise NoViableAltException(self)
self._ctx.stop = self._input.LT(-1)
self.state = 763
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,65,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 761
la_ = self._interp.adaptivePredict(self._input,64,self._ctx)
if la_ == 1:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 718
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 6)")
self.state = 719
self.match(CParser.LeftBracket)
self.state = 721
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 720
self.typeQualifierList(0)
self.state = 724
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 723
self.assignmentExpression()
self.state = 726
self.match(CParser.RightBracket)
pass
elif la_ == 2:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 727
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 728
self.match(CParser.LeftBracket)
self.state = 729
self.match(CParser.Static)
self.state = 731
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 730
self.typeQualifierList(0)
self.state = 733
self.assignmentExpression()
self.state = 734
self.match(CParser.RightBracket)
pass
elif la_ == 3:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 736
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 737
self.match(CParser.LeftBracket)
self.state = 738
self.typeQualifierList(0)
self.state = 739
self.match(CParser.Static)
self.state = 740
self.assignmentExpression()
self.state = 741
self.match(CParser.RightBracket)
pass
elif la_ == 4:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 743
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 744
self.match(CParser.LeftBracket)
self.state = 746
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 745
self.typeQualifierList(0)
self.state = 748
self.match(CParser.Star)
self.state = 749
self.match(CParser.RightBracket)
pass
elif la_ == 5:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 750
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 751
self.match(CParser.LeftParen)
self.state = 752
self.parameterTypeList()
self.state = 753
self.match(CParser.RightParen)
pass
elif la_ == 6:
localctx = CParser.DirectDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directDeclarator)
self.state = 755
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 756
self.match(CParser.LeftParen)
self.state = 758
_la = self._input.LA(1)
if _la==CParser.Identifier:
self.state = 757
self.identifierList(0)
self.state = 760
self.match(CParser.RightParen)
pass
self.state = 765
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,65,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class GccDeclaratorExtensionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def StringLiteral(self, i:int=None):
if i is None:
return self.getTokens(CParser.StringLiteral)
else:
return self.getToken(CParser.StringLiteral, i)
def gccAttributeSpecifier(self):
return self.getTypedRuleContext(CParser.GccAttributeSpecifierContext,0)
def getRuleIndex(self):
return CParser.RULE_gccDeclaratorExtension
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGccDeclaratorExtension" ):
listener.enterGccDeclaratorExtension(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGccDeclaratorExtension" ):
listener.exitGccDeclaratorExtension(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGccDeclaratorExtension" ):
return visitor.visitGccDeclaratorExtension(self)
else:
return visitor.visitChildren(self)
def gccDeclaratorExtension(self):
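# Matches either the implicit grammar-literal token T__10 (its literal text is defined in the
# grammar, likely an '__asm'-style keyword) followed by '(' StringLiteral+ ')', or a
# gccAttributeSpecifier.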
localctx = CParser.GccDeclaratorExtensionContext(self, self._ctx, self.state)
self.enterRule(localctx, 98, self.RULE_gccDeclaratorExtension)
self._la = 0 # Token type
try:
self.state = 775
token = self._input.LA(1)
if token in [CParser.T__10]:
self.enterOuterAlt(localctx, 1)
self.state = 766
self.match(CParser.T__10)
self.state = 767
self.match(CParser.LeftParen)
self.state = 769
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 768
self.match(CParser.StringLiteral)
self.state = 771
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==CParser.StringLiteral):
break
self.state = 773
self.match(CParser.RightParen)
elif token in [CParser.T__11]:
self.enterOuterAlt(localctx, 2)
self.state = 774
self.gccAttributeSpecifier()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GccAttributeSpecifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def gccAttributeList(self):
return self.getTypedRuleContext(CParser.GccAttributeListContext,0)
def getRuleIndex(self):
return CParser.RULE_gccAttributeSpecifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGccAttributeSpecifier" ):
listener.enterGccAttributeSpecifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGccAttributeSpecifier" ):
listener.exitGccAttributeSpecifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGccAttributeSpecifier" ):
return visitor.visitGccAttributeSpecifier(self)
else:
return visitor.visitChildren(self)
def gccAttributeSpecifier(self):
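# Matches T__11 '(' '(' gccAttributeList ')' ')'; T__11 is an implicit grammar literal,
# presumably '__attribute__'.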
localctx = CParser.GccAttributeSpecifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 100, self.RULE_gccAttributeSpecifier)
try:
self.enterOuterAlt(localctx, 1)
self.state = 777
self.match(CParser.T__11)
self.state = 778
self.match(CParser.LeftParen)
self.state = 779
self.match(CParser.LeftParen)
self.state = 780
self.gccAttributeList()
self.state = 781
self.match(CParser.RightParen)
self.state = 782
self.match(CParser.RightParen)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GccAttributeListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def gccAttribute(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.GccAttributeContext)
else:
return self.getTypedRuleContext(CParser.GccAttributeContext,i)
def getRuleIndex(self):
return CParser.RULE_gccAttributeList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGccAttributeList" ):
listener.enterGccAttributeList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGccAttributeList" ):
listener.exitGccAttributeList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGccAttributeList" ):
return visitor.visitGccAttributeList(self)
else:
return visitor.visitChildren(self)
def gccAttributeList(self):
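# Matches a comma-separated list of gccAttribute entries, or an empty list.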
localctx = CParser.GccAttributeListContext(self, self._ctx, self.state)
self.enterRule(localctx, 102, self.RULE_gccAttributeList)
self._la = 0 # Token type
try:
self.state = 793
la_ = self._interp.adaptivePredict(self._input,69,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 784
self.gccAttribute()
self.state = 789
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==CParser.Comma:
self.state = 785
self.match(CParser.Comma)
self.state = 786
self.gccAttribute()
self.state = 791
self._errHandler.sync(self)
_la = self._input.LA(1)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GccAttributeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def argumentExpressionList(self):
return self.getTypedRuleContext(CParser.ArgumentExpressionListContext,0)
def getRuleIndex(self):
return CParser.RULE_gccAttribute
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGccAttribute" ):
listener.enterGccAttribute(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGccAttribute" ):
listener.exitGccAttribute(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGccAttribute" ):
return visitor.visitGccAttribute(self)
else:
return visitor.visitChildren(self)
def gccAttribute(self):
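# Matches any single token other than '(', ')' or ',', optionally followed by a parenthesized
# argumentExpressionList; the second alternative matches nothing (an empty attribute slot).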
localctx = CParser.GccAttributeContext(self, self._ctx, self.state)
self.enterRule(localctx, 104, self.RULE_gccAttribute)
self._la = 0 # Token type
try:
self.state = 804
token = self._input.LA(1)
if token in [CParser.T__0, CParser.T__1, CParser.T__2, CParser.T__3, CParser.T__4, CParser.T__5, CParser.T__6, CParser.T__7, CParser.T__8, CParser.T__9, CParser.T__10, CParser.T__11, CParser.T__12, CParser.T__13, CParser.Auto, CParser.Break, CParser.Case, CParser.Char, CParser.Const, CParser.Continue, CParser.Default, CParser.Do, CParser.Double, CParser.Else, CParser.Enum, CParser.Extern, CParser.Float, CParser.For, CParser.Goto, CParser.If, CParser.Inline, CParser.Int, CParser.Long, CParser.Register, CParser.Restrict, CParser.Return, CParser.Short, CParser.Signed, CParser.Sizeof, CParser.Static, CParser.Struct, CParser.Switch, CParser.Typedef, CParser.Union, CParser.Unsigned, CParser.Void, CParser.Volatile, CParser.While, CParser.Alignas, CParser.Alignof, CParser.Atomic, CParser.Bool, CParser.Complex, CParser.Generic, CParser.Imaginary, CParser.Noreturn, CParser.StaticAssert, CParser.ThreadLocal, CParser.LeftBracket, CParser.RightBracket, CParser.LeftBrace, CParser.RightBrace, CParser.Less, CParser.LessEqual, CParser.Greater, CParser.GreaterEqual, CParser.LeftShift, CParser.RightShift, CParser.Plus, CParser.PlusPlus, CParser.Minus, CParser.MinusMinus, CParser.Star, CParser.Div, CParser.Mod, CParser.And, CParser.Or, CParser.AndAnd, CParser.OrOr, CParser.Caret, CParser.Not, CParser.Tilde, CParser.Question, CParser.Colon, CParser.Semi, CParser.Assign, CParser.StarAssign, CParser.DivAssign, CParser.ModAssign, CParser.PlusAssign, CParser.MinusAssign, CParser.LeftShiftAssign, CParser.RightShiftAssign, CParser.AndAssign, CParser.XorAssign, CParser.OrAssign, CParser.Equal, CParser.NotEqual, CParser.Arrow, CParser.Dot, CParser.Ellipsis, CParser.Identifier, CParser.Constant, CParser.StringLiteral, CParser.LineDirective, CParser.PragmaDirective, CParser.Whitespace, CParser.Newline, CParser.BlockComment, CParser.LineComment]:
self.enterOuterAlt(localctx, 1)
self.state = 795
_la = self._input.LA(1)
if _la <= 0 or ((((_la - 59)) & ~0x3f) == 0 and ((1 << (_la - 59)) & ((1 << (CParser.LeftParen - 59)) | (1 << (CParser.RightParen - 59)) | (1 << (CParser.Comma - 59)))) != 0):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 801
_la = self._input.LA(1)
if _la==CParser.LeftParen:
self.state = 796
self.match(CParser.LeftParen)
self.state = 798
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 797
self.argumentExpressionList(0)
self.state = 800
self.match(CParser.RightParen)
elif token in [CParser.RightParen, CParser.Comma]:
self.enterOuterAlt(localctx, 2)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NestedParenthesesBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def nestedParenthesesBlock(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.NestedParenthesesBlockContext)
else:
return self.getTypedRuleContext(CParser.NestedParenthesesBlockContext,i)
def getRuleIndex(self):
return CParser.RULE_nestedParenthesesBlock
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNestedParenthesesBlock" ):
listener.enterNestedParenthesesBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNestedParenthesesBlock" ):
listener.exitNestedParenthesesBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNestedParenthesesBlock" ):
return visitor.visitNestedParenthesesBlock(self)
else:
return visitor.visitChildren(self)
def nestedParenthesesBlock(self):
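# Consumes any sequence of tokens while keeping parentheses balanced, recursing into
# nested '(' ... ')' groups.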
localctx = CParser.NestedParenthesesBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 106, self.RULE_nestedParenthesesBlock)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 813
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__10) | (1 << CParser.T__11) | (1 << CParser.T__12) | (1 << CParser.T__13) | (1 << CParser.Auto) | (1 << CParser.Break) | (1 << CParser.Case) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Continue) | (1 << CParser.Default) | (1 << CParser.Do) | (1 << CParser.Double) | (1 << CParser.Else) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.For) | (1 << CParser.Goto) | (1 << CParser.If) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Return) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Sizeof) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Switch) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.While) | (1 << CParser.Alignas) | (1 << CParser.Alignof) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Generic) | (1 << CParser.Imaginary) | (1 << CParser.Noreturn) | (1 << CParser.StaticAssert) | (1 << CParser.ThreadLocal) | (1 << CParser.LeftParen) | (1 << CParser.LeftBracket) | (1 << CParser.RightBracket) | (1 << CParser.LeftBrace))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (CParser.RightBrace - 64)) | (1 << (CParser.Less - 64)) | (1 << (CParser.LessEqual - 64)) | (1 << (CParser.Greater - 64)) | (1 << (CParser.GreaterEqual - 64)) | (1 << (CParser.LeftShift - 64)) | (1 << (CParser.RightShift - 64)) | (1 << (CParser.Plus - 64)) | (1 << (CParser.PlusPlus - 64)) | (1 << (CParser.Minus - 64)) | (1 << (CParser.MinusMinus - 64)) | (1 << (CParser.Star - 64)) | (1 << (CParser.Div - 64)) | (1 << (CParser.Mod - 64)) | (1 << (CParser.And - 64)) | (1 << (CParser.Or - 64)) | (1 << (CParser.AndAnd - 64)) | (1 << (CParser.OrOr - 64)) | (1 << (CParser.Caret - 64)) | (1 << (CParser.Not - 64)) | (1 << (CParser.Tilde - 64)) | (1 << (CParser.Question - 64)) | (1 << (CParser.Colon - 64)) | (1 << (CParser.Semi - 64)) | (1 << (CParser.Comma - 64)) | (1 << (CParser.Assign - 64)) | (1 << (CParser.StarAssign - 64)) | (1 << (CParser.DivAssign - 64)) | (1 << (CParser.ModAssign - 64)) | (1 << (CParser.PlusAssign - 64)) | (1 << (CParser.MinusAssign - 64)) | (1 << (CParser.LeftShiftAssign - 64)) | (1 << (CParser.RightShiftAssign - 64)) | (1 << (CParser.AndAssign - 64)) | (1 << (CParser.XorAssign - 64)) | (1 << (CParser.OrAssign - 64)) | (1 << (CParser.Equal - 64)) | (1 << (CParser.NotEqual - 64)) | (1 << (CParser.Arrow - 64)) | (1 << (CParser.Dot - 64)) | (1 << (CParser.Ellipsis - 64)) | (1 << (CParser.Identifier - 64)) | (1 << (CParser.Constant - 64)) | (1 << (CParser.StringLiteral - 64)) | (1 << (CParser.LineDirective - 64)) | (1 << (CParser.PragmaDirective - 64)) | (1 << (CParser.Whitespace - 64)) | (1 << (CParser.Newline - 64)) | (1 << (CParser.BlockComment - 64)) | (1 << (CParser.LineComment - 64)))) != 0):
self.state = 811
token = self._input.LA(1)
if token in [CParser.T__0, CParser.T__1, CParser.T__2, CParser.T__3, CParser.T__4, CParser.T__5, CParser.T__6, CParser.T__7, CParser.T__8, CParser.T__9, CParser.T__10, CParser.T__11, CParser.T__12, CParser.T__13, CParser.Auto, CParser.Break, CParser.Case, CParser.Char, CParser.Const, CParser.Continue, CParser.Default, CParser.Do, CParser.Double, CParser.Else, CParser.Enum, CParser.Extern, CParser.Float, CParser.For, CParser.Goto, CParser.If, CParser.Inline, CParser.Int, CParser.Long, CParser.Register, CParser.Restrict, CParser.Return, CParser.Short, CParser.Signed, CParser.Sizeof, CParser.Static, CParser.Struct, CParser.Switch, CParser.Typedef, CParser.Union, CParser.Unsigned, CParser.Void, CParser.Volatile, CParser.While, CParser.Alignas, CParser.Alignof, CParser.Atomic, CParser.Bool, CParser.Complex, CParser.Generic, CParser.Imaginary, CParser.Noreturn, CParser.StaticAssert, CParser.ThreadLocal, CParser.LeftBracket, CParser.RightBracket, CParser.LeftBrace, CParser.RightBrace, CParser.Less, CParser.LessEqual, CParser.Greater, CParser.GreaterEqual, CParser.LeftShift, CParser.RightShift, CParser.Plus, CParser.PlusPlus, CParser.Minus, CParser.MinusMinus, CParser.Star, CParser.Div, CParser.Mod, CParser.And, CParser.Or, CParser.AndAnd, CParser.OrOr, CParser.Caret, CParser.Not, CParser.Tilde, CParser.Question, CParser.Colon, CParser.Semi, CParser.Comma, CParser.Assign, CParser.StarAssign, CParser.DivAssign, CParser.ModAssign, CParser.PlusAssign, CParser.MinusAssign, CParser.LeftShiftAssign, CParser.RightShiftAssign, CParser.AndAssign, CParser.XorAssign, CParser.OrAssign, CParser.Equal, CParser.NotEqual, CParser.Arrow, CParser.Dot, CParser.Ellipsis, CParser.Identifier, CParser.Constant, CParser.StringLiteral, CParser.LineDirective, CParser.PragmaDirective, CParser.Whitespace, CParser.Newline, CParser.BlockComment, CParser.LineComment]:
self.state = 806
_la = self._input.LA(1)
if _la <= 0 or _la==CParser.LeftParen or _la==CParser.RightParen:
self._errHandler.recoverInline(self)
else:
self.consume()
elif token in [CParser.LeftParen]:
self.state = 807
self.match(CParser.LeftParen)
self.state = 808
self.nestedParenthesesBlock()
self.state = 809
self.match(CParser.RightParen)
else:
raise NoViableAltException(self)
self.state = 815
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PointerContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeQualifierList(self):
return self.getTypedRuleContext(CParser.TypeQualifierListContext,0)
def pointer(self):
return self.getTypedRuleContext(CParser.PointerContext,0)
def getRuleIndex(self):
return CParser.RULE_pointer
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPointer" ):
listener.enterPointer(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPointer" ):
listener.exitPointer(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPointer" ):
return visitor.visitPointer(self)
else:
return visitor.visitChildren(self)
def pointer(self):
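# Matches a pointer prefix: '*' (Star) or '^' (Caret) with an optional typeQualifierList,
# optionally followed by a further pointer.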
localctx = CParser.PointerContext(self, self._ctx, self.state)
self.enterRule(localctx, 108, self.RULE_pointer)
self._la = 0 # Token type
try:
self.state = 834
la_ = self._interp.adaptivePredict(self._input,79,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 816
self.match(CParser.Star)
self.state = 818
la_ = self._interp.adaptivePredict(self._input,75,self._ctx)
if la_ == 1:
self.state = 817
self.typeQualifierList(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 820
self.match(CParser.Star)
self.state = 822
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 821
self.typeQualifierList(0)
self.state = 824
self.pointer()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 825
self.match(CParser.Caret)
self.state = 827
la_ = self._interp.adaptivePredict(self._input,77,self._ctx)
if la_ == 1:
self.state = 826
self.typeQualifierList(0)
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 829
self.match(CParser.Caret)
self.state = 831
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 830
self.typeQualifierList(0)
self.state = 833
self.pointer()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeQualifierListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeQualifier(self):
return self.getTypedRuleContext(CParser.TypeQualifierContext,0)
def typeQualifierList(self):
return self.getTypedRuleContext(CParser.TypeQualifierListContext,0)
def getRuleIndex(self):
return CParser.RULE_typeQualifierList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeQualifierList" ):
listener.enterTypeQualifierList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeQualifierList" ):
listener.exitTypeQualifierList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeQualifierList" ):
return visitor.visitTypeQualifierList(self)
else:
return visitor.visitChildren(self)
def typeQualifierList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.TypeQualifierListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 110
self.enterRecursionRule(localctx, 110, self.RULE_typeQualifierList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 837
self.typeQualifier()
self._ctx.stop = self._input.LT(-1)
self.state = 843
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,80,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.TypeQualifierListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_typeQualifierList)
self.state = 839
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 840
self.typeQualifier()
self.state = 845
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,80,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ParameterTypeListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def parameterList(self):
return self.getTypedRuleContext(CParser.ParameterListContext,0)
def getRuleIndex(self):
return CParser.RULE_parameterTypeList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameterTypeList" ):
listener.enterParameterTypeList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameterTypeList" ):
listener.exitParameterTypeList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParameterTypeList" ):
return visitor.visitParameterTypeList(self)
else:
return visitor.visitChildren(self)
def parameterTypeList(self):
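# Matches parameterList, optionally followed by ',' '...' for variadic parameter lists.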
localctx = CParser.ParameterTypeListContext(self, self._ctx, self.state)
self.enterRule(localctx, 112, self.RULE_parameterTypeList)
try:
self.state = 851
la_ = self._interp.adaptivePredict(self._input,81,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 846
self.parameterList(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 847
self.parameterList(0)
self.state = 848
self.match(CParser.Comma)
self.state = 849
self.match(CParser.Ellipsis)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ParameterListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def parameterDeclaration(self):
return self.getTypedRuleContext(CParser.ParameterDeclarationContext,0)
def parameterList(self):
return self.getTypedRuleContext(CParser.ParameterListContext,0)
def getRuleIndex(self):
return CParser.RULE_parameterList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameterList" ):
listener.enterParameterList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameterList" ):
listener.exitParameterList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParameterList" ):
return visitor.visitParameterList(self)
else:
return visitor.visitChildren(self)
def parameterList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.ParameterListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 114
self.enterRecursionRule(localctx, 114, self.RULE_parameterList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 854
self.parameterDeclaration()
self._ctx.stop = self._input.LT(-1)
self.state = 861
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,82,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.ParameterListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_parameterList)
self.state = 856
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 857
self.match(CParser.Comma)
self.state = 858
self.parameterDeclaration()
self.state = 863
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,82,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ParameterDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarationSpecifiers(self):
return self.getTypedRuleContext(CParser.DeclarationSpecifiersContext,0)
def declarator(self):
return self.getTypedRuleContext(CParser.DeclaratorContext,0)
def declarationSpecifiers2(self):
return self.getTypedRuleContext(CParser.DeclarationSpecifiers2Context,0)
def abstractDeclarator(self):
return self.getTypedRuleContext(CParser.AbstractDeclaratorContext,0)
def getRuleIndex(self):
return CParser.RULE_parameterDeclaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameterDeclaration" ):
listener.enterParameterDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameterDeclaration" ):
listener.exitParameterDeclaration(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParameterDeclaration" ):
return visitor.visitParameterDeclaration(self)
else:
return visitor.visitChildren(self)
def parameterDeclaration(self):
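# Matches declarationSpecifiers declarator, or declarationSpecifiers2 with an optional
# abstractDeclarator (for unnamed parameters).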
localctx = CParser.ParameterDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 116, self.RULE_parameterDeclaration)
try:
self.state = 871
la_ = self._interp.adaptivePredict(self._input,84,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 864
self.declarationSpecifiers()
self.state = 865
self.declarator()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 867
self.declarationSpecifiers2()
self.state = 869
la_ = self._interp.adaptivePredict(self._input,83,self._ctx)
if la_ == 1:
self.state = 868
self.abstractDeclarator()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdentifierListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def identifierList(self):
return self.getTypedRuleContext(CParser.IdentifierListContext,0)
def getRuleIndex(self):
return CParser.RULE_identifierList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIdentifierList" ):
listener.enterIdentifierList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIdentifierList" ):
listener.exitIdentifierList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIdentifierList" ):
return visitor.visitIdentifierList(self)
else:
return visitor.visitChildren(self)
def identifierList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.IdentifierListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 118
self.enterRecursionRule(localctx, 118, self.RULE_identifierList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 874
self.match(CParser.Identifier)
self._ctx.stop = self._input.LT(-1)
self.state = 881
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,85,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.IdentifierListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_identifierList)
self.state = 876
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 877
self.match(CParser.Comma)
self.state = 878
self.match(CParser.Identifier)
self.state = 883
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,85,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class TypeNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def specifierQualifierList(self):
return self.getTypedRuleContext(CParser.SpecifierQualifierListContext,0)
def abstractDeclarator(self):
return self.getTypedRuleContext(CParser.AbstractDeclaratorContext,0)
def getRuleIndex(self):
return CParser.RULE_typeName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeName" ):
listener.enterTypeName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeName" ):
listener.exitTypeName(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeName" ):
return visitor.visitTypeName(self)
else:
return visitor.visitChildren(self)
def typeName(self):
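# Matches specifierQualifierList with an optional abstractDeclarator (a C type-name, as used
# in casts and sizeof).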
localctx = CParser.TypeNameContext(self, self._ctx, self.state)
self.enterRule(localctx, 120, self.RULE_typeName)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 884
self.specifierQualifierList()
self.state = 886
_la = self._input.LA(1)
if ((((_la - 59)) & ~0x3f) == 0 and ((1 << (_la - 59)) & ((1 << (CParser.LeftParen - 59)) | (1 << (CParser.LeftBracket - 59)) | (1 << (CParser.Star - 59)) | (1 << (CParser.Caret - 59)))) != 0):
self.state = 885
self.abstractDeclarator()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AbstractDeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def pointer(self):
return self.getTypedRuleContext(CParser.PointerContext,0)
def directAbstractDeclarator(self):
return self.getTypedRuleContext(CParser.DirectAbstractDeclaratorContext,0)
def gccDeclaratorExtension(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.GccDeclaratorExtensionContext)
else:
return self.getTypedRuleContext(CParser.GccDeclaratorExtensionContext,i)
def getRuleIndex(self):
return CParser.RULE_abstractDeclarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAbstractDeclarator" ):
listener.enterAbstractDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAbstractDeclarator" ):
listener.exitAbstractDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAbstractDeclarator" ):
return visitor.visitAbstractDeclarator(self)
else:
return visitor.visitChildren(self)
def abstractDeclarator(self):
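# Matches a bare pointer, or an optional pointer followed by a directAbstractDeclarator and
# any trailing gccDeclaratorExtension parts.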
localctx = CParser.AbstractDeclaratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 122, self.RULE_abstractDeclarator)
self._la = 0 # Token type
try:
self.state = 899
la_ = self._interp.adaptivePredict(self._input,89,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 888
self.pointer()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 890
_la = self._input.LA(1)
if _la==CParser.Star or _la==CParser.Caret:
self.state = 889
self.pointer()
self.state = 892
self.directAbstractDeclarator(0)
self.state = 896
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,88,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 893
self.gccDeclaratorExtension()
self.state = 898
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,88,self._ctx)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DirectAbstractDeclaratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def abstractDeclarator(self):
return self.getTypedRuleContext(CParser.AbstractDeclaratorContext,0)
def gccDeclaratorExtension(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.GccDeclaratorExtensionContext)
else:
return self.getTypedRuleContext(CParser.GccDeclaratorExtensionContext,i)
def typeQualifierList(self):
return self.getTypedRuleContext(CParser.TypeQualifierListContext,0)
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def parameterTypeList(self):
return self.getTypedRuleContext(CParser.ParameterTypeListContext,0)
def directAbstractDeclarator(self):
return self.getTypedRuleContext(CParser.DirectAbstractDeclaratorContext,0)
def getRuleIndex(self):
return CParser.RULE_directAbstractDeclarator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDirectAbstractDeclarator" ):
listener.enterDirectAbstractDeclarator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDirectAbstractDeclarator" ):
listener.exitDirectAbstractDeclarator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDirectAbstractDeclarator" ):
return visitor.visitDirectAbstractDeclarator(self)
else:
return visitor.visitChildren(self)
def directAbstractDeclarator(self, _p:int=0):
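# Auto-generated left-recursive rule method: matches the base of a directAbstractDeclarator
# (a parenthesized abstractDeclarator, an '[...]' array form, or a '(...)' parameter list) and
# then loops over further array/function suffixes in the recursion loop below.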
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.DirectAbstractDeclaratorContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 124
self.enterRecursionRule(localctx, 124, self.RULE_directAbstractDeclarator, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 947
la_ = self._interp.adaptivePredict(self._input,96,self._ctx)
if la_ == 1:
self.state = 902
self.match(CParser.LeftParen)
self.state = 903
self.abstractDeclarator()
self.state = 904
self.match(CParser.RightParen)
self.state = 908
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,90,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 905
self.gccDeclaratorExtension()
self.state = 910
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,90,self._ctx)
pass
elif la_ == 2:
self.state = 911
self.match(CParser.LeftBracket)
self.state = 913
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 912
self.typeQualifierList(0)
self.state = 916
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 915
self.assignmentExpression()
self.state = 918
self.match(CParser.RightBracket)
pass
elif la_ == 3:
self.state = 919
self.match(CParser.LeftBracket)
self.state = 920
self.match(CParser.Static)
self.state = 922
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 921
self.typeQualifierList(0)
self.state = 924
self.assignmentExpression()
self.state = 925
self.match(CParser.RightBracket)
pass
elif la_ == 4:
self.state = 927
self.match(CParser.LeftBracket)
self.state = 928
self.typeQualifierList(0)
self.state = 929
self.match(CParser.Static)
self.state = 930
self.assignmentExpression()
self.state = 931
self.match(CParser.RightBracket)
pass
elif la_ == 5:
self.state = 933
self.match(CParser.LeftBracket)
self.state = 934
self.match(CParser.Star)
self.state = 935
self.match(CParser.RightBracket)
pass
elif la_ == 6:
self.state = 936
self.match(CParser.LeftParen)
self.state = 938
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__11) | (1 << CParser.Auto) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Double) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.Alignas) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Noreturn) | (1 << CParser.ThreadLocal))) != 0) or _la==CParser.Identifier:
self.state = 937
self.parameterTypeList()
self.state = 940
self.match(CParser.RightParen)
self.state = 944
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,95,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 941
self.gccDeclaratorExtension()
self.state = 946
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,95,self._ctx)
pass
self._ctx.stop = self._input.LT(-1)
self.state = 992
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,103,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 990
la_ = self._interp.adaptivePredict(self._input,102,self._ctx)
if la_ == 1:
localctx = CParser.DirectAbstractDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directAbstractDeclarator)
self.state = 949
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 950
self.match(CParser.LeftBracket)
self.state = 952
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 951
self.typeQualifierList(0)
self.state = 955
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 954
self.assignmentExpression()
self.state = 957
self.match(CParser.RightBracket)
pass
elif la_ == 2:
localctx = CParser.DirectAbstractDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directAbstractDeclarator)
self.state = 958
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 959
self.match(CParser.LeftBracket)
self.state = 960
self.match(CParser.Static)
self.state = 962
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.Const) | (1 << CParser.Restrict) | (1 << CParser.Volatile) | (1 << CParser.Atomic))) != 0):
self.state = 961
self.typeQualifierList(0)
self.state = 964
self.assignmentExpression()
self.state = 965
self.match(CParser.RightBracket)
pass
elif la_ == 3:
localctx = CParser.DirectAbstractDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directAbstractDeclarator)
self.state = 967
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 968
self.match(CParser.LeftBracket)
self.state = 969
self.typeQualifierList(0)
self.state = 970
self.match(CParser.Static)
self.state = 971
self.assignmentExpression()
self.state = 972
self.match(CParser.RightBracket)
pass
elif la_ == 4:
localctx = CParser.DirectAbstractDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directAbstractDeclarator)
self.state = 974
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 975
self.match(CParser.LeftBracket)
self.state = 976
self.match(CParser.Star)
self.state = 977
self.match(CParser.RightBracket)
pass
elif la_ == 5:
localctx = CParser.DirectAbstractDeclaratorContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_directAbstractDeclarator)
self.state = 978
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 979
self.match(CParser.LeftParen)
self.state = 981
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__11) | (1 << CParser.Auto) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Double) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.Alignas) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Noreturn) | (1 << CParser.ThreadLocal))) != 0) or _la==CParser.Identifier:
self.state = 980
self.parameterTypeList()
self.state = 983
self.match(CParser.RightParen)
self.state = 987
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,101,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 984
self.gccDeclaratorExtension()
self.state = 989
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,101,self._ctx)
pass
self.state = 994
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,103,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class TypedefNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_typedefName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypedefName" ):
listener.enterTypedefName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypedefName" ):
listener.exitTypedefName(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypedefName" ):
return visitor.visitTypedefName(self)
else:
return visitor.visitChildren(self)
def typedefName(self):
localctx = CParser.TypedefNameContext(self, self._ctx, self.state)
self.enterRule(localctx, 126, self.RULE_typedefName)
try:
self.enterOuterAlt(localctx, 1)
self.state = 995
self.match(CParser.Identifier)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class InitializerContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def assignmentExpression(self):
return self.getTypedRuleContext(CParser.AssignmentExpressionContext,0)
def initializerList(self):
return self.getTypedRuleContext(CParser.InitializerListContext,0)
def getRuleIndex(self):
return CParser.RULE_initializer
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInitializer" ):
listener.enterInitializer(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInitializer" ):
listener.exitInitializer(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInitializer" ):
return visitor.visitInitializer(self)
else:
return visitor.visitChildren(self)
def initializer(self):
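# Matches an assignmentExpression, or a braced initializerList with an optional trailing comma.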
localctx = CParser.InitializerContext(self, self._ctx, self.state)
self.enterRule(localctx, 128, self.RULE_initializer)
try:
self.state = 1007
la_ = self._interp.adaptivePredict(self._input,104,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 997
self.assignmentExpression()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 998
self.match(CParser.LeftBrace)
self.state = 999
self.initializerList(0)
self.state = 1000
self.match(CParser.RightBrace)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 1002
self.match(CParser.LeftBrace)
self.state = 1003
self.initializerList(0)
self.state = 1004
self.match(CParser.Comma)
self.state = 1005
self.match(CParser.RightBrace)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class InitializerListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def initializer(self):
return self.getTypedRuleContext(CParser.InitializerContext,0)
def designation(self):
return self.getTypedRuleContext(CParser.DesignationContext,0)
def initializerList(self):
return self.getTypedRuleContext(CParser.InitializerListContext,0)
def getRuleIndex(self):
return CParser.RULE_initializerList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInitializerList" ):
listener.enterInitializerList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInitializerList" ):
listener.exitInitializerList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInitializerList" ):
return visitor.visitInitializerList(self)
else:
return visitor.visitChildren(self)
def initializerList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.InitializerListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 130
self.enterRecursionRule(localctx, 130, self.RULE_initializerList, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1011
_la = self._input.LA(1)
if _la==CParser.LeftBracket or _la==CParser.Dot:
self.state = 1010
self.designation()
self.state = 1013
self.initializer()
self._ctx.stop = self._input.LT(-1)
self.state = 1023
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,107,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.InitializerListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_initializerList)
self.state = 1015
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 1016
self.match(CParser.Comma)
self.state = 1018
_la = self._input.LA(1)
if _la==CParser.LeftBracket or _la==CParser.Dot:
self.state = 1017
self.designation()
self.state = 1020
self.initializer()
self.state = 1025
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,107,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class DesignationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def designatorList(self):
return self.getTypedRuleContext(CParser.DesignatorListContext,0)
def getRuleIndex(self):
return CParser.RULE_designation
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDesignation" ):
listener.enterDesignation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDesignation" ):
listener.exitDesignation(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDesignation" ):
return visitor.visitDesignation(self)
else:
return visitor.visitChildren(self)
def designation(self):
localctx = CParser.DesignationContext(self, self._ctx, self.state)
self.enterRule(localctx, 132, self.RULE_designation)
try:
self.enterOuterAlt(localctx, 1)
self.state = 1026
self.designatorList(0)
self.state = 1027
self.match(CParser.Assign)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DesignatorListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def designator(self):
return self.getTypedRuleContext(CParser.DesignatorContext,0)
def designatorList(self):
return self.getTypedRuleContext(CParser.DesignatorListContext,0)
def getRuleIndex(self):
return CParser.RULE_designatorList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDesignatorList" ):
listener.enterDesignatorList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDesignatorList" ):
listener.exitDesignatorList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDesignatorList" ):
return visitor.visitDesignatorList(self)
else:
return visitor.visitChildren(self)
def designatorList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.DesignatorListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 134
self.enterRecursionRule(localctx, 134, self.RULE_designatorList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 1030
self.designator()
self._ctx.stop = self._input.LT(-1)
self.state = 1036
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,108,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.DesignatorListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_designatorList)
self.state = 1032
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 1033
self.designator()
self.state = 1038
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,108,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class DesignatorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def getRuleIndex(self):
return CParser.RULE_designator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDesignator" ):
listener.enterDesignator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDesignator" ):
listener.exitDesignator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDesignator" ):
return visitor.visitDesignator(self)
else:
return visitor.visitChildren(self)
def designator(self):
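# Matches '[' constantExpression ']' or '.' Identifier.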
localctx = CParser.DesignatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 136, self.RULE_designator)
try:
self.state = 1045
token = self._input.LA(1)
if token in [CParser.LeftBracket]:
self.enterOuterAlt(localctx, 1)
self.state = 1039
self.match(CParser.LeftBracket)
self.state = 1040
self.constantExpression()
self.state = 1041
self.match(CParser.RightBracket)
elif token in [CParser.Dot]:
self.enterOuterAlt(localctx, 2)
self.state = 1043
self.match(CParser.Dot)
self.state = 1044
self.match(CParser.Identifier)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
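    # Added illustrative note (not part of the ANTLR-generated parser): the designator
    # rule above accepts the two C11 designator forms, a bracketed constant expression
    # or a dotted member name, as used in initializers such as:
    #   int a[4] = { [2] = 5 };        /* '[' constantExpression ']' */
    #   struct point p = { .x = 1 };   /* '.' Identifier             */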
class StaticAssertDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def StringLiteral(self, i:int=None):
if i is None:
return self.getTokens(CParser.StringLiteral)
else:
return self.getToken(CParser.StringLiteral, i)
def getRuleIndex(self):
return CParser.RULE_staticAssertDeclaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStaticAssertDeclaration" ):
listener.enterStaticAssertDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStaticAssertDeclaration" ):
listener.exitStaticAssertDeclaration(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStaticAssertDeclaration" ):
return visitor.visitStaticAssertDeclaration(self)
else:
return visitor.visitChildren(self)
def staticAssertDeclaration(self):
localctx = CParser.StaticAssertDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 138, self.RULE_staticAssertDeclaration)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1047
self.match(CParser.StaticAssert)
self.state = 1048
self.match(CParser.LeftParen)
self.state = 1049
self.constantExpression()
self.state = 1050
self.match(CParser.Comma)
self.state = 1052
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 1051
self.match(CParser.StringLiteral)
self.state = 1054
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==CParser.StringLiteral):
break
self.state = 1056
self.match(CParser.RightParen)
self.state = 1057
self.match(CParser.Semi)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
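    # Added illustrative note: staticAssertDeclaration corresponds to a C11 static
    # assertion, e.g.
    #   _Static_assert(sizeof(int) == 4, "int must be 4 bytes");
    # The while-loop above consumes one or more adjacent StringLiteral tokens,
    # mirroring C string-literal concatenation in the message position.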
class StatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def labeledStatement(self):
return self.getTypedRuleContext(CParser.LabeledStatementContext,0)
def compoundStatement(self):
return self.getTypedRuleContext(CParser.CompoundStatementContext,0)
def expressionStatement(self):
return self.getTypedRuleContext(CParser.ExpressionStatementContext,0)
def selectionStatement(self):
return self.getTypedRuleContext(CParser.SelectionStatementContext,0)
def iterationStatement(self):
return self.getTypedRuleContext(CParser.IterationStatementContext,0)
def jumpStatement(self):
return self.getTypedRuleContext(CParser.JumpStatementContext,0)
def logicalOrExpression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.LogicalOrExpressionContext)
else:
return self.getTypedRuleContext(CParser.LogicalOrExpressionContext,i)
def getRuleIndex(self):
return CParser.RULE_statement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStatement" ):
listener.enterStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStatement" ):
listener.exitStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStatement" ):
return visitor.visitStatement(self)
else:
return visitor.visitChildren(self)
def statement(self):
localctx = CParser.StatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 140, self.RULE_statement)
self._la = 0 # Token type
try:
self.state = 1096
la_ = self._interp.adaptivePredict(self._input,116,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 1059
self.labeledStatement()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 1060
self.compoundStatement()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 1061
self.expressionStatement()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 1062
self.selectionStatement()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 1063
self.iterationStatement()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 1064
self.jumpStatement()
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 1065
_la = self._input.LA(1)
if not(_la==CParser.T__10 or _la==CParser.T__12):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 1066
_la = self._input.LA(1)
if not(_la==CParser.T__13 or _la==CParser.Volatile):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 1067
self.match(CParser.LeftParen)
self.state = 1076
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1068
self.logicalOrExpression(0)
self.state = 1073
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==CParser.Comma:
self.state = 1069
self.match(CParser.Comma)
self.state = 1070
self.logicalOrExpression(0)
self.state = 1075
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1091
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==CParser.Colon:
self.state = 1078
self.match(CParser.Colon)
self.state = 1087
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1079
self.logicalOrExpression(0)
self.state = 1084
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==CParser.Comma:
self.state = 1080
self.match(CParser.Comma)
self.state = 1081
self.logicalOrExpression(0)
self.state = 1086
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1093
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1094
self.match(CParser.RightParen)
self.state = 1095
self.match(CParser.Semi)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
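    # Added illustrative note: alternatives 1-6 dispatch to the usual C statement kinds
    # (labeled, compound, expression, selection, iteration, jump). Alternative 7 appears
    # to be the grammar's GCC-style inline-assembly statement, roughly
    #   __asm__ volatile ("nop" : : : );
    # parsed as an introducer token, a volatile qualifier, '(', optional comma-separated
    # logicalOrExpression lists separated by ':', ')', ';'. The exact introducer literals
    # (T__10/T__12/T__13) are defined elsewhere in the grammar and are assumed here.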
class LabeledStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def statement(self):
return self.getTypedRuleContext(CParser.StatementContext,0)
def constantExpression(self):
return self.getTypedRuleContext(CParser.ConstantExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_labeledStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLabeledStatement" ):
listener.enterLabeledStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLabeledStatement" ):
listener.exitLabeledStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLabeledStatement" ):
return visitor.visitLabeledStatement(self)
else:
return visitor.visitChildren(self)
def labeledStatement(self):
localctx = CParser.LabeledStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 142, self.RULE_labeledStatement)
try:
self.state = 1109
token = self._input.LA(1)
if token in [CParser.Identifier]:
self.enterOuterAlt(localctx, 1)
self.state = 1098
self.match(CParser.Identifier)
self.state = 1099
self.match(CParser.Colon)
self.state = 1100
self.statement()
elif token in [CParser.Case]:
self.enterOuterAlt(localctx, 2)
self.state = 1101
self.match(CParser.Case)
self.state = 1102
self.constantExpression()
self.state = 1103
self.match(CParser.Colon)
self.state = 1104
self.statement()
elif token in [CParser.Default]:
self.enterOuterAlt(localctx, 3)
self.state = 1106
self.match(CParser.Default)
self.state = 1107
self.match(CParser.Colon)
self.state = 1108
self.statement()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
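    # Added illustrative note: labeledStatement matches the three C label forms, e.g.
    #   done:    return 0;   /* Identifier ':' statement                */
    #   case 1:  x = 1;      /* 'case' constantExpression ':' statement */
    #   default: x = 0;      /* 'default' ':' statement                 */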
class CompoundStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def blockItemList(self):
return self.getTypedRuleContext(CParser.BlockItemListContext,0)
def getRuleIndex(self):
return CParser.RULE_compoundStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCompoundStatement" ):
listener.enterCompoundStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCompoundStatement" ):
listener.exitCompoundStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCompoundStatement" ):
return visitor.visitCompoundStatement(self)
else:
return visitor.visitChildren(self)
def compoundStatement(self):
localctx = CParser.CompoundStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 144, self.RULE_compoundStatement)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1111
self.match(CParser.LeftBrace)
self.state = 1113
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__10) | (1 << CParser.T__11) | (1 << CParser.T__12) | (1 << CParser.Auto) | (1 << CParser.Break) | (1 << CParser.Case) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Continue) | (1 << CParser.Default) | (1 << CParser.Do) | (1 << CParser.Double) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.For) | (1 << CParser.Goto) | (1 << CParser.If) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Return) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Sizeof) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Switch) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.While) | (1 << CParser.Alignas) | (1 << CParser.Alignof) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Generic) | (1 << CParser.Noreturn) | (1 << CParser.StaticAssert) | (1 << CParser.ThreadLocal) | (1 << CParser.LeftParen) | (1 << CParser.LeftBrace))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Semi - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1112
self.blockItemList(0)
self.state = 1115
self.match(CParser.RightBrace)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
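    # Added illustrative note: compoundStatement is a braced block such as
    #   { int x = 0; x++; }
    # and, because blockItemList is optional above, the empty block '{}' also parses.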
class BlockItemListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def blockItem(self):
return self.getTypedRuleContext(CParser.BlockItemContext,0)
def blockItemList(self):
return self.getTypedRuleContext(CParser.BlockItemListContext,0)
def getRuleIndex(self):
return CParser.RULE_blockItemList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlockItemList" ):
listener.enterBlockItemList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlockItemList" ):
listener.exitBlockItemList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlockItemList" ):
return visitor.visitBlockItemList(self)
else:
return visitor.visitChildren(self)
def blockItemList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.BlockItemListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 146
self.enterRecursionRule(localctx, 146, self.RULE_blockItemList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 1118
self.blockItem()
self._ctx.stop = self._input.LT(-1)
self.state = 1124
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,119,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.BlockItemListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_blockItemList)
self.state = 1120
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 1121
self.blockItem()
self.state = 1126
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,119,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class BlockItemContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declaration(self):
return self.getTypedRuleContext(CParser.DeclarationContext,0)
def statement(self):
return self.getTypedRuleContext(CParser.StatementContext,0)
def getRuleIndex(self):
return CParser.RULE_blockItem
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlockItem" ):
listener.enterBlockItem(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlockItem" ):
listener.exitBlockItem(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlockItem" ):
return visitor.visitBlockItem(self)
else:
return visitor.visitChildren(self)
def blockItem(self):
localctx = CParser.BlockItemContext(self, self._ctx, self.state)
self.enterRule(localctx, 148, self.RULE_blockItem)
try:
self.state = 1129
la_ = self._interp.adaptivePredict(self._input,120,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 1127
self.declaration()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 1128
self.statement()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_expressionStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpressionStatement" ):
listener.enterExpressionStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpressionStatement" ):
listener.exitExpressionStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpressionStatement" ):
return visitor.visitExpressionStatement(self)
else:
return visitor.visitChildren(self)
def expressionStatement(self):
localctx = CParser.ExpressionStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 150, self.RULE_expressionStatement)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1132
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1131
self.expression(0)
self.state = 1134
self.match(CParser.Semi)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SelectionStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def statement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.StatementContext)
else:
return self.getTypedRuleContext(CParser.StatementContext,i)
def getRuleIndex(self):
return CParser.RULE_selectionStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSelectionStatement" ):
listener.enterSelectionStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSelectionStatement" ):
listener.exitSelectionStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSelectionStatement" ):
return visitor.visitSelectionStatement(self)
else:
return visitor.visitChildren(self)
def selectionStatement(self):
localctx = CParser.SelectionStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 152, self.RULE_selectionStatement)
try:
self.state = 1151
token = self._input.LA(1)
if token in [CParser.If]:
self.enterOuterAlt(localctx, 1)
self.state = 1136
self.match(CParser.If)
self.state = 1137
self.match(CParser.LeftParen)
self.state = 1138
self.expression(0)
self.state = 1139
self.match(CParser.RightParen)
self.state = 1140
self.statement()
self.state = 1143
la_ = self._interp.adaptivePredict(self._input,122,self._ctx)
if la_ == 1:
self.state = 1141
self.match(CParser.Else)
self.state = 1142
self.statement()
elif token in [CParser.Switch]:
self.enterOuterAlt(localctx, 2)
self.state = 1145
self.match(CParser.Switch)
self.state = 1146
self.match(CParser.LeftParen)
self.state = 1147
self.expression(0)
self.state = 1148
self.match(CParser.RightParen)
self.state = 1149
self.statement()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
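    # Added illustrative note: selectionStatement covers 'if'/'if-else' and 'switch', e.g.
    #   if (x > 0) y = 1; else y = -1;
    #   switch (x) { case 0: y = 0; break; default: y = 1; }
    # The optional 'else' branch is chosen via adaptivePredict (decision 122 above), so a
    # dangling 'else' binds to the nearest preceding 'if', as C requires.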
class IterationStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CParser.ExpressionContext)
else:
return self.getTypedRuleContext(CParser.ExpressionContext,i)
def statement(self):
return self.getTypedRuleContext(CParser.StatementContext,0)
def declaration(self):
return self.getTypedRuleContext(CParser.DeclarationContext,0)
def getRuleIndex(self):
return CParser.RULE_iterationStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIterationStatement" ):
listener.enterIterationStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIterationStatement" ):
listener.exitIterationStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIterationStatement" ):
return visitor.visitIterationStatement(self)
else:
return visitor.visitChildren(self)
def iterationStatement(self):
localctx = CParser.IterationStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 154, self.RULE_iterationStatement)
self._la = 0 # Token type
try:
self.state = 1195
la_ = self._interp.adaptivePredict(self._input,129,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 1153
self.match(CParser.While)
self.state = 1154
self.match(CParser.LeftParen)
self.state = 1155
self.expression(0)
self.state = 1156
self.match(CParser.RightParen)
self.state = 1157
self.statement()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 1159
self.match(CParser.Do)
self.state = 1160
self.statement()
self.state = 1161
self.match(CParser.While)
self.state = 1162
self.match(CParser.LeftParen)
self.state = 1163
self.expression(0)
self.state = 1164
self.match(CParser.RightParen)
self.state = 1165
self.match(CParser.Semi)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 1167
self.match(CParser.For)
self.state = 1168
self.match(CParser.LeftParen)
self.state = 1170
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1169
self.expression(0)
self.state = 1172
self.match(CParser.Semi)
self.state = 1174
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1173
self.expression(0)
self.state = 1176
self.match(CParser.Semi)
self.state = 1178
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1177
self.expression(0)
self.state = 1180
self.match(CParser.RightParen)
self.state = 1181
self.statement()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 1182
self.match(CParser.For)
self.state = 1183
self.match(CParser.LeftParen)
self.state = 1184
self.declaration()
self.state = 1186
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1185
self.expression(0)
self.state = 1188
self.match(CParser.Semi)
self.state = 1190
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1189
self.expression(0)
self.state = 1192
self.match(CParser.RightParen)
self.state = 1193
self.statement()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
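    # Added illustrative note: iterationStatement matches the four C loop forms, e.g.
    #   while (i < n) i++;                   /* alt 1 */
    #   do { i++; } while (i < n);           /* alt 2 */
    #   for (i = 0; i < n; i++) work();      /* alt 3: expression-style init */
    #   for (int i = 0; i < n; i++) work();  /* alt 4: C99 declaration init  */
    # In alt 3 all three clauses are optional, so 'for (;;) ;' also parses.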
class JumpStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(CParser.Identifier, 0)
def expression(self):
return self.getTypedRuleContext(CParser.ExpressionContext,0)
def unaryExpression(self):
return self.getTypedRuleContext(CParser.UnaryExpressionContext,0)
def getRuleIndex(self):
return CParser.RULE_jumpStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterJumpStatement" ):
listener.enterJumpStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitJumpStatement" ):
listener.exitJumpStatement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitJumpStatement" ):
return visitor.visitJumpStatement(self)
else:
return visitor.visitChildren(self)
def jumpStatement(self):
localctx = CParser.JumpStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 156, self.RULE_jumpStatement)
self._la = 0 # Token type
try:
self.state = 1213
la_ = self._interp.adaptivePredict(self._input,131,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 1197
self.match(CParser.Goto)
self.state = 1198
self.match(CParser.Identifier)
self.state = 1199
self.match(CParser.Semi)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 1200
self.match(CParser.Continue)
self.state = 1201
self.match(CParser.Semi)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 1202
self.match(CParser.Break)
self.state = 1203
self.match(CParser.Semi)
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 1204
self.match(CParser.Return)
self.state = 1206
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__1) | (1 << CParser.T__2) | (1 << CParser.Sizeof) | (1 << CParser.Alignof) | (1 << CParser.Generic) | (1 << CParser.LeftParen))) != 0) or ((((_la - 71)) & ~0x3f) == 0 and ((1 << (_la - 71)) & ((1 << (CParser.Plus - 71)) | (1 << (CParser.PlusPlus - 71)) | (1 << (CParser.Minus - 71)) | (1 << (CParser.MinusMinus - 71)) | (1 << (CParser.Star - 71)) | (1 << (CParser.And - 71)) | (1 << (CParser.AndAnd - 71)) | (1 << (CParser.Not - 71)) | (1 << (CParser.Tilde - 71)) | (1 << (CParser.Identifier - 71)) | (1 << (CParser.Constant - 71)) | (1 << (CParser.StringLiteral - 71)))) != 0):
self.state = 1205
self.expression(0)
self.state = 1208
self.match(CParser.Semi)
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 1209
self.match(CParser.Goto)
self.state = 1210
self.unaryExpression()
self.state = 1211
self.match(CParser.Semi)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
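    # Added illustrative note: jumpStatement covers the C jump statements plus a GCC
    # extension for computed goto, e.g.
    #   goto retry;          /* alt 1 */
    #   continue;            /* alt 2 */
    #   break;               /* alt 3 */
    #   return x + 1;        /* alt 4, expression optional */
    #   goto *dispatch[i];   /* alt 5: 'goto' unaryExpression (GCC computed goto) */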
class CompilationUnitContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EOF(self):
return self.getToken(CParser.EOF, 0)
def translationUnit(self):
return self.getTypedRuleContext(CParser.TranslationUnitContext,0)
def getRuleIndex(self):
return CParser.RULE_compilationUnit
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCompilationUnit" ):
listener.enterCompilationUnit(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCompilationUnit" ):
listener.exitCompilationUnit(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCompilationUnit" ):
return visitor.visitCompilationUnit(self)
else:
return visitor.visitChildren(self)
def compilationUnit(self):
localctx = CParser.CompilationUnitContext(self, self._ctx, self.state)
self.enterRule(localctx, 158, self.RULE_compilationUnit)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1216
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__11) | (1 << CParser.Auto) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Double) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.Alignas) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Noreturn) | (1 << CParser.StaticAssert) | (1 << CParser.ThreadLocal) | (1 << CParser.LeftParen))) != 0) or ((((_la - 75)) & ~0x3f) == 0 and ((1 << (_la - 75)) & ((1 << (CParser.Star - 75)) | (1 << (CParser.Caret - 75)) | (1 << (CParser.Semi - 75)) | (1 << (CParser.Identifier - 75)))) != 0):
self.state = 1215
self.translationUnit(0)
self.state = 1218
self.match(CParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TranslationUnitContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def externalDeclaration(self):
return self.getTypedRuleContext(CParser.ExternalDeclarationContext,0)
def translationUnit(self):
return self.getTypedRuleContext(CParser.TranslationUnitContext,0)
def getRuleIndex(self):
return CParser.RULE_translationUnit
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTranslationUnit" ):
listener.enterTranslationUnit(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTranslationUnit" ):
listener.exitTranslationUnit(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTranslationUnit" ):
return visitor.visitTranslationUnit(self)
else:
return visitor.visitChildren(self)
def translationUnit(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.TranslationUnitContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 160
self.enterRecursionRule(localctx, 160, self.RULE_translationUnit, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 1221
self.externalDeclaration()
self._ctx.stop = self._input.LT(-1)
self.state = 1227
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,133,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.TranslationUnitContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_translationUnit)
self.state = 1223
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 1224
self.externalDeclaration()
self.state = 1229
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,133,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ExternalDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def functionDefinition(self):
return self.getTypedRuleContext(CParser.FunctionDefinitionContext,0)
def declaration(self):
return self.getTypedRuleContext(CParser.DeclarationContext,0)
def getRuleIndex(self):
return CParser.RULE_externalDeclaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExternalDeclaration" ):
listener.enterExternalDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExternalDeclaration" ):
listener.exitExternalDeclaration(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExternalDeclaration" ):
return visitor.visitExternalDeclaration(self)
else:
return visitor.visitChildren(self)
def externalDeclaration(self):
localctx = CParser.ExternalDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 162, self.RULE_externalDeclaration)
try:
self.state = 1233
la_ = self._interp.adaptivePredict(self._input,134,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 1230
self.functionDefinition()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 1231
self.declaration()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 1232
self.match(CParser.Semi)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FunctionDefinitionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarator(self):
return self.getTypedRuleContext(CParser.DeclaratorContext,0)
def compoundStatement(self):
return self.getTypedRuleContext(CParser.CompoundStatementContext,0)
def declarationSpecifiers(self):
return self.getTypedRuleContext(CParser.DeclarationSpecifiersContext,0)
def declarationList(self):
return self.getTypedRuleContext(CParser.DeclarationListContext,0)
def getRuleIndex(self):
return CParser.RULE_functionDefinition
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFunctionDefinition" ):
listener.enterFunctionDefinition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFunctionDefinition" ):
listener.exitFunctionDefinition(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFunctionDefinition" ):
return visitor.visitFunctionDefinition(self)
else:
return visitor.visitChildren(self)
def functionDefinition(self):
localctx = CParser.FunctionDefinitionContext(self, self._ctx, self.state)
self.enterRule(localctx, 164, self.RULE_functionDefinition)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1236
la_ = self._interp.adaptivePredict(self._input,135,self._ctx)
if la_ == 1:
self.state = 1235
self.declarationSpecifiers()
self.state = 1238
self.declarator()
self.state = 1240
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << CParser.T__0) | (1 << CParser.T__3) | (1 << CParser.T__4) | (1 << CParser.T__5) | (1 << CParser.T__6) | (1 << CParser.T__7) | (1 << CParser.T__8) | (1 << CParser.T__9) | (1 << CParser.T__11) | (1 << CParser.Auto) | (1 << CParser.Char) | (1 << CParser.Const) | (1 << CParser.Double) | (1 << CParser.Enum) | (1 << CParser.Extern) | (1 << CParser.Float) | (1 << CParser.Inline) | (1 << CParser.Int) | (1 << CParser.Long) | (1 << CParser.Register) | (1 << CParser.Restrict) | (1 << CParser.Short) | (1 << CParser.Signed) | (1 << CParser.Static) | (1 << CParser.Struct) | (1 << CParser.Typedef) | (1 << CParser.Union) | (1 << CParser.Unsigned) | (1 << CParser.Void) | (1 << CParser.Volatile) | (1 << CParser.Alignas) | (1 << CParser.Atomic) | (1 << CParser.Bool) | (1 << CParser.Complex) | (1 << CParser.Noreturn) | (1 << CParser.StaticAssert) | (1 << CParser.ThreadLocal))) != 0) or _la==CParser.Identifier:
self.state = 1239
self.declarationList(0)
self.state = 1242
self.compoundStatement()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclarationListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declaration(self):
return self.getTypedRuleContext(CParser.DeclarationContext,0)
def declarationList(self):
return self.getTypedRuleContext(CParser.DeclarationListContext,0)
def getRuleIndex(self):
return CParser.RULE_declarationList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarationList" ):
listener.enterDeclarationList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarationList" ):
listener.exitDeclarationList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDeclarationList" ):
return visitor.visitDeclarationList(self)
else:
return visitor.visitChildren(self)
def declarationList(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = CParser.DeclarationListContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 166
self.enterRecursionRule(localctx, 166, self.RULE_declarationList, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 1245
self.declaration()
self._ctx.stop = self._input.LT(-1)
self.state = 1251
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,137,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = CParser.DeclarationListContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_declarationList)
self.state = 1247
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 1248
self.declaration()
self.state = 1253
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,137,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates == None:
self._predicates = dict()
self._predicates[2] = self.genericAssocList_sempred
self._predicates[4] = self.postfixExpression_sempred
self._predicates[5] = self.argumentExpressionList_sempred
self._predicates[9] = self.multiplicativeExpression_sempred
self._predicates[10] = self.additiveExpression_sempred
self._predicates[11] = self.shiftExpression_sempred
self._predicates[12] = self.relationalExpression_sempred
self._predicates[13] = self.equalityExpression_sempred
self._predicates[14] = self.andExpression_sempred
self._predicates[15] = self.exclusiveOrExpression_sempred
self._predicates[16] = self.inclusiveOrExpression_sempred
self._predicates[17] = self.logicalAndExpression_sempred
self._predicates[18] = self.logicalOrExpression_sempred
self._predicates[22] = self.expression_sempred
self._predicates[28] = self.initDeclaratorList_sempred
self._predicates[34] = self.structDeclarationList_sempred
self._predicates[37] = self.structDeclaratorList_sempred
self._predicates[40] = self.enumeratorList_sempred
self._predicates[48] = self.directDeclarator_sempred
self._predicates[55] = self.typeQualifierList_sempred
self._predicates[57] = self.parameterList_sempred
self._predicates[59] = self.identifierList_sempred
self._predicates[62] = self.directAbstractDeclarator_sempred
self._predicates[65] = self.initializerList_sempred
self._predicates[67] = self.designatorList_sempred
self._predicates[73] = self.blockItemList_sempred
self._predicates[80] = self.translationUnit_sempred
self._predicates[83] = self.declarationList_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def genericAssocList_sempred(self, localctx:GenericAssocListContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 1)
def postfixExpression_sempred(self, localctx:PostfixExpressionContext, predIndex:int):
if predIndex == 1:
return self.precpred(self._ctx, 10)
if predIndex == 2:
return self.precpred(self._ctx, 9)
if predIndex == 3:
return self.precpred(self._ctx, 8)
if predIndex == 4:
return self.precpred(self._ctx, 7)
if predIndex == 5:
return self.precpred(self._ctx, 6)
if predIndex == 6:
return self.precpred(self._ctx, 5)
def argumentExpressionList_sempred(self, localctx:ArgumentExpressionListContext, predIndex:int):
if predIndex == 7:
return self.precpred(self._ctx, 1)
def multiplicativeExpression_sempred(self, localctx:MultiplicativeExpressionContext, predIndex:int):
if predIndex == 8:
return self.precpred(self._ctx, 3)
if predIndex == 9:
return self.precpred(self._ctx, 2)
if predIndex == 10:
return self.precpred(self._ctx, 1)
def additiveExpression_sempred(self, localctx:AdditiveExpressionContext, predIndex:int):
if predIndex == 11:
return self.precpred(self._ctx, 2)
if predIndex == 12:
return self.precpred(self._ctx, 1)
def shiftExpression_sempred(self, localctx:ShiftExpressionContext, predIndex:int):
if predIndex == 13:
return self.precpred(self._ctx, 2)
if predIndex == 14:
return self.precpred(self._ctx, 1)
def relationalExpression_sempred(self, localctx:RelationalExpressionContext, predIndex:int):
if predIndex == 15:
return self.precpred(self._ctx, 4)
if predIndex == 16:
return self.precpred(self._ctx, 3)
if predIndex == 17:
return self.precpred(self._ctx, 2)
if predIndex == 18:
return self.precpred(self._ctx, 1)
def equalityExpression_sempred(self, localctx:EqualityExpressionContext, predIndex:int):
if predIndex == 19:
return self.precpred(self._ctx, 2)
if predIndex == 20:
return self.precpred(self._ctx, 1)
def andExpression_sempred(self, localctx:AndExpressionContext, predIndex:int):
if predIndex == 21:
return self.precpred(self._ctx, 1)
def exclusiveOrExpression_sempred(self, localctx:ExclusiveOrExpressionContext, predIndex:int):
if predIndex == 22:
return self.precpred(self._ctx, 1)
def inclusiveOrExpression_sempred(self, localctx:InclusiveOrExpressionContext, predIndex:int):
if predIndex == 23:
return self.precpred(self._ctx, 1)
def logicalAndExpression_sempred(self, localctx:LogicalAndExpressionContext, predIndex:int):
if predIndex == 24:
return self.precpred(self._ctx, 1)
def logicalOrExpression_sempred(self, localctx:LogicalOrExpressionContext, predIndex:int):
if predIndex == 25:
return self.precpred(self._ctx, 1)
def expression_sempred(self, localctx:ExpressionContext, predIndex:int):
if predIndex == 26:
return self.precpred(self._ctx, 1)
def initDeclaratorList_sempred(self, localctx:InitDeclaratorListContext, predIndex:int):
if predIndex == 27:
return self.precpred(self._ctx, 1)
def structDeclarationList_sempred(self, localctx:StructDeclarationListContext, predIndex:int):
if predIndex == 28:
return self.precpred(self._ctx, 1)
def structDeclaratorList_sempred(self, localctx:StructDeclaratorListContext, predIndex:int):
if predIndex == 29:
return self.precpred(self._ctx, 1)
def enumeratorList_sempred(self, localctx:EnumeratorListContext, predIndex:int):
if predIndex == 30:
return self.precpred(self._ctx, 1)
def directDeclarator_sempred(self, localctx:DirectDeclaratorContext, predIndex:int):
if predIndex == 31:
return self.precpred(self._ctx, 6)
if predIndex == 32:
return self.precpred(self._ctx, 5)
if predIndex == 33:
return self.precpred(self._ctx, 4)
if predIndex == 34:
return self.precpred(self._ctx, 3)
if predIndex == 35:
return self.precpred(self._ctx, 2)
if predIndex == 36:
return self.precpred(self._ctx, 1)
def typeQualifierList_sempred(self, localctx:TypeQualifierListContext, predIndex:int):
if predIndex == 37:
return self.precpred(self._ctx, 1)
def parameterList_sempred(self, localctx:ParameterListContext, predIndex:int):
if predIndex == 38:
return self.precpred(self._ctx, 1)
def identifierList_sempred(self, localctx:IdentifierListContext, predIndex:int):
if predIndex == 39:
return self.precpred(self._ctx, 1)
def directAbstractDeclarator_sempred(self, localctx:DirectAbstractDeclaratorContext, predIndex:int):
if predIndex == 40:
return self.precpred(self._ctx, 5)
if predIndex == 41:
return self.precpred(self._ctx, 4)
if predIndex == 42:
return self.precpred(self._ctx, 3)
if predIndex == 43:
return self.precpred(self._ctx, 2)
if predIndex == 44:
return self.precpred(self._ctx, 1)
def initializerList_sempred(self, localctx:InitializerListContext, predIndex:int):
if predIndex == 45:
return self.precpred(self._ctx, 1)
def designatorList_sempred(self, localctx:DesignatorListContext, predIndex:int):
if predIndex == 46:
return self.precpred(self._ctx, 1)
def blockItemList_sempred(self, localctx:BlockItemListContext, predIndex:int):
if predIndex == 47:
return self.precpred(self._ctx, 1)
def translationUnit_sempred(self, localctx:TranslationUnitContext, predIndex:int):
if predIndex == 48:
return self.precpred(self._ctx, 1)
def declarationList_sempred(self, localctx:DeclarationListContext, predIndex:int):
if predIndex == 49:
return self.precpred(self._ctx, 1)
avg_line_length: 42.608438 | max_line_length: 3242 | alphanum_fraction: 0.578005

hexsha: 5da96297736f5e16cbae369ad87735aa0bcd18c9 | size: 8060 | ext: py | lang: Python
path: homeassistant/components/vizio/media_player.py
stars repo: mgosk/home-assistant @ 3bf27b9afc50044ad0a244702e8a628247eeb3e0 | licenses: ["Apache-2.0"] | stars: 2 (2019-12-05T19:44:04.000Z to 2021-08-22T12:40:18.000Z)
issues repo: richh1/home-assistant @ a14c299a78259386bbcf7787689e3e7dfa5b1dfd | licenses: ["Apache-2.0"] | issues: null
forks repo: richh1/home-assistant @ a14c299a78259386bbcf7787689e3e7dfa5b1dfd | licenses: ["Apache-2.0"] | forks: 1 (2018-04-29T02:14:32.000Z to 2018-04-29T02:14:32.000Z)
"""Vizio SmartCast Device support."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant import util
from homeassistant.components.media_player import (
MediaPlayerDevice,
PLATFORM_SCHEMA
)
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP
)
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_DEVICE_CLASS,
CONF_HOST,
CONF_NAME,
STATE_OFF,
STATE_ON
)
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_SUPPRESS_WARNING = 'suppress_warning'
CONF_VOLUME_STEP = 'volume_step'
DEFAULT_NAME = 'Vizio SmartCast'
DEFAULT_VOLUME_STEP = 1
DEFAULT_DEVICE_CLASS = 'tv'
DEVICE_ID = 'pyvizio'
DEVICE_NAME = 'Python Vizio'
ICON = 'mdi:television'
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
COMMON_SUPPORTED_COMMANDS = (
SUPPORT_SELECT_SOURCE |
SUPPORT_TURN_ON |
SUPPORT_TURN_OFF |
SUPPORT_VOLUME_MUTE |
SUPPORT_VOLUME_SET |
SUPPORT_VOLUME_STEP
)
SUPPORTED_COMMANDS = {
'soundbar': COMMON_SUPPORTED_COMMANDS,
'tv': (
COMMON_SUPPORTED_COMMANDS |
SUPPORT_NEXT_TRACK |
SUPPORT_PREVIOUS_TRACK
)
}
def validate_auth(config):
"""Validate presence of CONF_ACCESS_TOKEN when CONF_DEVICE_CLASS=tv."""
token = config.get(CONF_ACCESS_TOKEN)
if config[CONF_DEVICE_CLASS] == 'tv' and (token is None or token == ''):
raise vol.Invalid(
"When '{}' is 'tv' then '{}' is required.".format(
CONF_DEVICE_CLASS,
CONF_ACCESS_TOKEN,
),
path=[CONF_ACCESS_TOKEN],
)
return config
PLATFORM_SCHEMA = vol.All(
PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SUPPRESS_WARNING, default=False): cv.boolean,
vol.Optional(CONF_DEVICE_CLASS, default=DEFAULT_DEVICE_CLASS):
vol.All(cv.string, vol.Lower, vol.In(['tv', 'soundbar'])),
vol.Optional(CONF_VOLUME_STEP, default=DEFAULT_VOLUME_STEP):
vol.All(vol.Coerce(int), vol.Range(min=1, max=10)),
}),
validate_auth,
)
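# Added illustrative example (not part of the original component): a configuration.yaml
# entry matching the schema above might look as follows; the platform key 'vizio' is
# assumed from the component path, and the host/token values are placeholders.
#
#   media_player:
#     - platform: vizio
#       host: 192.168.1.10
#       access_token: YOUR_ACCESS_TOKEN   # required when device_class is 'tv'
#       name: Living Room TV
#       device_class: tv                  # 'tv' or 'soundbar'
#       volume_step: 2                    # 1-10
#       suppress_warning: false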
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Vizio media player platform."""
host = config[CONF_HOST]
token = config.get(CONF_ACCESS_TOKEN)
name = config[CONF_NAME]
volume_step = config[CONF_VOLUME_STEP]
device_type = config[CONF_DEVICE_CLASS]
device = VizioDevice(host, token, name, volume_step, device_type)
if device.validate_setup() is False:
fail_auth_msg = ""
if token is not None and token != '':
fail_auth_msg = " and auth token is correct"
_LOGGER.error("Failed to set up Vizio platform, please check if host "
"is valid and available%s", fail_auth_msg)
return
if config[CONF_SUPPRESS_WARNING]:
from requests.packages import urllib3
_LOGGER.warning("InsecureRequestWarning is disabled "
"because of Vizio platform configuration")
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
add_entities([device], True)
class VizioDevice(MediaPlayerDevice):
"""Media Player implementation which performs REST requests to device."""
def __init__(self, host, token, name, volume_step, device_type):
"""Initialize Vizio device."""
import pyvizio
self._name = name
self._state = None
self._volume_level = None
self._volume_step = volume_step
self._current_input = None
self._available_inputs = None
self._device_type = device_type
self._supported_commands = SUPPORTED_COMMANDS[device_type]
self._device = pyvizio.Vizio(DEVICE_ID, host, DEFAULT_NAME, token,
device_type)
self._max_volume = float(self._device.get_max_volume())
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update(self):
"""Retrieve latest state of the device."""
is_on = self._device.get_power_state()
if is_on:
self._state = STATE_ON
volume = self._device.get_current_volume()
if volume is not None:
self._volume_level = float(volume) / self._max_volume
input_ = self._device.get_current_input()
if input_ is not None:
self._current_input = input_.meta_name
inputs = self._device.get_inputs()
if inputs is not None:
self._available_inputs = [input_.name for input_ in inputs]
else:
if is_on is None:
self._state = None
else:
self._state = STATE_OFF
self._volume_level = None
self._current_input = None
self._available_inputs = None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def volume_level(self):
"""Return the volume level of the device."""
return self._volume_level
@property
def source(self):
"""Return current input of the device."""
return self._current_input
@property
def source_list(self):
"""Return list of available inputs of the device."""
return self._available_inputs
@property
def supported_features(self):
"""Flag device features that are supported."""
return self._supported_commands
def turn_on(self):
"""Turn the device on."""
self._device.pow_on()
def turn_off(self):
"""Turn the device off."""
self._device.pow_off()
def mute_volume(self, mute):
"""Mute the volume."""
if mute:
self._device.mute_on()
else:
self._device.mute_off()
def media_previous_track(self):
"""Send previous channel command."""
self._device.ch_down()
def media_next_track(self):
"""Send next channel command."""
self._device.ch_up()
def select_source(self, source):
"""Select input source."""
self._device.input_switch(source)
def volume_up(self):
"""Increasing volume of the device."""
self._device.vol_up(num=self._volume_step)
if self._volume_level is not None:
self._volume_level = min(1.,
self._volume_level +
self._volume_step / self._max_volume)
def volume_down(self):
"""Decreasing volume of the device."""
self._device.vol_down(num=self._volume_step)
if self._volume_level is not None:
self._volume_level = max(0.,
self._volume_level -
self._volume_step / self._max_volume)
def validate_setup(self):
"""Validate if host is available and auth token is correct."""
return self._device.get_current_volume() is not None
def set_volume_level(self, volume):
"""Set volume level."""
if self._volume_level is not None:
if volume > self._volume_level:
num = int(self._max_volume * (volume - self._volume_level))
self._volume_level = volume
self._device.vol_up(num=num)
elif volume < self._volume_level:
num = int(self._max_volume * (self._volume_level - volume))
self._volume_level = volume
self._device.vol_down(num=num)
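    # Added worked example: with max_volume == 100.0 and a request to move from
    # volume_level 0.30 to 0.45, set_volume_level() sends
    # vol_up(num=int(100 * 0.15)) == vol_up(num=15) and stores 0.45 locally, while
    # volume_up()/volume_down() move by volume_step / max_volume per call instead.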
avg_line_length: 31.607843 | max_line_length: 78 | alphanum_fraction: 0.634367

hexsha: bcd12fb5b1c6aab86c9c02ca05e57f0ccdffa222 | size: 191711 | ext: py | lang: Python
path: airflow/models.py
repo (stars/issues/forks): hellosoda/incubator-airflow @ 1f0a717b65e0ea7e0127708b084baff0697f0946 | licenses: ["MIT", "BSD-3-Clause", "BSD-2-Clause", "Apache-2.0"] | stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.standard_library import install_aliases
from builtins import str
from builtins import object, bytes
import copy
from collections import namedtuple, defaultdict
import cryptography
from datetime import timedelta
import dill
import functools
import getpass
import imp
import importlib
import itertools
import zipfile
import jinja2
import json
import logging
import os
import pickle
import re
import signal
import sys
import textwrap
import traceback
import warnings
import hashlib
import uuid
from datetime import datetime
from urllib.parse import urlparse, quote, parse_qsl
from sqlalchemy import (
Column, Integer, String, DateTime, Text, Boolean, ForeignKey, PickleType,
Index, Float, LargeBinary)
from sqlalchemy import func, or_, and_, true as sqltrue
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import reconstructor, relationship, synonym
from sqlalchemy_utc import UtcDateTime
from croniter import croniter
import six
from airflow import settings, utils
from airflow.executors import GetDefaultExecutor, LocalExecutor
from airflow import configuration
from airflow.exceptions import (
AirflowDagCycleException, AirflowException, AirflowSkipException, AirflowTaskTimeout
)
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.lineage import apply_lineage, prepare_lineage
from airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils import timezone
from airflow.utils.dates import cron_presets, date_range as utils_date_range
from airflow.utils.db import provide_session
from airflow.utils.decorators import apply_defaults
from airflow.utils.email import send_email
from airflow.utils.helpers import (
as_tuple, is_container, validate_key, pprinttable)
from airflow.utils.operator_resources import Resources
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.weight_rule import WeightRule
from airflow.utils.net import get_hostname
from airflow.utils.log.logging_mixin import LoggingMixin
install_aliases()
Base = declarative_base()
ID_LEN = 250
XCOM_RETURN_KEY = 'return_value'
Stats = settings.Stats
def get_fernet():
"""
Deferred load of Fernet key.
This function could fail either because Cryptography is not installed
or because the Fernet key is invalid.
:return: Fernet object
:raises: AirflowException if there's a problem trying to load Fernet
"""
try:
from cryptography.fernet import Fernet
except ImportError:
raise AirflowException('Failed to import Fernet, it may not be installed')
try:
return Fernet(configuration.conf.get('core', 'FERNET_KEY').encode('utf-8'))
except (ValueError, TypeError) as ve:
raise AirflowException("Could not create Fernet object: {}".format(ve))
# Used by DAG context_managers
_CONTEXT_MANAGER_DAG = None
def clear_task_instances(tis, session, activate_dag_runs=True, dag=None):
"""
Clears a set of task instances, but makes sure the running ones
get killed.
"""
job_ids = []
for ti in tis:
if ti.state == State.RUNNING:
if ti.job_id:
ti.state = State.SHUTDOWN
job_ids.append(ti.job_id)
else:
task_id = ti.task_id
if dag and dag.has_task(task_id):
task = dag.get_task(task_id)
task_retries = task.retries
ti.max_tries = ti.try_number + task_retries - 1
else:
# Ignore errors when updating max_tries if dag is None or
# task not found in dag since database records could be
# outdated. We make max_tries the maximum value of its
# original max_tries or the current task try number.
ti.max_tries = max(ti.max_tries, ti.try_number - 1)
ti.state = State.NONE
session.merge(ti)
if job_ids:
from airflow.jobs import BaseJob as BJ
for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
job.state = State.SHUTDOWN
if activate_dag_runs and tis:
drs = session.query(DagRun).filter(
DagRun.dag_id.in_({ti.dag_id for ti in tis}),
DagRun.execution_date.in_({ti.execution_date for ti in tis}),
).all()
for dr in drs:
dr.state = State.RUNNING
dr.start_date = timezone.utcnow()
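# --- Editor's note (illustrative worked example, not from the original source):
# for the max_tries arithmetic above, an instance cleared at try_number == 2
# whose task has retries == 3 gets max_tries = 2 + 3 - 1 = 4, so subsequent
# failures keep being retried while try_number <= 4 (see
# TaskInstance.handle_failure further down).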
class DagBag(BaseDagBag, LoggingMixin):
"""
A dagbag is a collection of dags, parsed out of a folder tree, that carries
high level configuration settings, like which database to use as a backend
and which executor to use to fire off tasks. This makes it easier to run
distinct environments, for example production and development, tests, or
different teams or security profiles. What would have been system-level
settings are now dagbag-level, so that one system can run multiple,
independent sets of settings.
:param dag_folder: the folder to scan to find DAGs
:type dag_folder: unicode
:param executor: the executor to use when executing task instances
in this DagBag
:param include_examples: whether to include the examples that ship
with airflow or not
:type include_examples: bool
"""
# static class variables to detect DAG cycles
CYCLE_NEW = 0
CYCLE_IN_PROGRESS = 1
CYCLE_DONE = 2
def __init__(
self,
dag_folder=None,
executor=None,
include_examples=configuration.conf.getboolean('core', 'LOAD_EXAMPLES')):
# do not use default arg in signature, to fix import cycle on plugin load
if executor is None:
executor = GetDefaultExecutor()
dag_folder = dag_folder or settings.DAGS_FOLDER
self.log.info("Filling up the DagBag from %s", dag_folder)
self.dag_folder = dag_folder
self.dags = {}
# the file's last modified timestamp when we last read it
self.file_last_changed = {}
self.executor = executor
self.import_errors = {}
if include_examples:
example_dag_folder = os.path.join(
os.path.dirname(__file__),
'example_dags')
self.collect_dags(example_dag_folder)
self.collect_dags(dag_folder)
def size(self):
"""
:return: the number of DAGs contained in this dagbag
"""
return len(self.dags)
def get_dag(self, dag_id):
"""
Gets the DAG out of the dictionary, and refreshes it if expired
"""
# If asking for a known subdag, we want to refresh the parent
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.is_subdag:
root_dag_id = dag.parent_dag.dag_id
# If the dag corresponding to root_dag_id is absent or expired
orm_dag = DagModel.get_current(root_dag_id)
if orm_dag and (
root_dag_id not in self.dags or
(
orm_dag.last_expired and
dag.last_loaded < orm_dag.last_expired
)
):
# Reprocess source file
found_dags = self.process_file(
filepath=orm_dag.fileloc, only_if_updated=False)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id)
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
"""
Given a path to a python module or zip file, this method imports
the module and looks for dag objects within it.
"""
found_dags = []
# if the source file no longer exists in the DB or in the filesystem,
# return an empty list
# todo: raise exception?
if filepath is None or not os.path.isfile(filepath):
return found_dags
try:
# This failed before in what may have been a git sync
# race condition
file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(filepath))
if only_if_updated \
and filepath in self.file_last_changed \
and file_last_changed_on_disk == self.file_last_changed[filepath]:
return found_dags
except Exception as e:
self.log.exception(e)
return found_dags
mods = []
if not zipfile.is_zipfile(filepath):
if safe_mode and os.path.isfile(filepath):
with open(filepath, 'rb') as f:
content = f.read()
if not all([s in content for s in (b'DAG', b'airflow')]):
self.file_last_changed[filepath] = file_last_changed_on_disk
return found_dags
self.log.debug("Importing %s", filepath)
org_mod_name, _ = os.path.splitext(os.path.split(filepath)[-1])
mod_name = ('unusual_prefix_' +
hashlib.sha1(filepath.encode('utf-8')).hexdigest() +
'_' + org_mod_name)
if mod_name in sys.modules:
del sys.modules[mod_name]
with timeout(configuration.conf.getint('core', "DAGBAG_IMPORT_TIMEOUT")):
try:
m = imp.load_source(mod_name, filepath)
mods.append(m)
except Exception as e:
self.log.exception("Failed to import: %s", filepath)
self.import_errors[filepath] = str(e)
self.file_last_changed[filepath] = file_last_changed_on_disk
else:
zip_file = zipfile.ZipFile(filepath)
for mod in zip_file.infolist():
head, _ = os.path.split(mod.filename)
mod_name, ext = os.path.splitext(mod.filename)
if not head and (ext == '.py' or ext == '.pyc'):
if mod_name == '__init__':
self.log.warning("Found __init__.%s at root of %s", ext, filepath)
if safe_mode:
with zip_file.open(mod.filename) as zf:
self.log.debug("Reading %s from %s", mod.filename, filepath)
content = zf.read()
if not all([s in content for s in (b'DAG', b'airflow')]):
self.file_last_changed[filepath] = (
file_last_changed_on_disk)
# todo: create ignore list
return found_dags
if mod_name in sys.modules:
del sys.modules[mod_name]
try:
sys.path.insert(0, filepath)
m = importlib.import_module(mod_name)
mods.append(m)
except Exception as e:
self.log.exception("Failed to import: %s", filepath)
self.import_errors[filepath] = str(e)
self.file_last_changed[filepath] = file_last_changed_on_disk
for m in mods:
for dag in list(m.__dict__.values()):
if isinstance(dag, DAG):
if not dag.full_filepath:
dag.full_filepath = filepath
if dag.fileloc != filepath:
dag.fileloc = filepath
try:
dag.is_subdag = False
self.bag_dag(dag, parent_dag=dag, root_dag=dag)
found_dags.append(dag)
found_dags += dag.subdags
except AirflowDagCycleException as cycle_exception:
self.log.exception("Failed to bag_dag: %s", dag.full_filepath)
self.import_errors[dag.full_filepath] = str(cycle_exception)
self.file_last_changed[dag.full_filepath] = \
file_last_changed_on_disk
self.file_last_changed[filepath] = file_last_changed_on_disk
return found_dags
@provide_session
def kill_zombies(self, session=None):
"""
Fails tasks that haven't had a heartbeat in too long
"""
from airflow.jobs import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = TaskInstance
secs = configuration.conf.getint('scheduler', 'scheduler_zombie_task_threshold')
limit_dttm = timezone.utcnow() - timedelta(seconds=secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
tis = (
session.query(TI)
.join(LJ, TI.job_id == LJ.id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
))
.all()
)
for ti in tis:
if ti and ti.dag_id in self.dags:
dag = self.dags[ti.dag_id]
if ti.task_id in dag.task_ids:
task = dag.get_task(ti.task_id)
ti.task = task
ti.handle_failure("{} killed as zombie".format(str(ti)))
self.log.info('Marked zombie job %s as failed', ti)
Stats.incr('zombies_killed')
session.commit()
def bag_dag(self, dag, parent_dag, root_dag):
"""
Adds the DAG into the bag, recurses into sub dags.
Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags
"""
dag.test_cycle() # throws if a task cycle is found
dag.resolve_template_files()
dag.last_loaded = timezone.utcnow()
for task in dag.tasks:
settings.policy(task)
subdags = dag.subdags
try:
for subdag in subdags:
subdag.full_filepath = dag.full_filepath
subdag.parent_dag = dag
subdag.is_subdag = True
self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag)
self.dags[dag.dag_id] = dag
self.log.debug('Loaded DAG {dag}'.format(**locals()))
except AirflowDagCycleException as cycle_exception:
# There was an error in bagging the dag. Remove it from the list of dags
self.log.exception('Exception bagging dag: {dag.dag_id}'.format(**locals()))
# Only necessary at the root level since DAG.subdags automatically
# performs DFS to search through all subdags
if dag == root_dag:
for subdag in subdags:
if subdag.dag_id in self.dags:
del self.dags[subdag.dag_id]
raise cycle_exception
def collect_dags(
self,
dag_folder=None,
only_if_updated=True):
"""
Given a file path or a folder, this method looks for python modules,
imports them and adds them to the dagbag collection.
Note that if a .airflowignore file is found while processing
the directory, it behaves much like a .gitignore,
ignoring files that match any of the regex patterns specified
in the file.
"""
start_dttm = timezone.utcnow()
dag_folder = dag_folder or self.dag_folder
# Used to store stats around DagBag processing
stats = []
FileLoadStat = namedtuple(
'FileLoadStat', "file duration dag_num task_num dags")
if os.path.isfile(dag_folder):
self.process_file(dag_folder, only_if_updated=only_if_updated)
elif os.path.isdir(dag_folder):
for root, dirs, files in os.walk(dag_folder, followlinks=True):
patterns = []
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as f:
patterns += [p for p in f.read().split('\n') if p]
for f in files:
try:
filepath = os.path.join(root, f)
if not os.path.isfile(filepath):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(filepath)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(filepath):
continue
if not any(
[re.findall(p, filepath) for p in patterns]):
ts = timezone.utcnow()
found_dags = self.process_file(
filepath, only_if_updated=only_if_updated)
td = timezone.utcnow() - ts
# total_seconds() already includes the fractional microseconds
td = td.total_seconds()
stats.append(FileLoadStat(
filepath.replace(dag_folder, ''),
td,
len(found_dags),
sum([len(dag.tasks) for dag in found_dags]),
str([dag.dag_id for dag in found_dags]),
))
except Exception as e:
self.log.exception(e)
Stats.gauge(
'collect_dags', (timezone.utcnow() - start_dttm).total_seconds(), 1)
Stats.gauge(
'dagbag_size', len(self.dags), 1)
Stats.gauge(
'dagbag_import_errors', len(self.import_errors), 1)
self.dagbag_stats = sorted(
stats, key=lambda x: x.duration, reverse=True)
def dagbag_report(self):
"""Prints a report around DagBag loading stats"""
report = textwrap.dedent("""\n
-------------------------------------------------------------------
DagBag loading stats for {dag_folder}
-------------------------------------------------------------------
Number of DAGs: {dag_num}
Total task number: {task_num}
DagBag parsing time: {duration}
{table}
""")
stats = self.dagbag_stats
return report.format(
dag_folder=self.dag_folder,
duration=sum([o.duration for o in stats]),
dag_num=sum([o.dag_num for o in stats]),
task_num=sum([o.task_num for o in stats]),
table=pprinttable(stats),
)
@provide_session
def deactivate_inactive_dags(self, session=None):
active_dag_ids = [dag.dag_id for dag in list(self.dags.values())]
for dag in session.query(
DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
dag.is_active = False
session.merge(dag)
session.commit()
@provide_session
def paused_dags(self, session=None):
dag_ids = [dp.dag_id for dp in session.query(DagModel).filter(
DagModel.is_paused.__eq__(True))]
return dag_ids
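# --- Editor's note: hedged usage sketch for DagBag, shown as comments because
# the folder path and dag id below are placeholders, not values from this module:
#
#     bag = DagBag(dag_folder="/path/to/dags")   # parses every .py / zip file under the folder
#     print(bag.size(), "DAGs loaded,", len(bag.import_errors), "files failed to import")
#     dag = bag.get_dag("example_dag_id")        # re-reads the source file if the DB marks it expired
#     print(bag.dagbag_report())                 # per-file parse timing table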
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
username = Column(String(ID_LEN), unique=True)
email = Column(String(500))
superuser = False
def __repr__(self):
return self.username
def get_id(self):
return str(self.id)
def is_superuser(self):
return self.superuser
class Connection(Base, LoggingMixin):
"""
Placeholder to store connection information for different database and
service instances. The idea here is that scripts use references to these
instances (conn_id) instead of hard coding hostnames, logins and
passwords when using operators or hooks.
"""
__tablename__ = "connection"
id = Column(Integer(), primary_key=True)
conn_id = Column(String(ID_LEN))
conn_type = Column(String(500))
host = Column(String(500))
schema = Column(String(500))
login = Column(String(500))
_password = Column('password', String(5000))
port = Column(Integer())
is_encrypted = Column(Boolean, unique=False, default=False)
is_extra_encrypted = Column(Boolean, unique=False, default=False)
_extra = Column('extra', String(5000))
_types = [
('docker', 'Docker Registry',),
('fs', 'File (path)'),
('ftp', 'FTP',),
('google_cloud_platform', 'Google Cloud Platform'),
('hdfs', 'HDFS',),
('http', 'HTTP',),
('hive_cli', 'Hive Client Wrapper',),
('hive_metastore', 'Hive Metastore Thrift',),
('hiveserver2', 'Hive Server 2 Thrift',),
('jdbc', 'Jdbc Connection',),
('jenkins', 'Jenkins'),
('mysql', 'MySQL',),
('postgres', 'Postgres',),
('oracle', 'Oracle',),
('vertica', 'Vertica',),
('presto', 'Presto',),
('s3', 'S3',),
('samba', 'Samba',),
('sqlite', 'Sqlite',),
('ssh', 'SSH',),
('cloudant', 'IBM Cloudant',),
('mssql', 'Microsoft SQL Server'),
('mesos_framework-id', 'Mesos Framework ID'),
('jira', 'JIRA',),
('redis', 'Redis',),
('wasb', 'Azure Blob Storage'),
('databricks', 'Databricks',),
('aws', 'Amazon Web Services',),
('emr', 'Elastic MapReduce',),
('snowflake', 'Snowflake',),
('segment', 'Segment',),
('azure_data_lake', 'Azure Data Lake'),
('cassandra', 'Cassandra',),
]
def __init__(
self, conn_id=None, conn_type=None,
host=None, login=None, password=None,
schema=None, port=None, extra=None,
uri=None):
self.conn_id = conn_id
if uri:
self.parse_from_uri(uri)
else:
self.conn_type = conn_type
self.host = host
self.login = login
self.password = password
self.schema = schema
self.port = port
self.extra = extra
def parse_from_uri(self, uri):
temp_uri = urlparse(uri)
hostname = temp_uri.hostname or ''
if '%2f' in hostname.lower():
hostname = hostname.replace('%2f', '/').replace('%2F', '/')
conn_type = temp_uri.scheme
if conn_type == 'postgresql':
conn_type = 'postgres'
self.conn_type = conn_type
self.host = hostname
self.schema = temp_uri.path[1:]
self.login = temp_uri.username
self.password = temp_uri.password
self.port = temp_uri.port
if temp_uri.query:
self.extra = json.dumps(dict(parse_qsl(temp_uri.query)))
def get_password(self):
if self._password and self.is_encrypted:
try:
fernet = get_fernet()
except AirflowException:
raise AirflowException(
"Can't decrypt encrypted password for login={}, \
FERNET_KEY configuration is missing".format(self.login))
return fernet.decrypt(bytes(self._password, 'utf-8')).decode()
else:
return self._password
def set_password(self, value):
if value:
try:
fernet = get_fernet()
self._password = fernet.encrypt(bytes(value, 'utf-8')).decode()
self.is_encrypted = True
except AirflowException:
self.log.exception("Failed to load fernet while encrypting value, "
"using non-encrypted value.")
self._password = value
self.is_encrypted = False
@declared_attr
def password(cls):
return synonym('_password',
descriptor=property(cls.get_password, cls.set_password))
def get_extra(self):
if self._extra and self.is_extra_encrypted:
try:
fernet = get_fernet()
except AirflowException:
raise AirflowException(
"Can't decrypt `extra` params for login={},\
FERNET_KEY configuration is missing".format(self.login))
return fernet.decrypt(bytes(self._extra, 'utf-8')).decode()
else:
return self._extra
def set_extra(self, value):
if value:
try:
fernet = get_fernet()
self._extra = fernet.encrypt(bytes(value, 'utf-8')).decode()
self.is_extra_encrypted = True
except AirflowException:
self.log.exception("Failed to load fernet while encrypting value, "
"using non-encrypted value.")
self._extra = value
self.is_extra_encrypted = False
else:
self._extra = value
self.is_extra_encrypted = False
@declared_attr
def extra(cls):
return synonym('_extra',
descriptor=property(cls.get_extra, cls.set_extra))
def get_hook(self):
try:
if self.conn_type == 'mysql':
from airflow.hooks.mysql_hook import MySqlHook
return MySqlHook(mysql_conn_id=self.conn_id)
elif self.conn_type == 'google_cloud_platform':
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
return BigQueryHook(bigquery_conn_id=self.conn_id)
elif self.conn_type == 'postgres':
from airflow.hooks.postgres_hook import PostgresHook
return PostgresHook(postgres_conn_id=self.conn_id)
elif self.conn_type == 'hive_cli':
from airflow.hooks.hive_hooks import HiveCliHook
return HiveCliHook(hive_cli_conn_id=self.conn_id)
elif self.conn_type == 'presto':
from airflow.hooks.presto_hook import PrestoHook
return PrestoHook(presto_conn_id=self.conn_id)
elif self.conn_type == 'hiveserver2':
from airflow.hooks.hive_hooks import HiveServer2Hook
return HiveServer2Hook(hiveserver2_conn_id=self.conn_id)
elif self.conn_type == 'sqlite':
from airflow.hooks.sqlite_hook import SqliteHook
return SqliteHook(sqlite_conn_id=self.conn_id)
elif self.conn_type == 'jdbc':
from airflow.hooks.jdbc_hook import JdbcHook
return JdbcHook(jdbc_conn_id=self.conn_id)
elif self.conn_type == 'mssql':
from airflow.hooks.mssql_hook import MsSqlHook
return MsSqlHook(mssql_conn_id=self.conn_id)
elif self.conn_type == 'oracle':
from airflow.hooks.oracle_hook import OracleHook
return OracleHook(oracle_conn_id=self.conn_id)
elif self.conn_type == 'vertica':
from airflow.contrib.hooks.vertica_hook import VerticaHook
return VerticaHook(vertica_conn_id=self.conn_id)
elif self.conn_type == 'cloudant':
from airflow.contrib.hooks.cloudant_hook import CloudantHook
return CloudantHook(cloudant_conn_id=self.conn_id)
elif self.conn_type == 'jira':
from airflow.contrib.hooks.jira_hook import JiraHook
return JiraHook(jira_conn_id=self.conn_id)
elif self.conn_type == 'redis':
from airflow.contrib.hooks.redis_hook import RedisHook
return RedisHook(redis_conn_id=self.conn_id)
elif self.conn_type == 'wasb':
from airflow.contrib.hooks.wasb_hook import WasbHook
return WasbHook(wasb_conn_id=self.conn_id)
elif self.conn_type == 'docker':
from airflow.hooks.docker_hook import DockerHook
return DockerHook(docker_conn_id=self.conn_id)
elif self.conn_type == 'azure_data_lake':
from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook
return AzureDataLakeHook(azure_data_lake_conn_id=self.conn_id)
elif self.conn_type == 'cassandra':
from airflow.contrib.hooks.cassandra_hook import CassandraHook
return CassandraHook(cassandra_conn_id=self.conn_id)
except Exception:
pass
def __repr__(self):
return self.conn_id
@property
def extra_dejson(self):
"""Returns the extra property by deserializing json."""
obj = {}
if self.extra:
try:
obj = json.loads(self.extra)
except Exception as e:
self.log.exception(e)
self.log.error("Failed parsing the json for conn_id %s", self.conn_id)
return obj
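# --- Editor's note: hedged sketch of URI-style connections (values are
# illustrative). parse_from_uri() above maps the "postgresql" scheme to the
# "postgres" conn_type, takes the schema from the URI path, and stores any
# query string in `extra`:
#
#     c = Connection(conn_id="my_pg",
#                    uri="postgresql://user:secret@db.example.com:5432/analytics?sslmode=require")
#     # c.conn_type == "postgres", c.host == "db.example.com", c.port == 5432
#     # c.schema == "analytics", c.login == "user"
#     # c.extra_dejson == {"sslmode": "require"}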
class DagPickle(Base):
"""
Dags can originate from different places (user repos, master repo, ...)
and also get executed in different places (different executors). This
object represents a version of a DAG and becomes a source of truth for
a BackfillJob execution. A pickle is a native python serialized object,
and in this case gets stored in the database for the duration of the job.
The executors pick up the DagPickle id and read the dag definition from
the database.
"""
id = Column(Integer, primary_key=True)
pickle = Column(PickleType(pickler=dill))
created_dttm = Column(UtcDateTime, default=timezone.utcnow)
pickle_hash = Column(Text)
__tablename__ = "dag_pickle"
def __init__(self, dag):
self.dag_id = dag.dag_id
if hasattr(dag, 'template_env'):
dag.template_env = None
self.pickle_hash = hash(dag)
self.pickle = dag
class TaskInstance(Base, LoggingMixin):
"""
Task instances store the state of a task instance. This table is the
authority and single source of truth around what tasks have run and the
state they are in.
The SqlAlchemy model doesn't have a SqlAlchemy foreign key to the task or
dag model deliberately to have more control over transactions.
Database transactions on this table should guard against double triggers and
any confusion around what task instances are or aren't ready to run,
even while multiple schedulers may be firing task instances.
"""
__tablename__ = "task_instance"
task_id = Column(String(ID_LEN), primary_key=True)
dag_id = Column(String(ID_LEN), primary_key=True)
execution_date = Column(UtcDateTime, primary_key=True)
start_date = Column(UtcDateTime)
end_date = Column(UtcDateTime)
duration = Column(Float)
state = Column(String(20))
_try_number = Column('try_number', Integer, default=0)
max_tries = Column(Integer)
hostname = Column(String(1000))
unixname = Column(String(1000))
job_id = Column(Integer, index=True)
pool = Column(String(50))
queue = Column(String(50))
priority_weight = Column(Integer)
operator = Column(String(1000))
queued_dttm = Column(UtcDateTime)
pid = Column(Integer)
executor_config = Column(PickleType(pickler=dill))
__table_args__ = (
Index('ti_dag_state', dag_id, state),
Index('ti_state', state),
Index('ti_state_lkp', dag_id, task_id, execution_date, state),
Index('ti_pool', pool, state, priority_weight),
)
def __init__(self, task, execution_date, state=None):
self.dag_id = task.dag_id
self.task_id = task.task_id
self.task = task
self._log = logging.getLogger("airflow.task")
# make sure we have a localized execution_date stored in UTC
if execution_date and not timezone.is_localized(execution_date):
self.log.warning("execution date %s has no timezone information. Using "
"default from dag or system", execution_date)
if self.task.has_dag():
execution_date = timezone.make_aware(execution_date,
self.task.dag.timezone)
else:
execution_date = timezone.make_aware(execution_date)
execution_date = timezone.convert_to_utc(execution_date)
self.execution_date = execution_date
self.queue = task.queue
self.pool = task.pool
self.priority_weight = task.priority_weight_total
self.try_number = 0
self.max_tries = self.task.retries
self.unixname = getpass.getuser()
self.run_as_user = task.run_as_user
if state:
self.state = state
self.hostname = ''
self.executor_config = task.executor_config
self.init_on_load()
# Is this TaskInstance being currently running within `airflow run --raw`.
# Not persisted to the database so only valid for the current process
self.raw = False
@reconstructor
def init_on_load(self):
""" Initialize the attributes that aren't stored in the DB. """
self.test_mode = False # can be changed when calling 'run'
@property
def try_number(self):
"""
Return the try number that this task instance will have when it is actually
run.
If the TI is currently running, this will match the column in the
database; in all other cases this will be incremented by one.
"""
# This is designed so that task logs end up in the right file.
if self.state == State.RUNNING:
return self._try_number
return self._try_number + 1
@try_number.setter
def try_number(self, value):
self._try_number = value
@property
def next_try_number(self):
return self._try_number + 1
def command(
self,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None):
"""
Returns a command that can be executed anywhere airflow is
installed. This command is part of the message sent to executors by
the orchestrator.
"""
return " ".join(self.command_as_list(
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
local=local,
pickle_id=pickle_id,
raw=raw,
job_id=job_id,
pool=pool,
cfg_path=cfg_path))
def command_as_list(
self,
mark_success=False,
ignore_all_deps=False,
ignore_task_deps=False,
ignore_depends_on_past=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None):
"""
Returns a command that can be executed anywhere airflow is
installed. This command is part of the message sent to executors by
the orchestrator.
"""
dag = self.task.dag
should_pass_filepath = not pickle_id and dag
if should_pass_filepath and dag.full_filepath != dag.filepath:
path = "DAGS_FOLDER/{}".format(dag.filepath)
elif should_pass_filepath and dag.full_filepath:
path = dag.full_filepath
else:
path = None
return TaskInstance.generate_command(
self.dag_id,
self.task_id,
self.execution_date,
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_ti_state=ignore_ti_state,
local=local,
pickle_id=pickle_id,
file_path=path,
raw=raw,
job_id=job_id,
pool=pool,
cfg_path=cfg_path)
@staticmethod
def generate_command(dag_id,
task_id,
execution_date,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
file_path=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None
):
"""
Generates the shell command required to execute this task instance.
:param dag_id: DAG ID
:type dag_id: unicode
:param task_id: Task ID
:type task_id: unicode
:param execution_date: Execution date for the task
:type execution_date: datetime
:param mark_success: Whether to mark the task as successful
:type mark_success: bool
:param ignore_all_deps: Ignore all ignorable dependencies.
Overrides the other ignore_* parameters.
:type ignore_all_deps: boolean
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs
(e.g. for Backfills)
:type ignore_depends_on_past: boolean
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past
and trigger rule
:type ignore_task_deps: boolean
:param ignore_ti_state: Ignore the task instance's previous failure/success
:type ignore_ti_state: boolean
:param local: Whether to run the task locally
:type local: bool
:param pickle_id: If the DAG was serialized to the DB, the ID
associated with the pickled DAG
:type pickle_id: unicode
:param file_path: path to the file containing the DAG definition
:param raw: raw mode (needs more details)
:param job_id: job ID (needs more details)
:param pool: the Airflow pool that the task should run in
:type pool: unicode
:param cfg_path: the Path to the configuration file
:type cfg_path: basestring
:return: shell command that can be used to run the task instance
"""
iso = execution_date.isoformat()
cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)]
cmd.extend(["--mark_success"]) if mark_success else None
cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None
cmd.extend(["--job_id", str(job_id)]) if job_id else None
cmd.extend(["-A"]) if ignore_all_deps else None
cmd.extend(["-i"]) if ignore_task_deps else None
cmd.extend(["-I"]) if ignore_depends_on_past else None
cmd.extend(["--force"]) if ignore_ti_state else None
cmd.extend(["--local"]) if local else None
cmd.extend(["--pool", pool]) if pool else None
cmd.extend(["--raw"]) if raw else None
cmd.extend(["-sd", file_path]) if file_path else None
cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None
return cmd
@property
def log_filepath(self):
iso = self.execution_date.isoformat()
log = os.path.expanduser(configuration.conf.get('core', 'BASE_LOG_FOLDER'))
return (
"{log}/{self.dag_id}/{self.task_id}/{iso}.log".format(**locals()))
@property
def log_url(self):
iso = quote(self.execution_date.isoformat())
BASE_URL = configuration.conf.get('webserver', 'BASE_URL')
if settings.RBAC:
return BASE_URL + (
"/log/list/"
"?_flt_3_dag_id={self.dag_id}"
"&_flt_3_task_id={self.task_id}"
"&_flt_3_execution_date={iso}"
).format(**locals())
else:
return BASE_URL + (
"/admin/airflow/log"
"?dag_id={self.dag_id}"
"&task_id={self.task_id}"
"&execution_date={iso}"
).format(**locals())
@property
def mark_success_url(self):
iso = quote(self.execution_date.isoformat())
BASE_URL = configuration.conf.get('webserver', 'BASE_URL')
if settings.RBAC:
return BASE_URL + (
"/success"
"?task_id={self.task_id}"
"&dag_id={self.dag_id}"
"&execution_date={iso}"
"&upstream=false"
"&downstream=false"
).format(**locals())
else:
return BASE_URL + (
"/admin/airflow/success"
"?task_id={self.task_id}"
"&dag_id={self.dag_id}"
"&execution_date={iso}"
"&upstream=false"
"&downstream=false"
).format(**locals())
@provide_session
def current_state(self, session=None):
"""
Get the very latest state from the database. If a session is passed,
we use it and looking up the state becomes part of the session;
otherwise a new session is used.
"""
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date,
).all()
if ti:
state = ti[0].state
else:
state = None
return state
@provide_session
def error(self, session=None):
"""
Forces the task instance's state to FAILED in the database.
"""
self.log.error("Recording the task instance as FAILED")
self.state = State.FAILED
session.merge(self)
session.commit()
@provide_session
def refresh_from_db(self, session=None, lock_for_update=False):
"""
Refreshes the task instance from the database based on the primary key
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
if ti:
self.state = ti.state
self.start_date = ti.start_date
self.end_date = ti.end_date
# Get the raw value of try_number column, don't read through the
# accessor here otherwise it will be incremented by one already.
self.try_number = ti._try_number
self.max_tries = ti.max_tries
self.hostname = ti.hostname
self.pid = ti.pid
self.executor_config = ti.executor_config
else:
self.state = None
@provide_session
def clear_xcom_data(self, session=None):
"""
Clears all XCom data from the database for the task instance
"""
session.query(XCom).filter(
XCom.dag_id == self.dag_id,
XCom.task_id == self.task_id,
XCom.execution_date == self.execution_date
).delete()
session.commit()
@property
def key(self):
"""
Returns a tuple that identifies the task instance uniquely
"""
return self.dag_id, self.task_id, self.execution_date
@provide_session
def set_state(self, state, session=None):
self.state = state
self.start_date = timezone.utcnow()
self.end_date = timezone.utcnow()
session.merge(self)
session.commit()
@property
def is_premature(self):
"""
Returns whether a task is in UP_FOR_RETRY state and its retry interval
has elapsed.
"""
# is the task still in the retry waiting period?
return self.state == State.UP_FOR_RETRY and not self.ready_for_retry()
@provide_session
def are_dependents_done(self, session=None):
"""
Checks whether the dependents of this task instance have all succeeded.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table.
"""
task = self.task
if not task.downstream_task_ids:
return True
ti = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id.in_(task.downstream_task_ids),
TaskInstance.execution_date == self.execution_date,
TaskInstance.state == State.SUCCESS,
)
count = ti[0][0]
return count == len(task.downstream_task_ids)
@property
@provide_session
def previous_ti(self, session=None):
""" The task instance for the task that ran before this task instance """
dag = self.task.dag
if dag:
dr = self.get_dagrun(session=session)
# LEGACY: most likely running from unit tests
if not dr:
# Means that this TI is NOT being run from a DR, but from a catchup
previous_scheduled_date = dag.previous_schedule(self.execution_date)
if not previous_scheduled_date:
return None
return TaskInstance(task=self.task,
execution_date=previous_scheduled_date)
dr.dag = dag
if dag.catchup:
last_dagrun = dr.get_previous_scheduled_dagrun(session=session)
else:
last_dagrun = dr.get_previous_dagrun(session=session)
if last_dagrun:
return last_dagrun.get_task_instance(self.task_id, session=session)
return None
@provide_session
def are_dependencies_met(
self,
dep_context=None,
session=None,
verbose=False):
"""
Returns whether or not all the conditions are met for this task instance to be run
given the context for the dependencies (e.g. a task instance being force run from
the UI will ignore some dependencies).
:param dep_context: The execution context that determines the dependencies that
should be evaluated.
:type dep_context: DepContext
:param session: database session
:type session: Session
:param verbose: whether or not to print details on failed dependencies
:type verbose: boolean
"""
dep_context = dep_context or DepContext()
failed = False
for dep_status in self.get_failed_dep_statuses(
dep_context=dep_context,
session=session):
failed = True
if verbose:
self.log.info(
"Dependencies not met for %s, dependency '%s' FAILED: %s",
self, dep_status.dep_name, dep_status.reason
)
if failed:
return False
if verbose:
self.log.info("Dependencies all met for %s", self)
return True
@provide_session
def get_failed_dep_statuses(
self,
dep_context=None,
session=None):
dep_context = dep_context or DepContext()
for dep in dep_context.deps | self.task.deps:
for dep_status in dep.get_dep_statuses(
self,
session,
dep_context):
self.log.debug(
"%s dependency '%s' PASSED: %s, %s",
self, dep_status.dep_name, dep_status.passed, dep_status.reason
)
if not dep_status.passed:
yield dep_status
def __repr__(self):
return (
"<TaskInstance: {ti.dag_id}.{ti.task_id} "
"{ti.execution_date} [{ti.state}]>"
).format(ti=self)
def next_retry_datetime(self):
"""
Get datetime of the next retry if the task instance fails. For exponential
backoff, retry_delay is used as base and will be converted to seconds.
"""
delay = self.task.retry_delay
if self.task.retry_exponential_backoff:
min_backoff = int(delay.total_seconds() * (2 ** (self.try_number - 2)))
# deterministic per task instance
hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id,
self.task_id,
self.execution_date,
self.try_number)
.encode('utf-8')).hexdigest(), 16)
# between 0.5 * delay * (2^retry_number) and 1.0 * delay * (2^retry_number)
modded_hash = min_backoff + hash % min_backoff
# timedelta has a maximum representable value. The exponentiation
# here means this value can be exceeded after a certain number
# of tries (around 50 if the initial delay is 1s, even fewer if
# the delay is larger). Cap the value here before creating a
# timedelta object so the operation doesn't fail.
delay_backoff_in_seconds = min(
modded_hash,
timedelta.max.total_seconds() - 1
)
delay = timedelta(seconds=delay_backoff_in_seconds)
if self.task.max_retry_delay:
delay = min(self.task.max_retry_delay, delay)
return self.end_date + delay
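# --- Editor's note (illustrative worked example, not from the original source):
# with retry_delay = 5 minutes (300 s), retry_exponential_backoff = True and
# try_number = 3, min_backoff = 300 * 2 ** (3 - 2) = 600 s, and the hash-based
# jitter places the actual delay in [600 s, 1200 s), deterministically per
# (dag_id, task_id, execution_date, try_number).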
def ready_for_retry(self):
"""
Checks whether the task instance is in the right state and timeframe
to be retried.
"""
return (self.state == State.UP_FOR_RETRY and
self.next_retry_datetime() < timezone.utcnow())
@provide_session
def pool_full(self, session):
"""
Returns a boolean as to whether the slot pool has room for this
task to run
"""
if not self.task.pool:
return False
pool = (
session
.query(Pool)
.filter(Pool.pool == self.task.pool)
.first()
)
if not pool:
return False
open_slots = pool.open_slots(session=session)
return open_slots <= 0
@provide_session
def get_dagrun(self, session):
"""
Returns the DagRun for this TaskInstance
:param session:
:return: DagRun
"""
dr = session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == self.execution_date
).first()
return dr
@provide_session
def _check_and_change_state_before_execution(
self,
verbose=True,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
mark_success=False,
test_mode=False,
job_id=None,
pool=None,
session=None):
"""
Checks dependencies and then sets state to RUNNING if they are met. Returns
True if and only if state is set to RUNNING, which implies that task should be
executed, in preparation for _run_raw_task
:param verbose: whether to turn on more verbose logging
:type verbose: boolean
:param ignore_all_deps: Ignore all of the non-critical dependencies, just runs
:type ignore_all_deps: boolean
:param ignore_depends_on_past: Ignore depends_on_past DAG attribute
:type ignore_depends_on_past: boolean
:param ignore_task_deps: Don't check the dependencies of this TI's task
:type ignore_task_deps: boolean
:param ignore_ti_state: Disregards previous task instance state
:type ignore_ti_state: boolean
:param mark_success: Don't run the task, mark its state as success
:type mark_success: boolean
:param test_mode: Doesn't record success or failure in the DB
:type test_mode: boolean
:param pool: specifies the pool to use to run the task instance
:type pool: str
:return: whether the state was changed to running or not
:rtype: bool
"""
task = self.task
self.pool = pool or task.pool
self.test_mode = test_mode
self.refresh_from_db(session=session, lock_for_update=True)
self.job_id = job_id
self.hostname = get_hostname()
self.operator = task.__class__.__name__
if not ignore_all_deps and not ignore_ti_state and self.state == State.SUCCESS:
Stats.incr('previously_succeeded', 1, 1)
queue_dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_ti_state=ignore_ti_state,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps)
if not self.are_dependencies_met(
dep_context=queue_dep_context,
session=session,
verbose=True):
session.commit()
return False
# TODO: Logging needs cleanup, not clear what is being printed
hr = "\n" + ("-" * 80) + "\n" # Line break
# For reporting purposes, we report based on 1-indexed,
# not 0-indexed lists (i.e. Attempt 1 instead of
# Attempt 0 for the first attempt).
msg = "Starting attempt {attempt} of {total}".format(
attempt=self.try_number,
total=self.max_tries + 1)
self.start_date = timezone.utcnow()
dep_context = DepContext(
deps=RUN_DEPS - QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
runnable = self.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True)
if not runnable and not mark_success:
# FIXME: we might have hit concurrency limits, which means we probably
# have been running prematurely. This should be handled in the
# scheduling mechanism.
self.state = State.NONE
msg = ("FIXME: Rescheduling due to concurrency limits reached at task "
"runtime. Attempt {attempt} of {total}. State set to NONE.").format(
attempt=self.try_number,
total=self.max_tries + 1)
self.log.warning(hr + msg + hr)
self.queued_dttm = timezone.utcnow()
self.log.info("Queuing into pool %s", self.pool)
session.merge(self)
session.commit()
return False
# Another worker might have started running this task instance while
# the current worker process was blocked on refresh_from_db
if self.state == State.RUNNING:
msg = "Task Instance already running {}".format(self)
self.log.warning(msg)
session.commit()
return False
# print status message
self.log.info(hr + msg + hr)
self._try_number += 1
if not test_mode:
session.add(Log(State.RUNNING, self))
self.state = State.RUNNING
self.pid = os.getpid()
self.end_date = None
if not test_mode:
session.merge(self)
session.commit()
# Closing all pooled connections to prevent
# "max number of connections reached"
settings.engine.dispose()
if verbose:
if mark_success:
msg = "Marking success for {} on {}".format(self.task,
self.execution_date)
self.log.info(msg)
else:
msg = "Executing {} on {}".format(self.task, self.execution_date)
self.log.info(msg)
return True
@provide_session
def _run_raw_task(
self,
mark_success=False,
test_mode=False,
job_id=None,
pool=None,
session=None):
"""
Immediately runs the task (without checking or changing db state
before execution) and then sets the appropriate final state after
completion and runs any post-execute callbacks. Meant to be called
only after another function changes the state to running.
:param mark_success: Don't run the task, mark its state as success
:type mark_success: boolean
:param test_mode: Doesn't record success or failure in the DB
:type test_mode: boolean
:param pool: specifies the pool to use to run the task instance
:type pool: str
"""
task = self.task
self.pool = pool or task.pool
self.test_mode = test_mode
self.refresh_from_db(session=session)
self.job_id = job_id
self.hostname = get_hostname()
self.operator = task.__class__.__name__
context = {}
try:
if not mark_success:
context = self.get_template_context()
task_copy = copy.copy(task)
self.task = task_copy
def signal_handler(signum, frame):
self.log.error("Received SIGTERM. Terminating subprocesses.")
task_copy.on_kill()
raise AirflowException("Task received SIGTERM signal")
signal.signal(signal.SIGTERM, signal_handler)
# Don't clear Xcom until the task is certain to execute
self.clear_xcom_data()
self.render_templates()
task_copy.pre_execute(context=context)
# If a timeout is specified for the task, make it fail
# if it goes beyond
result = None
if task_copy.execution_timeout:
try:
with timeout(int(
task_copy.execution_timeout.total_seconds())):
result = task_copy.execute(context=context)
except AirflowTaskTimeout:
task_copy.on_kill()
raise
else:
result = task_copy.execute(context=context)
# If the task returns a result, push an XCom containing it
if result is not None:
self.xcom_push(key=XCOM_RETURN_KEY, value=result)
# TODO remove deprecated behavior in Airflow 2.0
try:
task_copy.post_execute(context=context, result=result)
except TypeError as e:
if 'unexpected keyword argument' in str(e):
warnings.warn(
'BaseOperator.post_execute() now takes two '
'arguments, `context` and `result`, but "{}" only '
'expected one. This behavior is deprecated and '
'will be removed in a future version of '
'Airflow.'.format(self.task_id),
category=DeprecationWarning)
task_copy.post_execute(context=context)
else:
raise
Stats.incr('operator_successes_{}'.format(
self.task.__class__.__name__), 1, 1)
Stats.incr('ti_successes')
self.refresh_from_db(lock_for_update=True)
self.state = State.SUCCESS
except AirflowSkipException:
self.refresh_from_db(lock_for_update=True)
self.state = State.SKIPPED
except AirflowException as e:
self.refresh_from_db()
# for case when task is marked as success externally
# current behavior doesn't hit the success callback
if self.state == State.SUCCESS:
return
else:
self.handle_failure(e, test_mode, context)
raise
except (Exception, KeyboardInterrupt) as e:
self.handle_failure(e, test_mode, context)
raise
# Recording SUCCESS
self.end_date = timezone.utcnow()
self.set_duration()
if not test_mode:
session.add(Log(self.state, self))
session.merge(self)
session.commit()
# Success callback
try:
if task.on_success_callback:
task.on_success_callback(context)
except Exception as e3:
self.log.error("Failed when executing success callback")
self.log.exception(e3)
session.commit()
@provide_session
def run(
self,
verbose=True,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
mark_success=False,
test_mode=False,
job_id=None,
pool=None,
session=None):
res = self._check_and_change_state_before_execution(
verbose=verbose,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
mark_success=mark_success,
test_mode=test_mode,
job_id=job_id,
pool=pool,
session=session)
if res:
self._run_raw_task(
mark_success=mark_success,
test_mode=test_mode,
job_id=job_id,
pool=pool,
session=session)
def dry_run(self):
task = self.task
task_copy = copy.copy(task)
self.task = task_copy
self.render_templates()
task_copy.dry_run()
@provide_session
def handle_failure(self, error, test_mode=False, context=None, session=None):
self.log.exception(error)
task = self.task
self.end_date = timezone.utcnow()
self.set_duration()
Stats.incr('operator_failures_{}'.format(task.__class__.__name__), 1, 1)
Stats.incr('ti_failures')
if not test_mode:
session.add(Log(State.FAILED, self))
# Log failure duration
session.add(TaskFail(task, self.execution_date, self.start_date, self.end_date))
# Let's go deeper
try:
# Since this function is called only when the TI state is running,
# try_number contains the current try_number (not the next). We
# only mark task instance as FAILED if the next task instance
# try_number exceeds the max_tries.
if task.retries and self.try_number <= self.max_tries:
self.state = State.UP_FOR_RETRY
self.log.info('Marking task as UP_FOR_RETRY')
if task.email_on_retry and task.email:
self.email_alert(error, is_retry=True)
else:
self.state = State.FAILED
if task.retries:
self.log.info('All retries failed; marking task as FAILED')
else:
self.log.info('Marking task as FAILED.')
if task.email_on_failure and task.email:
self.email_alert(error, is_retry=False)
except Exception as e2:
self.log.error('Failed to send email to: %s', task.email)
self.log.exception(e2)
# Handling callbacks pessimistically
try:
if self.state == State.UP_FOR_RETRY and task.on_retry_callback:
task.on_retry_callback(context)
if self.state == State.FAILED and task.on_failure_callback:
task.on_failure_callback(context)
except Exception as e3:
self.log.error("Failed at executing callback")
self.log.exception(e3)
if not test_mode:
session.merge(self)
session.commit()
self.log.error(str(error))
@provide_session
def get_template_context(self, session=None):
task = self.task
from airflow import macros
tables = None
if 'tables' in task.params:
tables = task.params['tables']
ds = self.execution_date.isoformat()[:10]
ts = self.execution_date.isoformat()
yesterday_ds = (self.execution_date - timedelta(1)).isoformat()[:10]
tomorrow_ds = (self.execution_date + timedelta(1)).isoformat()[:10]
prev_execution_date = task.dag.previous_schedule(self.execution_date)
next_execution_date = task.dag.following_schedule(self.execution_date)
ds_nodash = ds.replace('-', '')
ts_nodash = ts.replace('-', '').replace(':', '')
yesterday_ds_nodash = yesterday_ds.replace('-', '')
tomorrow_ds_nodash = tomorrow_ds.replace('-', '')
ti_key_str = "{task.dag_id}__{task.task_id}__{ds_nodash}"
ti_key_str = ti_key_str.format(**locals())
params = {}
run_id = ''
dag_run = None
if hasattr(task, 'dag'):
if task.dag.params:
params.update(task.dag.params)
dag_run = (
session.query(DagRun)
.filter_by(
dag_id=task.dag.dag_id,
execution_date=self.execution_date)
.first()
)
run_id = dag_run.run_id if dag_run else None
session.expunge_all()
session.commit()
if task.params:
params.update(task.params)
class VariableAccessor:
"""
Wrapper around Variable. This way you can get variables in templates by using
{{ var.value.your_variable_name }}.
"""
def __init__(self):
self.var = None
def __getattr__(self, item):
self.var = Variable.get(item)
return self.var
def __repr__(self):
return str(self.var)
class VariableJsonAccessor:
"""
Wrapper around deserialized Variables. This way you can get variables
in templates by using {{ var.json.your_variable_name }}.
"""
def __init__(self):
self.var = None
def __getattr__(self, item):
self.var = Variable.get(item, deserialize_json=True)
return self.var
def __repr__(self):
return str(self.var)
return {
'dag': task.dag,
'ds': ds,
'ds_nodash': ds_nodash,
'ts': ts,
'ts_nodash': ts_nodash,
'yesterday_ds': yesterday_ds,
'yesterday_ds_nodash': yesterday_ds_nodash,
'tomorrow_ds': tomorrow_ds,
'tomorrow_ds_nodash': tomorrow_ds_nodash,
'END_DATE': ds,
'end_date': ds,
'dag_run': dag_run,
'run_id': run_id,
'execution_date': self.execution_date,
'prev_execution_date': prev_execution_date,
'next_execution_date': next_execution_date,
'latest_date': ds,
'macros': macros,
'params': params,
'tables': tables,
'task': task,
'task_instance': self,
'ti': self,
'task_instance_key_str': ti_key_str,
'conf': configuration,
'test_mode': self.test_mode,
'var': {
'value': VariableAccessor(),
'json': VariableJsonAccessor()
},
'inlets': task.inlets,
'outlets': task.outlets,
}
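# --- Editor's note: hedged example of how these keys surface in templated
# fields (operator and values are illustrative, not from this module): for an
# execution_date of 2018-01-02, a templated command such as
#
#     bash_command="echo {{ ds }} {{ ds_nodash }}"
#
# renders to "echo 2018-01-02 20180102".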
def render_templates(self):
task = self.task
jinja_context = self.get_template_context()
if hasattr(self, 'task') and hasattr(self.task, 'dag'):
if self.task.dag.user_defined_macros:
jinja_context.update(
self.task.dag.user_defined_macros)
rt = self.task.render_template # shortcut to method
for attr in task.__class__.template_fields:
content = getattr(task, attr)
if content:
rendered_content = rt(attr, content, jinja_context)
setattr(task, attr, rendered_content)
def email_alert(self, exception, is_retry=False):
task = self.task
title = "Airflow alert: {self}".format(**locals())
exception = str(exception).replace('\n', '<br>')
# For reporting purposes, we report based on 1-indexed,
# not 0-indexed lists (i.e. Try 1 instead of
# Try 0 for the first attempt).
body = (
"Try {try_number} out of {max_tries}<br>"
"Exception:<br>{exception}<br>"
"Log: <a href='{self.log_url}'>Link</a><br>"
"Host: {self.hostname}<br>"
"Log file: {self.log_filepath}<br>"
"Mark success: <a href='{self.mark_success_url}'>Link</a><br>"
).format(try_number=self.try_number, max_tries=self.max_tries + 1, **locals())
send_email(task.email, title, body)
def set_duration(self):
if self.end_date and self.start_date:
self.duration = (self.end_date - self.start_date).total_seconds()
else:
self.duration = None
def xcom_push(
self,
key,
value,
execution_date=None):
"""
Make an XCom available for tasks to pull.
:param key: A key for the XCom
:type key: string
:param value: A value for the XCom. The value is pickled and stored
in the database.
:type value: any pickleable object
:param execution_date: if provided, the XCom will not be visible until
this date. This can be used, for example, to send a message to a
task on a future date without it being immediately visible.
:type execution_date: datetime
"""
if execution_date and execution_date < self.execution_date:
raise ValueError(
'execution_date can not be in the past (current '
'execution_date is {}; received {})'.format(
self.execution_date, execution_date))
XCom.set(
key=key,
value=value,
task_id=self.task_id,
dag_id=self.dag_id,
execution_date=execution_date or self.execution_date)
def xcom_pull(
self,
task_ids=None,
dag_id=None,
key=XCOM_RETURN_KEY,
include_prior_dates=False):
"""
Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:type key: string
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: string or iterable of strings (representing task_ids)
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: string
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool
"""
if dag_id is None:
dag_id = self.dag_id
pull_fn = functools.partial(
XCom.get_one,
execution_date=self.execution_date,
key=key,
dag_id=dag_id,
include_prior_dates=include_prior_dates)
if is_container(task_ids):
return tuple(pull_fn(task_id=t) for t in task_ids)
else:
return pull_fn(task_id=task_ids)
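# --- Editor's note: hedged usage sketch (task ids are illustrative). Inside an
# operator's execute(context), the calling task instance is context["ti"]:
#
#     value = context["ti"].xcom_pull(task_ids="extract")             # latest return_value of "extract"
#     values = context["ti"].xcom_pull(task_ids=["extract", "load"])  # tuple, one entry per task id
#     raw = context["ti"].xcom_pull(task_ids="extract", key=None)     # match any key, not just return_value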
@provide_session
def get_num_running_task_instances(self, session):
TI = TaskInstance
return session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.state == State.RUNNING
).count()
def init_run_context(self, raw=False):
"""
Sets the log context.
"""
self.raw = raw
self._set_context(self)
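# --- Editor's note: hedged example of the command list built by
# TaskInstance.generate_command (dag/task ids and the date are illustrative):
#
#     TaskInstance.generate_command(
#         "my_dag", "my_task", datetime(2018, 1, 1),
#         local=True, pool="default_pool")
#     # -> ['airflow', 'run', 'my_dag', 'my_task', '2018-01-01T00:00:00',
#     #     '--local', '--pool', 'default_pool']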
class TaskFail(Base):
"""
TaskFail tracks the failed run durations of each task instance.
"""
__tablename__ = "task_fail"
task_id = Column(String(ID_LEN), primary_key=True)
dag_id = Column(String(ID_LEN), primary_key=True)
execution_date = Column(UtcDateTime, primary_key=True)
start_date = Column(UtcDateTime)
end_date = Column(UtcDateTime)
duration = Column(Float)
def __init__(self, task, execution_date, start_date, end_date):
self.dag_id = task.dag_id
self.task_id = task.task_id
self.execution_date = execution_date
self.start_date = start_date
self.end_date = end_date
if self.end_date and self.start_date:
self.duration = (self.end_date - self.start_date).total_seconds()
else:
self.duration = None
class Log(Base):
"""
Used to actively log events to the database
"""
__tablename__ = "log"
id = Column(Integer, primary_key=True)
dttm = Column(UtcDateTime)
dag_id = Column(String(ID_LEN))
task_id = Column(String(ID_LEN))
event = Column(String(30))
execution_date = Column(UtcDateTime)
owner = Column(String(500))
extra = Column(Text)
def __init__(self, event, task_instance, owner=None, extra=None, **kwargs):
self.dttm = timezone.utcnow()
self.event = event
self.extra = extra
task_owner = None
if task_instance:
self.dag_id = task_instance.dag_id
self.task_id = task_instance.task_id
self.execution_date = task_instance.execution_date
task_owner = task_instance.task.owner
if 'task_id' in kwargs:
self.task_id = kwargs['task_id']
if 'dag_id' in kwargs:
self.dag_id = kwargs['dag_id']
if 'execution_date' in kwargs:
if kwargs['execution_date']:
self.execution_date = kwargs['execution_date']
self.owner = owner or task_owner
class SkipMixin(LoggingMixin):
@provide_session
def skip(self, dag_run, execution_date, tasks, session=None):
"""
Sets tasks instances to skipped from the same dag run.
:param dag_run: the DagRun for which to set the tasks to skipped
:param execution_date: execution_date
:param tasks: tasks to skip (not task_ids)
:param session: db session to use
"""
if not tasks:
return
task_ids = [d.task_id for d in tasks]
now = timezone.utcnow()
if dag_run:
session.query(TaskInstance).filter(
TaskInstance.dag_id == dag_run.dag_id,
TaskInstance.execution_date == dag_run.execution_date,
TaskInstance.task_id.in_(task_ids)
).update({TaskInstance.state: State.SKIPPED,
TaskInstance.start_date: now,
TaskInstance.end_date: now},
synchronize_session=False)
session.commit()
else:
assert execution_date is not None, "Execution date is None and no dag run"
self.log.warning("No DAG RUN present this should not happen")
# this is defensive against dag runs that are not complete
for task in tasks:
ti = TaskInstance(task, execution_date=execution_date)
ti.state = State.SKIPPED
ti.start_date = now
ti.end_date = now
session.merge(ti)
session.commit()
@functools.total_ordering
class BaseOperator(LoggingMixin):
"""
Abstract base class for all operators. Since operators create objects that
become nodes in the dag, BaseOperator contains many recursive methods for
dag crawling behavior. To derive this class, you are expected to override
the constructor as well as the 'execute' method.
Operators derived from this class should perform or trigger certain tasks
synchronously (wait for completion). Example of operators could be an
operator that runs a Pig job (PigOperator), a sensor operator that
waits for a partition to land in Hive (HiveSensorOperator), or one that
moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these
operators (tasks) target specific operations, running specific scripts,
functions or data transfers.
This class is abstract and shouldn't be instantiated. Instantiating a
class derived from this one results in the creation of a task object,
which ultimately becomes a node in DAG objects. Task dependencies should
be set by using the set_upstream and/or set_downstream methods.
:param task_id: a unique, meaningful id for the task
:type task_id: string
:param owner: the owner of the task, using the unix username is recommended
:type owner: string
:param retries: the number of retries that should be performed before
failing the task
:type retries: int
:param retry_delay: delay between retries
:type retry_delay: timedelta
:param retry_exponential_backoff: allow progressive longer waits between
retries by using exponential backoff algorithm on retry delay (delay
will be converted into seconds)
:type retry_exponential_backoff: bool
:param max_retry_delay: maximum delay interval between retries
:type max_retry_delay: timedelta
:param start_date: The ``start_date`` for the task, determines
the ``execution_date`` for the first task instance. The best practice
is to have the start_date rounded
to your DAG's ``schedule_interval``. Daily jobs have their start_date
some day at 00:00:00, hourly jobs have their start_date at 00:00
of a specific hour. Note that Airflow simply looks at the latest
``execution_date`` and adds the ``schedule_interval`` to determine
the next ``execution_date``. It is also very important
to note that different tasks' dependencies
need to line up in time. If task A depends on task B and their
start_date are offset in a way that their execution_date don't line
up, A's dependencies will never be met. If you are looking to delay
a task, for example running a daily task at 2AM, look into the
``TimeSensor`` and ``TimeDeltaSensor``. We advise against using
dynamic ``start_date`` and recommend using fixed ones. Read the
FAQ entry about start_date for more information.
:type start_date: datetime
:param end_date: if specified, the scheduler won't go beyond this date
:type end_date: datetime
:param depends_on_past: when set to true, task instances will run
    sequentially, and an instance will only run if the previous
    schedule's instance of the same task succeeded (or was skipped).
    The task instance for the start_date is allowed to run.
:type depends_on_past: bool
:param wait_for_downstream: when set to true, an instance of task
X will wait for tasks immediately downstream of the previous instance
of task X to finish successfully before it runs. This is useful if the
different instances of a task X alter the same asset, and this asset
is used by tasks downstream of task X. Note that depends_on_past
is forced to True wherever wait_for_downstream is used.
:type wait_for_downstream: bool
:param queue: which queue to target when running this job. Not
    all executors implement queue management; the CeleryExecutor, for
    example, does support targeting specific queues.
:type queue: str
:param dag: a reference to the dag the task is attached to (if any)
:type dag: DAG
:param priority_weight: priority weight of this task against other task.
This allows the executor to trigger higher priority tasks before
others when things get backed up.
:type priority_weight: int
:param weight_rule: weighting method used for the effective total
priority weight of the task. Options are:
``{ downstream | upstream | absolute }`` default is ``downstream``
When set to ``downstream`` the effective weight of the task is the
aggregate sum of all downstream descendants. As a result, upstream
tasks will have higher weight and will be scheduled more aggressively
when using positive weight values. This is useful when you have
multiple dag run instances and want all upstream tasks to
complete for all runs before each dag can continue processing
downstream tasks. When set to ``upstream`` the effective weight is the
aggregate sum of all upstream ancestors. This is the opposite where
downstream tasks have higher weight and will be scheduled more
aggressively when using positive weight values. This is useful when you
have multiple dag run instances and prefer to have each dag complete
before starting upstream tasks of other dags. When set to
``absolute``, the effective weight is the exact ``priority_weight``
specified without additional weighting. You may want to do this when
you know exactly what priority weight each task should have.
Additionally, when set to ``absolute``, there is a bonus effect of
significantly speeding up the task creation process for very large
DAGs. Options can be set as string or using the constants defined in
the static class ``airflow.utils.WeightRule``
:type weight_rule: str
:param pool: the slot pool this task should run in, slot pools are a
way to limit concurrency for certain tasks
:type pool: str
:param sla: time by which the job is expected to succeed. Note that
this represents the ``timedelta`` after the period is closed. For
example if you set an SLA of 1 hour, the scheduler would send an email
soon after 1:00AM on the ``2016-01-02`` if the ``2016-01-01`` instance
has not succeeded yet.
The scheduler pays special attention for jobs with an SLA and
sends alert
emails for sla misses. SLA misses are also recorded in the database
for future reference. All tasks that share the same SLA time
get bundled in a single email, sent soon after that time. SLA
notifications are sent once and only once for each task instance.
:type sla: datetime.timedelta
:param execution_timeout: max time allowed for the execution of
this task instance, if it goes beyond it will raise and fail.
:type execution_timeout: datetime.timedelta
:param on_failure_callback: a function to be called when a task instance
of this task fails. a context dictionary is passed as a single
parameter to this function. Context contains references to related
objects to the task instance and is documented under the macros
section of the API.
:type on_failure_callback: callable
:param on_retry_callback: much like the ``on_failure_callback`` except
    that it is executed when retries occur.
:type on_retry_callback: callable
:param on_success_callback: much like the ``on_failure_callback`` except
that it is executed when the task succeeds.
:type on_success_callback: callable
:param trigger_rule: defines the rule by which dependencies are applied
for the task to get triggered. Options are:
``{ all_success | all_failed | all_done | one_success |
one_failed | dummy}``
default is ``all_success``. Options can be set as string or
using the constants defined in the static class
``airflow.utils.TriggerRule``
:type trigger_rule: str
:param resources: A map of resource parameter names (the argument names of the
Resources constructor) to their values.
:type resources: dict
:param run_as_user: unix username to impersonate while running the task
:type run_as_user: str
:param task_concurrency: When set, a task will be able to limit the concurrent
runs across execution_dates
:type task_concurrency: int
:param executor_config: Additional task-level configuration parameters that are
interpreted by a specific executor. Parameters are namespaced by the name of
executor.
**Example**: to run this task in a specific docker container through
the KubernetesExecutor::

    MyOperator(...,
        executor_config={
            "KubernetesExecutor":
                {"image": "myCustomDockerImage"}
        }
    )
:type executor_config: dict
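A minimal sketch of deriving and instantiating an operator (the class,
task id and DAG below are purely illustrative)::

    class HelloOperator(BaseOperator):

        @apply_defaults
        def __init__(self, name, *args, **kwargs):
            super(HelloOperator, self).__init__(*args, **kwargs)
            self.name = name

        def execute(self, context):
            self.log.info('Hello %s', self.name)

    hello = HelloOperator(task_id='hello', name='world', retries=2,
                          retry_delay=timedelta(minutes=5), dag=dag)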
"""
# For derived classes to define which fields will get jinjaified
template_fields = []
# Defines which files extensions to look for in the templated fields
template_ext = []
# Defines the color in the UI
ui_color = '#fff'
ui_fgcolor = '#000'
@apply_defaults
def __init__(
self,
task_id,
owner=configuration.conf.get('operators', 'DEFAULT_OWNER'),
email=None,
email_on_retry=True,
email_on_failure=True,
retries=0,
retry_delay=timedelta(seconds=300),
retry_exponential_backoff=False,
max_retry_delay=None,
start_date=None,
end_date=None,
schedule_interval=None, # not hooked as of now
depends_on_past=False,
wait_for_downstream=False,
dag=None,
params=None,
default_args=None,
adhoc=False,
priority_weight=1,
weight_rule=WeightRule.DOWNSTREAM,
queue=configuration.conf.get('celery', 'default_queue'),
pool=None,
sla=None,
execution_timeout=None,
on_failure_callback=None,
on_success_callback=None,
on_retry_callback=None,
trigger_rule=TriggerRule.ALL_SUCCESS,
resources=None,
run_as_user=None,
task_concurrency=None,
executor_config=None,
inlets=None,
outlets=None,
*args,
**kwargs):
if args or kwargs:
# TODO remove *args and **kwargs in Airflow 2.0
warnings.warn(
'Invalid arguments were passed to {c}. Support for '
'passing such arguments will be dropped in Airflow 2.0. '
'Invalid arguments were:'
'\n*args: {a}\n**kwargs: {k}'.format(
c=self.__class__.__name__, a=args, k=kwargs),
category=PendingDeprecationWarning
)
validate_key(task_id)
self.task_id = task_id
self.owner = owner
self.email = email
self.email_on_retry = email_on_retry
self.email_on_failure = email_on_failure
self.start_date = start_date
if start_date and not isinstance(start_date, datetime):
self.log.warning("start_date for %s isn't datetime.datetime", self)
self.end_date = end_date
if not TriggerRule.is_valid(trigger_rule):
raise AirflowException(
"The trigger_rule must be one of {all_triggers},"
"'{d}.{t}'; received '{tr}'."
.format(all_triggers=TriggerRule.all_triggers,
d=dag.dag_id if dag else "", t=task_id, tr=trigger_rule))
self.trigger_rule = trigger_rule
self.depends_on_past = depends_on_past
self.wait_for_downstream = wait_for_downstream
if wait_for_downstream:
self.depends_on_past = True
if schedule_interval:
self.log.warning(
"schedule_interval is used for %s, though it has "
"been deprecated as a task parameter, you need to "
"specify it as a DAG parameter instead",
self
)
self._schedule_interval = schedule_interval
self.retries = retries
self.queue = queue
self.pool = pool
self.sla = sla
self.execution_timeout = execution_timeout
self.on_failure_callback = on_failure_callback
self.on_success_callback = on_success_callback
self.on_retry_callback = on_retry_callback
if isinstance(retry_delay, timedelta):
self.retry_delay = retry_delay
else:
self.log.debug("Retry_delay isn't timedelta object, assuming secs")
self.retry_delay = timedelta(seconds=retry_delay)
self.retry_exponential_backoff = retry_exponential_backoff
self.max_retry_delay = max_retry_delay
self.params = params or {} # Available in templates!
self.adhoc = adhoc
self.priority_weight = priority_weight
if not WeightRule.is_valid(weight_rule):
raise AirflowException(
"The weight_rule must be one of {all_weight_rules},"
"'{d}.{t}'; received '{tr}'."
.format(all_weight_rules=WeightRule.all_weight_rules,
d=dag.dag_id if dag else "", t=task_id, tr=weight_rule))
self.weight_rule = weight_rule
self.resources = Resources(**(resources or {}))
self.run_as_user = run_as_user
self.task_concurrency = task_concurrency
self.executor_config = executor_config or {}
# Private attributes
self._upstream_task_ids = set()
self._downstream_task_ids = set()
if not dag and _CONTEXT_MANAGER_DAG:
dag = _CONTEXT_MANAGER_DAG
if dag:
self.dag = dag
self._log = logging.getLogger("airflow.task.operators")
# lineage
self.inlets = []
self.outlets = []
self.lineage_data = None
self._inlets = {
"auto": False,
"task_ids": [],
"datasets": [],
}
self._outlets = {
"datasets": [],
}
if inlets:
self._inlets.update(inlets)
if outlets:
self._outlets.update(outlets)
self._comps = {
'task_id',
'dag_id',
'owner',
'email',
'email_on_retry',
'retry_delay',
'retry_exponential_backoff',
'max_retry_delay',
'start_date',
'schedule_interval',
'depends_on_past',
'wait_for_downstream',
'adhoc',
'priority_weight',
'sla',
'execution_timeout',
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
}
def __eq__(self, other):
return (
type(self) == type(other) and
all(self.__dict__.get(c, None) == other.__dict__.get(c, None)
for c in self._comps))
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.task_id < other.task_id
def __hash__(self):
hash_components = [type(self)]
for c in self._comps:
val = getattr(self, c, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
# Composing Operators -----------------------------------------------
def __rshift__(self, other):
"""
Implements Self >> Other == self.set_downstream(other)
If "Other" is a DAG, the DAG is assigned to the Operator.
"""
if isinstance(other, DAG):
# if this dag is already assigned, do nothing
# otherwise, do normal dag assignment
if not (self.has_dag() and self.dag is other):
self.dag = other
else:
self.set_downstream(other)
return other
def __lshift__(self, other):
"""
Implements Self << Other == self.set_upstream(other)
If "Other" is a DAG, the DAG is assigned to the Operator.
"""
if isinstance(other, DAG):
# if this dag is already assigned, do nothing
# otherwise, do normal dag assignment
if not (self.has_dag() and self.dag is other):
self.dag = other
else:
self.set_upstream(other)
return other
def __rrshift__(self, other):
"""
Called for [DAG] >> [Operator] because DAGs don't have
__rshift__ operators.
"""
self.__lshift__(other)
return self
def __rlshift__(self, other):
"""
Called for [DAG] << [Operator] because DAGs don't have
__lshift__ operators.
"""
self.__rshift__(other)
return self
# /Composing Operators ---------------------------------------------
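# Illustrative sketch of the composition operators above (the task and
# dag names are hypothetical):
#
#     t1 >> t2      # same as t1.set_downstream(t2)
#     t3 << t2      # same as t3.set_upstream(t2)
#     dag >> t1     # assigns t1 to dag (dispatched to t1.__rrshift__)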
@property
def dag(self):
"""
Returns the Operator's DAG if set, otherwise raises an error
"""
if self.has_dag():
return self._dag
else:
raise AirflowException(
'Operator {} has not been assigned to a DAG yet'.format(self))
@dag.setter
def dag(self, dag):
"""
Operators can be assigned to one DAG, one time. Repeat assignments to
that same DAG are ok.
"""
if not isinstance(dag, DAG):
raise TypeError(
'Expected DAG; received {}'.format(dag.__class__.__name__))
elif self.has_dag() and self.dag is not dag:
raise AirflowException(
"The DAG assigned to {} can not be changed.".format(self))
elif self.task_id not in dag.task_dict:
dag.add_task(self)
self._dag = dag
def has_dag(self):
"""
Returns True if the Operator has been assigned to a DAG.
"""
return getattr(self, '_dag', None) is not None
@property
def dag_id(self):
if self.has_dag():
return self.dag.dag_id
else:
return 'adhoc_' + self.owner
@property
def deps(self):
"""
Returns the list of dependencies for the operator. These differ from execution
context dependencies in that they are specific to tasks and can be
extended/overridden by subclasses.
"""
return {
NotInRetryPeriodDep(),
PrevDagrunDep(),
TriggerRuleDep(),
}
@property
def schedule_interval(self):
"""
The schedule interval of the DAG always wins over individual tasks so
that tasks within a DAG always line up. The task still needs a
schedule_interval as it may not be attached to a DAG.
"""
if self.has_dag():
return self.dag._schedule_interval
else:
return self._schedule_interval
@property
def priority_weight_total(self):
if self.weight_rule == WeightRule.ABSOLUTE:
return self.priority_weight
elif self.weight_rule == WeightRule.DOWNSTREAM:
upstream = False
elif self.weight_rule == WeightRule.UPSTREAM:
upstream = True
else:
upstream = False
return self.priority_weight + sum(
map(lambda task_id: self._dag.task_dict[task_id].priority_weight,
self.get_flat_relative_ids(upstream=upstream))
)
@prepare_lineage
def pre_execute(self, context):
"""
This hook is triggered right before self.execute() is called.
"""
pass
def execute(self, context):
"""
This is the main method to derive when creating an operator.
Context is the same dictionary used as when rendering jinja templates.
Refer to get_template_context for more context.
"""
raise NotImplementedError()
@apply_lineage
def post_execute(self, context, result=None):
"""
This hook is triggered right after self.execute() is called.
It is passed the execution context and any results returned by the
operator.
"""
pass
def on_kill(self):
"""
Override this method to cleanup subprocesses when a task instance
gets killed. Any use of the threading, subprocess or multiprocessing
module within an operator needs to be cleaned up or it will leave
ghost processes behind.
"""
pass
def __deepcopy__(self, memo):
"""
Hack to work around hitting the maximum recursion depth when
deep-copying operators whose doubly-linked task lists are long.
"""
sys.setrecursionlimit(5000) # TODO fix this in a better way
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in list(self.__dict__.items()):
if k not in ('user_defined_macros', 'user_defined_filters',
'params', '_log'):
setattr(result, k, copy.deepcopy(v, memo))
result.params = self.params
if hasattr(self, 'user_defined_macros'):
result.user_defined_macros = self.user_defined_macros
if hasattr(self, 'user_defined_filters'):
result.user_defined_filters = self.user_defined_filters
if hasattr(self, '_log'):
result._log = self._log
return result
def __getstate__(self):
state = dict(self.__dict__)
del state['_log']
return state
def __setstate__(self, state):
self.__dict__ = state
self._log = logging.getLogger("airflow.task.operators")
def render_template_from_field(self, attr, content, context, jinja_env):
"""
Renders a template from a field. If the field is a string, it will
simply render the string and return the result. If it is a collection or
nested set of collections, it will traverse the structure and render
all strings in it.
"""
rt = self.render_template
if isinstance(content, six.string_types):
result = jinja_env.from_string(content).render(**context)
elif isinstance(content, (list, tuple)):
result = [rt(attr, e, context) for e in content]
elif isinstance(content, dict):
result = {
k: rt("{}[{}]".format(attr, k), v, context)
for k, v in list(content.items())}
else:
param_type = type(content)
msg = (
"Type '{param_type}' used for parameter '{attr}' is "
"not supported for templating").format(**locals())
raise AirflowException(msg)
return result
def render_template(self, attr, content, context):
"""
Renders a template either from a file or directly in a field, and returns
the rendered result.
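For example (illustrative values), rendering the string ``'{{ ds }}'``
against a context containing ``ds='2018-01-01'`` returns
``'2018-01-01'``, while rendering ``'query.sql'`` for an operator whose
``template_ext`` includes ``'.sql'`` loads that file from the template
search path and renders its contents instead.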
"""
jinja_env = self.dag.get_template_env() \
if hasattr(self, 'dag') \
else jinja2.Environment(cache_size=0)
exts = self.__class__.template_ext
if (
isinstance(content, six.string_types) and
any([content.endswith(ext) for ext in exts])):
return jinja_env.get_template(content).render(**context)
else:
return self.render_template_from_field(attr, content, context, jinja_env)
def prepare_template(self):
"""
Hook that is triggered after the templated fields get replaced
by their content. If you need your operator to alter the
content of the file before the template is rendered,
it should override this method to do so.
"""
pass
def resolve_template_files(self):
# Getting the content of files for template_field / template_ext
for attr in self.template_fields:
content = getattr(self, attr)
if content is not None and \
isinstance(content, six.string_types) and \
any([content.endswith(ext) for ext in self.template_ext]):
env = self.dag.get_template_env()
try:
setattr(self, attr, env.loader.get_source(env, content)[0])
except Exception as e:
self.log.exception(e)
self.prepare_template()
@property
def upstream_list(self):
"""@property: list of tasks directly upstream"""
return [self.dag.get_task(tid) for tid in self._upstream_task_ids]
@property
def upstream_task_ids(self):
return self._upstream_task_ids
@property
def downstream_list(self):
"""@property: list of tasks directly downstream"""
return [self.dag.get_task(tid) for tid in self._downstream_task_ids]
@property
def downstream_task_ids(self):
return self._downstream_task_ids
@provide_session
def clear(self,
start_date=None,
end_date=None,
upstream=False,
downstream=False,
session=None):
"""
Clears the state of task instances associated with the task, following
the parameters specified.
"""
TI = TaskInstance
qry = session.query(TI).filter(TI.dag_id == self.dag_id)
if start_date:
qry = qry.filter(TI.execution_date >= start_date)
if end_date:
qry = qry.filter(TI.execution_date <= end_date)
tasks = [self.task_id]
if upstream:
tasks += [
t.task_id for t in self.get_flat_relatives(upstream=True)]
if downstream:
tasks += [
t.task_id for t in self.get_flat_relatives(upstream=False)]
qry = qry.filter(TI.task_id.in_(tasks))
count = qry.count()
clear_task_instances(qry.all(), session, dag=self.dag)
session.commit()
return count
def get_task_instances(self, session, start_date=None, end_date=None):
"""
Get a set of task instance related to this task for a specific date
range.
"""
TI = TaskInstance
end_date = end_date or timezone.utcnow()
return session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date >= start_date,
TI.execution_date <= end_date,
).order_by(TI.execution_date).all()
def get_flat_relative_ids(self, upstream=False, found_descendants=None):
"""
Get a flat list of relatives' ids, either upstream or downstream.
"""
if not found_descendants:
found_descendants = set()
relative_ids = self.get_direct_relative_ids(upstream)
for relative_id in relative_ids:
if relative_id not in found_descendants:
found_descendants.add(relative_id)
relative_task = self._dag.task_dict[relative_id]
relative_task.get_flat_relative_ids(upstream,
found_descendants)
return found_descendants
def get_flat_relatives(self, upstream=False):
"""
Get a flat list of relatives, either upstream or downstream.
"""
return list(map(lambda task_id: self._dag.task_dict[task_id],
self.get_flat_relative_ids(upstream)))
def run(
self,
start_date=None,
end_date=None,
ignore_first_depends_on_past=False,
ignore_ti_state=False,
mark_success=False):
"""
Run a set of task instances for a date range.
"""
start_date = start_date or self.start_date
end_date = end_date or self.end_date or timezone.utcnow()
for dt in self.dag.date_range(start_date, end_date=end_date):
TaskInstance(self, dt).run(
mark_success=mark_success,
ignore_depends_on_past=(
dt == start_date and ignore_first_depends_on_past),
ignore_ti_state=ignore_ti_state)
def dry_run(self):
self.log.info('Dry run')
for attr in self.template_fields:
content = getattr(self, attr)
if content and isinstance(content, six.string_types):
self.log.info('Rendering template for %s', attr)
self.log.info(content)
def get_direct_relative_ids(self, upstream=False):
"""
Get the direct relative ids to the current task, upstream or
downstream.
"""
if upstream:
return self._upstream_task_ids
else:
return self._downstream_task_ids
def get_direct_relatives(self, upstream=False):
"""
Get the direct relatives to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_list
else:
return self.downstream_list
def __repr__(self):
return "<Task({self.__class__.__name__}): {self.task_id}>".format(
self=self)
@property
def task_type(self):
return self.__class__.__name__
def add_only_new(self, item_set, item):
if item in item_set:
raise AirflowException(
'Dependency {self}, {item} already registered'
''.format(**locals()))
else:
item_set.add(item)
def _set_relatives(self, task_or_task_list, upstream=False):
try:
task_list = list(task_or_task_list)
except TypeError:
task_list = [task_or_task_list]
for t in task_list:
if not isinstance(t, BaseOperator):
raise AirflowException(
"Relationships can only be set between "
"Operators; received {}".format(t.__class__.__name__))
# relationships can only be set if the tasks share a single DAG. Tasks
# without a DAG are assigned to that DAG.
dags = {t._dag.dag_id: t._dag for t in [self] + task_list if t.has_dag()}
if len(dags) > 1:
raise AirflowException(
'Tried to set relationships between tasks in '
'more than one DAG: {}'.format(dags.values()))
elif len(dags) == 1:
dag = dags.popitem()[1]
else:
raise AirflowException(
"Tried to create relationships between tasks that don't have "
"DAGs yet. Set the DAG for at least one "
"task and try again: {}".format([self] + task_list))
if dag and not self.has_dag():
self.dag = dag
for task in task_list:
if dag and not task.has_dag():
task.dag = dag
if upstream:
task.add_only_new(task._downstream_task_ids, self.task_id)
self.add_only_new(self._upstream_task_ids, task.task_id)
else:
self.add_only_new(self._downstream_task_ids, task.task_id)
task.add_only_new(task._upstream_task_ids, self.task_id)
def set_downstream(self, task_or_task_list):
"""
Set a task or a task list to be directly downstream from the current
task.
"""
self._set_relatives(task_or_task_list, upstream=False)
def set_upstream(self, task_or_task_list):
"""
Set a task or a task list to be directly upstream from the current
task.
"""
self._set_relatives(task_or_task_list, upstream=True)
def xcom_push(
self,
context,
key,
value,
execution_date=None):
"""
See TaskInstance.xcom_push()
"""
context['ti'].xcom_push(
key=key,
value=value,
execution_date=execution_date)
def xcom_pull(
self,
context,
task_ids=None,
dag_id=None,
key=XCOM_RETURN_KEY,
include_prior_dates=None):
"""
See TaskInstance.xcom_pull()
"""
return context['ti'].xcom_pull(
key=key,
task_ids=task_ids,
dag_id=dag_id,
include_prior_dates=include_prior_dates)
class DagModel(Base):
__tablename__ = "dag"
"""
These items are stored in the database for state related information
"""
dag_id = Column(String(ID_LEN), primary_key=True)
# A DAG can be paused from the UI / DB
# Set this default value of is_paused based on a configuration value!
is_paused_at_creation = configuration.conf\
.getboolean('core',
'dags_are_paused_at_creation')
is_paused = Column(Boolean, default=is_paused_at_creation)
# Whether the DAG is a subdag
is_subdag = Column(Boolean, default=False)
# Whether that DAG was seen on the last DagBag load
is_active = Column(Boolean, default=False)
# Last time the scheduler started
last_scheduler_run = Column(UtcDateTime)
# Last time this DAG was pickled
last_pickled = Column(UtcDateTime)
# Time when the DAG last received a refresh signal
# (e.g. the DAG's "refresh" button was clicked in the web UI)
last_expired = Column(UtcDateTime)
# Whether (one of) the scheduler is scheduling this DAG at the moment
scheduler_lock = Column(Boolean)
# Foreign key to the latest pickle_id
pickle_id = Column(Integer)
# The location of the file containing the DAG object
fileloc = Column(String(2000))
# String representing the owners
owners = Column(String(2000))
def __repr__(self):
return "<DAG: {self.dag_id}>".format(self=self)
@classmethod
@provide_session
def get_current(cls, dag_id, session=None):
return session.query(cls).filter(cls.dag_id == dag_id).first()
@functools.total_ordering
class DAG(BaseDag, LoggingMixin):
"""
A dag (directed acyclic graph) is a collection of tasks with directional
dependencies. A dag also has a schedule, a start date and an end date
(optional). For each schedule (say daily or hourly), the DAG needs to run
each individual task as its dependencies are met. Certain tasks have
the property of depending on their own past, meaning that they can't run
until their previous schedule (and upstream tasks) are completed.
DAGs essentially act as namespaces for tasks. A task_id can only be
added once to a DAG.
:param dag_id: The id of the DAG
:type dag_id: string
:param description: The description for the DAG to e.g. be shown on the webserver
:type description: string
:param schedule_interval: Defines how often that DAG runs, this
timedelta object gets added to your latest task instance's
execution_date to figure out the next schedule
:type schedule_interval: datetime.timedelta or
dateutil.relativedelta.relativedelta or str that acts as a cron
expression
:param start_date: The timestamp from which the scheduler will
attempt to backfill
:type start_date: datetime.datetime
:param end_date: A date beyond which your DAG won't run, leave to None
for open ended scheduling
:type end_date: datetime.datetime
:param template_searchpath: This list of folders (non-relative)
    defines where jinja will look for your templates. Order matters.
    Note that jinja/airflow includes the path of your DAG file by
    default
:type template_searchpath: string or list of strings
:param user_defined_macros: a dictionary of macros that will be exposed
in your jinja templates. For example, passing ``dict(foo='bar')``
to this argument allows you to ``{{ foo }}`` in all jinja
templates related to this DAG. Note that you can pass any
type of object here.
:type user_defined_macros: dict
:param user_defined_filters: a dictionary of filters that will be exposed
in your jinja templates. For example, passing
``dict(hello=lambda name: 'Hello %s' % name)`` to this argument allows
you to ``{{ 'world' | hello }}`` in all jinja templates related to
this DAG.
:type user_defined_filters: dict
:param default_args: A dictionary of default parameters to be used
as constructor keyword parameters when initialising operators.
Note that operators have the same hook, and values defined in an
operator's own ``default_args`` take precedence over those defined
here: if this dict contains `'depends_on_past': True` and the
operator's call passes `'depends_on_past': False`, the actual value
will be `False`.
:type default_args: dict
:param params: a dictionary of DAG level parameters that are made
accessible in templates, namespaced under `params`. These
params can be overridden at the task level.
:type params: dict
:param concurrency: the number of task instances allowed to run
concurrently
:type concurrency: int
:param max_active_runs: maximum number of active DAG runs, beyond this
number of DAG runs in a running state, the scheduler won't create
new active DAG runs
:type max_active_runs: int
:param dagrun_timeout: specify how long a DagRun should be up before
timing out / failing, so that new DagRuns can be created
:type dagrun_timeout: datetime.timedelta
:param sla_miss_callback: specify a function to call when reporting SLA
timeouts.
:type sla_miss_callback: types.FunctionType
:param default_view: Specify DAG default view (tree, graph, duration,
gantt, landing_times)
:type default_view: string
:param orientation: Specify DAG orientation in graph view (LR, TB, RL, BT)
:type orientation: string
:param catchup: Perform scheduler catchup (or only run latest)? Defaults to True
:type catchup: bool
:param on_failure_callback: A function to be called when a DagRun of this dag fails.
A context dictionary is passed as a single parameter to this function.
:type on_failure_callback: callable
:param on_success_callback: Much like the ``on_failure_callback`` except
that it is executed when the dag succeeds.
:type on_success_callback: callable
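A minimal instantiation sketch (the dag id, dates and default_args
below are illustrative only)::

    dag = DAG(
        dag_id='example_dag',
        description='an illustrative DAG',
        schedule_interval='@daily',
        start_date=datetime(2018, 1, 1),
        default_args={'owner': 'airflow', 'retries': 1},
    )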
"""
def __init__(
self, dag_id,
description='',
schedule_interval=timedelta(days=1),
start_date=None, end_date=None,
full_filepath=None,
template_searchpath=None,
user_defined_macros=None,
user_defined_filters=None,
default_args=None,
concurrency=configuration.conf.getint('core', 'dag_concurrency'),
max_active_runs=configuration.conf.getint(
'core', 'max_active_runs_per_dag'),
dagrun_timeout=None,
sla_miss_callback=None,
default_view=configuration.conf.get('webserver', 'dag_default_view').lower(),
orientation=configuration.conf.get('webserver', 'dag_orientation'),
catchup=configuration.conf.getboolean('scheduler', 'catchup_by_default'),
on_success_callback=None, on_failure_callback=None,
params=None):
self.user_defined_macros = user_defined_macros
self.user_defined_filters = user_defined_filters
self.default_args = default_args or {}
self.params = params or {}
# merging potentially conflicting default_args['params'] into params
if 'params' in self.default_args:
self.params.update(self.default_args['params'])
del self.default_args['params']
validate_key(dag_id)
# Properties from BaseDag
self._dag_id = dag_id
self._full_filepath = full_filepath if full_filepath else ''
self._concurrency = concurrency
self._pickle_id = None
self._description = description
# set file location to caller source path
self.fileloc = sys._getframe().f_back.f_code.co_filename
self.task_dict = dict()
# set timezone
if start_date and start_date.tzinfo:
self.timezone = start_date.tzinfo
elif 'start_date' in self.default_args and self.default_args['start_date']:
if isinstance(self.default_args['start_date'], six.string_types):
self.default_args['start_date'] = (
timezone.parse(self.default_args['start_date'])
)
self.timezone = self.default_args['start_date'].tzinfo
else:
self.timezone = settings.TIMEZONE
self.start_date = timezone.convert_to_utc(start_date)
self.end_date = timezone.convert_to_utc(end_date)
# also convert tasks
if 'start_date' in self.default_args:
self.default_args['start_date'] = (
timezone.convert_to_utc(self.default_args['start_date'])
)
if 'end_date' in self.default_args:
self.default_args['end_date'] = (
timezone.convert_to_utc(self.default_args['end_date'])
)
self.schedule_interval = schedule_interval
if schedule_interval in cron_presets:
self._schedule_interval = cron_presets.get(schedule_interval)
elif schedule_interval == '@once':
self._schedule_interval = None
else:
self._schedule_interval = schedule_interval
if isinstance(template_searchpath, six.string_types):
template_searchpath = [template_searchpath]
self.template_searchpath = template_searchpath
self.parent_dag = None # Gets set when DAGs are loaded
self.last_loaded = timezone.utcnow()
self.safe_dag_id = dag_id.replace('.', '__dot__')
self.max_active_runs = max_active_runs
self.dagrun_timeout = dagrun_timeout
self.sla_miss_callback = sla_miss_callback
self.default_view = default_view
self.orientation = orientation
self.catchup = catchup
self.is_subdag = False # DagBag.bag_dag() will set this to True if appropriate
self.partial = False
self.on_success_callback = on_success_callback
self.on_failure_callback = on_failure_callback
self._comps = {
'dag_id',
'task_ids',
'parent_dag',
'start_date',
'schedule_interval',
'full_filepath',
'template_searchpath',
'last_loaded',
}
def __repr__(self):
return "<DAG: {self.dag_id}>".format(self=self)
def __eq__(self, other):
return (
type(self) == type(other) and
# Use getattr() instead of __dict__ as __dict__ doesn't return
# correct values for properties.
all(getattr(self, c, None) == getattr(other, c, None)
for c in self._comps))
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.dag_id < other.dag_id
def __hash__(self):
hash_components = [type(self)]
for c in self._comps:
# task_ids returns a list and lists can't be hashed
if c == 'task_ids':
val = tuple(self.task_dict.keys())
else:
val = getattr(self, c, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
# Context Manager -----------------------------------------------
def __enter__(self):
global _CONTEXT_MANAGER_DAG
self._old_context_manager_dag = _CONTEXT_MANAGER_DAG
_CONTEXT_MANAGER_DAG = self
return self
def __exit__(self, _type, _value, _tb):
global _CONTEXT_MANAGER_DAG
_CONTEXT_MANAGER_DAG = self._old_context_manager_dag
# /Context Manager ----------------------------------------------
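# Illustrative sketch of the context manager defined above (the dag id
# and operator are hypothetical):
#
#     with DAG('example_dag', start_date=datetime(2018, 1, 1)) as dag:
#         op = DummyOperator(task_id='dummy')
#     # op.dag is set to dag automatically via _CONTEXT_MANAGER_DAG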
def date_range(self, start_date, num=None, end_date=timezone.utcnow()):
if num:
end_date = None
return utils_date_range(
start_date=start_date, end_date=end_date,
num=num, delta=self._schedule_interval)
def following_schedule(self, dttm):
"""
Calculates the following schedule for this dag in local time
:param dttm: utc datetime
:return: utc datetime
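For example (illustrative), with a cron schedule of ``'0 0 * * *'``
(the ``@daily`` preset) a ``dttm`` of 2018-01-01T00:00:00+00:00 yields
2018-01-02T00:00:00+00:00; with ``schedule_interval=timedelta(hours=1)``
this simply returns ``dttm + timedelta(hours=1)``.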
"""
if isinstance(self._schedule_interval, six.string_types):
dttm = timezone.make_naive(dttm, self.timezone)
cron = croniter(self._schedule_interval, dttm)
following = timezone.make_aware(cron.get_next(datetime), self.timezone)
return timezone.convert_to_utc(following)
elif isinstance(self._schedule_interval, timedelta):
return dttm + self._schedule_interval
def previous_schedule(self, dttm):
"""
Calculates the previous schedule for this dag in local time
:param dttm: utc datetime
:return: utc datetime
"""
if isinstance(self._schedule_interval, six.string_types):
dttm = timezone.make_naive(dttm, self.timezone)
cron = croniter(self._schedule_interval, dttm)
prev = timezone.make_aware(cron.get_prev(datetime), self.timezone)
return timezone.convert_to_utc(prev)
elif isinstance(self._schedule_interval, timedelta):
return dttm - self._schedule_interval
def get_run_dates(self, start_date, end_date=None):
"""
Returns a list of dates between the interval received as parameter using this
dag's schedule interval. Returned dates can be used for execution dates.
:param start_date: the start date of the interval
:type start_date: datetime
:param end_date: the end date of the interval, defaults to timezone.utcnow()
:type end_date: datetime
:return: a list of dates within the interval following the dag's schedule
:rtype: list
"""
run_dates = []
using_start_date = start_date
using_end_date = end_date
# dates for dag runs
using_start_date = using_start_date or min([t.start_date for t in self.tasks])
using_end_date = using_end_date or timezone.utcnow()
# next run date for a subdag isn't relevant (schedule_interval for subdags
# is ignored) so we use the dag run's start date in the case of a subdag
next_run_date = (self.normalize_schedule(using_start_date)
if not self.is_subdag else using_start_date)
while next_run_date and next_run_date <= using_end_date:
run_dates.append(next_run_date)
next_run_date = self.following_schedule(next_run_date)
return run_dates
def normalize_schedule(self, dttm):
"""
Returns the following schedule of dttm, unless dttm already falls exactly
on the schedule, in which case dttm is returned unchanged
"""
following = self.following_schedule(dttm)
# in case of @once
if not following:
return dttm
if self.previous_schedule(following) != dttm:
return following
return dttm
@provide_session
def get_last_dagrun(self, session=None, include_externally_triggered=False):
"""
Returns the last dag run for this dag, None if there was none.
Last dag run can be any type of run, e.g. scheduled or backfilled.
Overridden DagRuns are ignored
"""
DR = DagRun
qry = session.query(DR).filter(
DR.dag_id == self.dag_id,
)
if not include_externally_triggered:
qry = qry.filter(DR.external_trigger.__eq__(False))
qry = qry.order_by(DR.execution_date.desc())
last = qry.first()
return last
@property
def dag_id(self):
return self._dag_id
@dag_id.setter
def dag_id(self, value):
self._dag_id = value
@property
def full_filepath(self):
return self._full_filepath
@full_filepath.setter
def full_filepath(self, value):
self._full_filepath = value
@property
def concurrency(self):
return self._concurrency
@concurrency.setter
def concurrency(self, value):
self._concurrency = value
@property
def description(self):
return self._description
@property
def pickle_id(self):
return self._pickle_id
@pickle_id.setter
def pickle_id(self, value):
self._pickle_id = value
@property
def tasks(self):
return list(self.task_dict.values())
@tasks.setter
def tasks(self, val):
raise AttributeError(
'DAG.tasks can not be modified. Use dag.add_task() instead.')
@property
def task_ids(self):
return list(self.task_dict.keys())
@property
def active_task_ids(self):
return list(k for k, v in self.task_dict.items() if not v.adhoc)
@property
def active_tasks(self):
return [t for t in self.tasks if not t.adhoc]
@property
def filepath(self):
"""
File location of where the dag object is instantiated
"""
fn = self.full_filepath.replace(settings.DAGS_FOLDER + '/', '')
fn = fn.replace(os.path.dirname(__file__) + '/', '')
return fn
@property
def folder(self):
"""
Folder location of where the dag object is instantiated
"""
return os.path.dirname(self.full_filepath)
@property
def owner(self):
return ", ".join(list(set([t.owner for t in self.tasks])))
@property
@provide_session
def concurrency_reached(self, session=None):
"""
Returns a boolean indicating whether the concurrency limit for this DAG
has been reached
"""
TI = TaskInstance
qry = session.query(func.count(TI.task_id)).filter(
TI.dag_id == self.dag_id,
TI.state == State.RUNNING,
)
return qry.scalar() >= self.concurrency
@property
@provide_session
def is_paused(self, session=None):
"""
Returns a boolean indicating whether this DAG is paused
"""
qry = session.query(DagModel).filter(
DagModel.dag_id == self.dag_id)
return qry.value('is_paused')
@provide_session
def handle_callback(self, dagrun, success=True, reason=None, session=None):
"""
Triggers the appropriate callback depending on the value of success, namely the
on_failure_callback or on_success_callback. This method gets the context of a
single TaskInstance part of this DagRun and passes that to the callable along
with a 'reason', primarily to differentiate DagRun failures.
.. note::
The logs end up in $AIRFLOW_HOME/logs/scheduler/latest/PROJECT/DAG_FILE.py.log
:param dagrun: DagRun object
:param success: Flag to specify if failure or success callback should be called
:param reason: Completion reason
:param session: Database session
"""
callback = self.on_success_callback if success else self.on_failure_callback
if callback:
self.log.info('Executing dag callback function: {}'.format(callback))
tis = dagrun.get_task_instances(session=session)
ti = tis[-1]  # get the last TaskInstance of the DagRun
ti.task = self.get_task(ti.task_id)
context = ti.get_template_context(session=session)
context.update({'reason': reason})
callback(context)
@provide_session
def get_active_runs(self, session=None):
"""
Returns a list of dag run execution dates currently running
:param session:
:return: List of execution dates
"""
runs = DagRun.find(dag_id=self.dag_id, state=State.RUNNING)
active_dates = []
for run in runs:
active_dates.append(run.execution_date)
return active_dates
@provide_session
def get_num_active_runs(self, external_trigger=None, session=None):
"""
Returns the number of active "running" dag runs
:param external_trigger: True for externally triggered active dag runs
:type external_trigger: bool
:param session:
:return: the number of active dag runs (0 if there are none)
"""
query = (session
.query(DagRun)
.filter(DagRun.dag_id == self.dag_id)
.filter(DagRun.state == State.RUNNING))
if external_trigger is not None:
query = query.filter(DagRun.external_trigger == external_trigger)
return query.count()
@provide_session
def get_dagrun(self, execution_date, session=None):
"""
Returns the dag run for a given execution date if it exists, otherwise
None.
:param execution_date: The execution date of the DagRun to find.
:param session:
:return: The DagRun if found, otherwise None.
"""
dagrun = (
session.query(DagRun)
.filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == execution_date)
.first())
return dagrun
@property
@provide_session
def latest_execution_date(self, session=None):
"""
Returns the latest date for which at least one dag run exists
"""
execution_date = session.query(func.max(DagRun.execution_date)).filter(
DagRun.dag_id == self.dag_id
).scalar()
return execution_date
@property
def subdags(self):
"""
Returns a list of the subdag objects associated to this DAG
"""
# Check SubDag for class but don't check class directly, see
# https://github.com/airbnb/airflow/issues/1168
from airflow.operators.subdag_operator import SubDagOperator
subdag_lst = []
for task in self.tasks:
if (isinstance(task, SubDagOperator) or
# TODO remove in Airflow 2.0
type(task).__name__ == 'SubDagOperator'):
subdag_lst.append(task.subdag)
subdag_lst += task.subdag.subdags
return subdag_lst
def resolve_template_files(self):
for t in self.tasks:
t.resolve_template_files()
def get_template_env(self):
"""
Returns a jinja2 Environment while taking into account the DAGs
template_searchpath, user_defined_macros and user_defined_filters
"""
searchpath = [self.folder]
if self.template_searchpath:
searchpath += self.template_searchpath
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath),
extensions=["jinja2.ext.do"],
cache_size=0)
if self.user_defined_macros:
env.globals.update(self.user_defined_macros)
if self.user_defined_filters:
env.filters.update(self.user_defined_filters)
return env
def set_dependency(self, upstream_task_id, downstream_task_id):
"""
Simple utility method to set dependency between two tasks that
already have been added to the DAG using add_task()
"""
self.get_task(upstream_task_id).set_downstream(
self.get_task(downstream_task_id))
def get_task_instances(
self, session, start_date=None, end_date=None, state=None):
TI = TaskInstance
if not start_date:
start_date = (timezone.utcnow() - timedelta(30)).date()
start_date = datetime.combine(start_date, datetime.min.time())
end_date = end_date or timezone.utcnow()
tis = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date >= start_date,
TI.execution_date <= end_date,
TI.task_id.in_([t.task_id for t in self.tasks]),
)
if state:
tis = tis.filter(TI.state == state)
tis = tis.order_by(TI.execution_date).all()
return tis
@property
def roots(self):
return [t for t in self.tasks if not t.downstream_list]
def topological_sort(self):
"""
Sorts tasks in topological order, such that a task comes after any of its
upstream dependencies.
Heavily inspired by:
http://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/
:return: list of tasks in topological order
"""
# copy the tasks so we leave the original list unmodified
graph_unsorted = self.tasks[:]
graph_sorted = []
# special case
if len(self.tasks) == 0:
return tuple(graph_sorted)
# Run until the unsorted graph is empty.
while graph_unsorted:
# Go through each of the node/edges pairs in the unsorted
# graph. If a set of edges doesn't contain any nodes that
# haven't been resolved, that is, that are still in the
# unsorted graph, remove the pair from the unsorted graph,
# and append it to the sorted graph. Note here that by iterating
# over a copy of the unsorted graph (``list(graph_unsorted)``), we
# are free to modify the unsorted graph as we move through it. We
# also keep a flag for
# checking that the graph is acyclic, which is true if any
# nodes are resolved during each pass through the graph. If
# not, we need to bail out as the graph therefore can't be
# sorted.
acyclic = False
for node in list(graph_unsorted):
for edge in node.upstream_list:
if edge in graph_unsorted:
break
# no edges in upstream tasks
else:
acyclic = True
graph_unsorted.remove(node)
graph_sorted.append(node)
if not acyclic:
raise AirflowException("A cyclic dependency occurred in dag: {}"
.format(self.dag_id))
return tuple(graph_sorted)
@provide_session
def set_dag_runs_state(
self, state=State.RUNNING, session=None):
drs = session.query(DagModel).filter_by(dag_id=self.dag_id).all()
dirty_ids = []
for dr in drs:
dr.state = state
dirty_ids.append(dr.dag_id)
DagStat.update(dirty_ids, session=session)
@provide_session
def clear(
self, start_date=None, end_date=None,
only_failed=False,
only_running=False,
confirm_prompt=False,
include_subdags=True,
reset_dag_runs=True,
dry_run=False,
session=None):
"""
Clears a set of task instances associated with the current dag for
a specified date range.
"""
TI = TaskInstance
tis = session.query(TI)
if include_subdags:
# Crafting the right filter for dag_id and task_ids combo
conditions = []
for dag in self.subdags + [self]:
conditions.append(
TI.dag_id.like(dag.dag_id) &
TI.task_id.in_(dag.task_ids)
)
tis = tis.filter(or_(*conditions))
else:
tis = session.query(TI).filter(TI.dag_id == self.dag_id)
tis = tis.filter(TI.task_id.in_(self.task_ids))
if start_date:
tis = tis.filter(TI.execution_date >= start_date)
if end_date:
tis = tis.filter(TI.execution_date <= end_date)
if only_failed:
tis = tis.filter(TI.state == State.FAILED)
if only_running:
tis = tis.filter(TI.state == State.RUNNING)
if dry_run:
tis = tis.all()
session.expunge_all()
return tis
count = tis.count()
do_it = True
if count == 0:
return 0
if confirm_prompt:
ti_list = "\n".join([str(t) for t in tis])
question = (
"You are about to delete these {count} tasks:\n"
"{ti_list}\n\n"
"Are you sure? (yes/no): ").format(**locals())
do_it = utils.helpers.ask_yesno(question)
if do_it:
clear_task_instances(tis.all(), session, dag=self)
if reset_dag_runs:
self.set_dag_runs_state(session=session)
else:
count = 0
print("Bail. Nothing was cleared.")
session.commit()
return count
@classmethod
def clear_dags(
cls, dags,
start_date=None,
end_date=None,
only_failed=False,
only_running=False,
confirm_prompt=False,
include_subdags=True,
reset_dag_runs=True,
dry_run=False):
all_tis = []
for dag in dags:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
only_failed=only_failed,
only_running=only_running,
confirm_prompt=False,
include_subdags=include_subdags,
reset_dag_runs=reset_dag_runs,
dry_run=True)
all_tis.extend(tis)
if dry_run:
return all_tis
count = len(all_tis)
do_it = True
if count == 0:
print("Nothing to clear.")
return 0
if confirm_prompt:
ti_list = "\n".join([str(t) for t in all_tis])
question = (
"You are about to delete these {} tasks:\n"
"{}\n\n"
"Are you sure? (yes/no): ").format(count, ti_list)
do_it = utils.helpers.ask_yesno(question)
if do_it:
for dag in dags:
dag.clear(start_date=start_date,
end_date=end_date,
only_failed=only_failed,
only_running=only_running,
confirm_prompt=False,
include_subdags=include_subdags,
reset_dag_runs=reset_dag_runs,
dry_run=False)
else:
count = 0
print("Bail. Nothing was cleared.")
return count
def __deepcopy__(self, memo):
# Switcharoo to avoid deep-copying objects coming through the
# backdoor
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in list(self.__dict__.items()):
if k not in ('user_defined_macros', 'user_defined_filters', 'params'):
setattr(result, k, copy.deepcopy(v, memo))
result.user_defined_macros = self.user_defined_macros
result.user_defined_filters = self.user_defined_filters
result.params = self.params
return result
def sub_dag(self, task_regex, include_downstream=False,
include_upstream=True):
"""
Returns a subset of the current dag as a deep copy of the current dag
based on a regex that should match one or many tasks, and includes
upstream and downstream neighbours based on the flag passed.
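For example (the task ids are illustrative),
``dag.sub_dag('extract.*')`` returns a partial copy of the dag
containing every task whose id matches ``extract.*`` together with all
of their upstream ancestors.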
"""
dag = copy.deepcopy(self)
regex_match = [
t for t in dag.tasks if re.findall(task_regex, t.task_id)]
also_include = []
for t in regex_match:
if include_downstream:
also_include += t.get_flat_relatives(upstream=False)
if include_upstream:
also_include += t.get_flat_relatives(upstream=True)
# Compiling the unique list of tasks that made the cut
dag.task_dict = {t.task_id: t for t in regex_match + also_include}
for t in dag.tasks:
# Removing upstream/downstream references to tasks that did not
# make the cut
t._upstream_task_ids = t._upstream_task_ids.intersection(dag.task_dict.keys())
t._downstream_task_ids = t._downstream_task_ids.intersection(
dag.task_dict.keys())
if len(dag.tasks) < len(self.tasks):
dag.partial = True
return dag
def has_task(self, task_id):
return task_id in (t.task_id for t in self.tasks)
def get_task(self, task_id):
if task_id in self.task_dict:
return self.task_dict[task_id]
raise AirflowException("Task {task_id} not found".format(**locals()))
@provide_session
def pickle_info(self, session=None):
d = {}
d['is_picklable'] = True
try:
dttm = timezone.utcnow()
pickled = pickle.dumps(self)
d['pickle_len'] = len(pickled)
d['pickling_duration'] = "{}".format(timezone.utcnow() - dttm)
except Exception as e:
self.log.debug(e)
d['is_picklable'] = False
d['stacktrace'] = traceback.format_exc()
return d
@provide_session
def pickle(self, session=None):
dag = session.query(
DagModel).filter(DagModel.dag_id == self.dag_id).first()
dp = None
if dag and dag.pickle_id:
dp = session.query(DagPickle).filter(
DagPickle.id == dag.pickle_id).first()
if not dp or dp.pickle != self:
dp = DagPickle(dag=self)
session.add(dp)
self.last_pickled = timezone.utcnow()
session.commit()
self.pickle_id = dp.id
return dp
def tree_view(self):
"""
Shows an ascii tree representation of the DAG
"""
def get_downstream(task, level=0):
print((" " * level * 4) + str(task))
level += 1
for t in task.upstream_list:
get_downstream(t, level)
for t in self.roots:
get_downstream(t)
def add_task(self, task):
"""
Add a task to the DAG
:param task: the task you want to add
:type task: task
"""
if not self.start_date and not task.start_date:
raise AirflowException("Task is missing the start_date parameter")
# if the task has no start date, assign it the same as the DAG
elif not task.start_date:
task.start_date = self.start_date
# otherwise, the task will start on the later of its own start date and
# the DAG's start date
elif self.start_date:
task.start_date = max(task.start_date, self.start_date)
# if the task has no end date, assign it the same as the dag
if not task.end_date:
task.end_date = self.end_date
# otherwise, the task will end on the earlier of its own end date and
# the DAG's end date
elif task.end_date and self.end_date:
task.end_date = min(task.end_date, self.end_date)
if task.task_id in self.task_dict:
# TODO: raise an error in Airflow 2.0
warnings.warn(
'The requested task could not be added to the DAG because a '
'task with task_id {} is already in the DAG. Starting in '
'Airflow 2.0, trying to overwrite a task will raise an '
'exception.'.format(task.task_id),
category=PendingDeprecationWarning)
else:
self.task_dict[task.task_id] = task
task.dag = self
self.task_count = len(self.task_dict)
def add_tasks(self, tasks):
"""
Add a list of tasks to the DAG
:param tasks: a list of tasks you want to add
:type tasks: list of tasks
"""
for task in tasks:
self.add_task(task)
@provide_session
def db_merge(self, session=None):
BO = BaseOperator
tasks = session.query(BO).filter(BO.dag_id == self.dag_id).all()
for t in tasks:
session.delete(t)
session.commit()
session.merge(self)
session.commit()
def run(
self,
start_date=None,
end_date=None,
mark_success=False,
local=False,
executor=None,
donot_pickle=configuration.conf.getboolean('core', 'donot_pickle'),
ignore_task_deps=False,
ignore_first_depends_on_past=False,
pool=None,
delay_on_limit_secs=1.0):
"""
Runs the DAG.
:param start_date: the start date of the range to run
:type start_date: datetime
:param end_date: the end date of the range to run
:type end_date: datetime
:param mark_success: True to mark jobs as succeeded without running them
:type mark_success: bool
:param local: True to run the tasks using the LocalExecutor
:type local: bool
:param executor: The executor instance to run the tasks
:type executor: BaseExecutor
:param donot_pickle: True to avoid pickling DAG object and send to workers
:type donot_pickle: bool
:param ignore_task_deps: True to skip upstream tasks
:type ignore_task_deps: bool
:param ignore_first_depends_on_past: True to ignore depends_on_past
dependencies for the first set of tasks only
:type ignore_first_depends_on_past: bool
:param pool: Resource pool to use
:type pool: string
:param delay_on_limit_secs: Time in seconds to wait before next attempt to run
dag run when max_active_runs limit has been reached
:type delay_on_limit_secs: float
"""
from airflow.jobs import BackfillJob
if not executor and local:
executor = LocalExecutor()
elif not executor:
executor = GetDefaultExecutor()
job = BackfillJob(
self,
start_date=start_date,
end_date=end_date,
mark_success=mark_success,
executor=executor,
donot_pickle=donot_pickle,
ignore_task_deps=ignore_task_deps,
ignore_first_depends_on_past=ignore_first_depends_on_past,
pool=pool,
delay_on_limit_secs=delay_on_limit_secs)
job.run()
def cli(self):
"""
Exposes a CLI specific to this DAG
"""
from airflow.bin import cli
parser = cli.CLIFactory.get_parser(dag_parser=True)
args = parser.parse_args()
args.func(args, self)
@provide_session
def create_dagrun(self,
run_id,
state,
execution_date=None,
start_date=None,
external_trigger=False,
conf=None,
session=None):
"""
Creates a dag run from this dag including the tasks associated with this dag.
Returns the dag run.
        :param run_id: defines the run id for this dag run
:type run_id: string
:param execution_date: the execution date of this dag run
:type execution_date: datetime
:param state: the state of the dag run
:type state: State
:param start_date: the date this dag run should be evaluated
:type start_date: datetime
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param session: database session
:type session: Session
"""
run = DagRun(
dag_id=self.dag_id,
run_id=run_id,
execution_date=execution_date,
start_date=start_date,
external_trigger=external_trigger,
conf=conf,
state=state
)
session.add(run)
DagStat.set_dirty(dag_id=self.dag_id, session=session)
session.commit()
run.dag = self
# create the associated task instances
# state is None at the moment of creation
run.verify_integrity(session=session)
run.refresh_from_db()
return run
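    # Editor's illustrative usage sketch (hypothetical run id and date, not part
    # of the original module):
    #
    #   run = dag.create_dagrun(
    #       run_id='manual__2017-01-03T12:00:00',
    #       execution_date=timezone.datetime(2017, 1, 3, 12),
    #       state=State.RUNNING,
    #       external_trigger=True)
    #   # verify_integrity() has created a TaskInstance for every task in the DAG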
@provide_session
def sync_to_db(self, owner=None, sync_time=None, session=None):
"""
Save attributes about this DAG to the DB. Note that this method
can be called for both DAGs and SubDAGs. A SubDag is actually a
SubDagOperator.
        :param owner: the owner to record for the DAG (defaults to the DAG's owner)
        :type owner: unicode
:param sync_time: The time that the DAG should be marked as sync'ed
:type sync_time: datetime
:return: None
"""
if owner is None:
owner = self.owner
if sync_time is None:
sync_time = timezone.utcnow()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == self.dag_id).first()
if not orm_dag:
orm_dag = DagModel(dag_id=self.dag_id)
self.log.info("Creating ORM DAG for %s", self.dag_id)
orm_dag.fileloc = self.fileloc
orm_dag.is_subdag = self.is_subdag
orm_dag.owners = owner
orm_dag.is_active = True
orm_dag.last_scheduler_run = sync_time
session.merge(orm_dag)
session.commit()
for subdag in self.subdags:
subdag.sync_to_db(owner=owner, sync_time=sync_time, session=session)
@staticmethod
@provide_session
def deactivate_unknown_dags(active_dag_ids, session=None):
"""
Given a list of known DAGs, deactivate any other DAGs that are
marked as active in the ORM
:param active_dag_ids: list of DAG IDs that are active
:type active_dag_ids: list[unicode]
:return: None
"""
if len(active_dag_ids) == 0:
return
for dag in session.query(
DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
dag.is_active = False
session.merge(dag)
@staticmethod
@provide_session
def deactivate_stale_dags(expiration_date, session=None):
"""
Deactivate any DAGs that were last touched by the scheduler before
the expiration date. These DAGs were likely deleted.
:param expiration_date: set inactive DAGs that were touched before this
time
:type expiration_date: datetime
:return: None
"""
log = LoggingMixin().log
for dag in session.query(
DagModel).filter(DagModel.last_scheduler_run < expiration_date,
DagModel.is_active).all():
log.info(
"Deactivating DAG ID %s since it was last touched by the scheduler at %s",
dag.dag_id, dag.last_scheduler_run.isoformat()
)
dag.is_active = False
session.merge(dag)
session.commit()
@staticmethod
@provide_session
def get_num_task_instances(dag_id, task_ids, states=None, session=None):
"""
Returns the number of task instances in the given DAG.
:param session: ORM session
:param dag_id: ID of the DAG to get the task concurrency of
:type dag_id: unicode
:param task_ids: A list of valid task IDs for the given DAG
:type task_ids: list[unicode]
:param states: A list of states to filter by if supplied
:type states: list[state]
        :return: The number of matching task instances
:rtype: int
"""
qry = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == dag_id,
TaskInstance.task_id.in_(task_ids))
if states is not None:
if None in states:
qry = qry.filter(or_(
TaskInstance.state.in_(states),
TaskInstance.state.is_(None)))
else:
qry = qry.filter(TaskInstance.state.in_(states))
return qry.scalar()
def test_cycle(self):
"""
Check to see if there are any cycles in the DAG. Returns False if no cycle found,
otherwise raises exception.
"""
# default of int is 0 which corresponds to CYCLE_NEW
visit_map = defaultdict(int)
for task_id in self.task_dict.keys():
# print('starting %s' % task_id)
if visit_map[task_id] == DagBag.CYCLE_NEW:
self._test_cycle_helper(visit_map, task_id)
return False
def _test_cycle_helper(self, visit_map, task_id):
"""
Checks if a cycle exists from the input task using DFS traversal
"""
# print('Inspecting %s' % task_id)
if visit_map[task_id] == DagBag.CYCLE_DONE:
return False
visit_map[task_id] = DagBag.CYCLE_IN_PROGRESS
task = self.task_dict[task_id]
for descendant_id in task.get_direct_relative_ids():
if visit_map[descendant_id] == DagBag.CYCLE_IN_PROGRESS:
msg = "Cycle detected in DAG. Faulty task: {0} to {1}".format(
task_id, descendant_id)
raise AirflowDagCycleException(msg)
else:
self._test_cycle_helper(visit_map, descendant_id)
visit_map[task_id] = DagBag.CYCLE_DONE
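    # Editor's illustrative sketch (hypothetical task ids, not part of the
    # original module) of the DFS cycle check above:
    #
    #   op1 >> op2 >> op1      # op1 -> op2 -> op1 forms a cycle
    #   dag.test_cycle()       # raises AirflowDagCycleException
    #
    # Tasks are coloured CYCLE_NEW -> CYCLE_IN_PROGRESS -> CYCLE_DONE; reaching a
    # task that is still IN_PROGRESS means a back edge, i.e. a cycle.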
class Chart(Base):
__tablename__ = "chart"
id = Column(Integer, primary_key=True)
label = Column(String(200))
conn_id = Column(String(ID_LEN), nullable=False)
user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)
chart_type = Column(String(100), default="line")
sql_layout = Column(String(50), default="series")
sql = Column(Text, default="SELECT series, x, y FROM table")
y_log_scale = Column(Boolean)
show_datatable = Column(Boolean)
show_sql = Column(Boolean, default=True)
height = Column(Integer, default=600)
default_params = Column(String(5000), default="{}")
owner = relationship(
"User", cascade=False, cascade_backrefs=False, backref='charts')
x_is_date = Column(Boolean, default=True)
iteration_no = Column(Integer, default=0)
last_modified = Column(UtcDateTime, default=timezone.utcnow)
def __repr__(self):
return self.label
class KnownEventType(Base):
__tablename__ = "known_event_type"
id = Column(Integer, primary_key=True)
know_event_type = Column(String(200))
def __repr__(self):
return self.know_event_type
class KnownEvent(Base):
__tablename__ = "known_event"
id = Column(Integer, primary_key=True)
label = Column(String(200))
start_date = Column(UtcDateTime)
end_date = Column(UtcDateTime)
user_id = Column(Integer(), ForeignKey('users.id'),)
known_event_type_id = Column(Integer(), ForeignKey('known_event_type.id'),)
reported_by = relationship(
"User", cascade=False, cascade_backrefs=False, backref='known_events')
event_type = relationship(
"KnownEventType",
cascade=False,
cascade_backrefs=False, backref='known_events')
description = Column(Text)
def __repr__(self):
return self.label
class Variable(Base, LoggingMixin):
__tablename__ = "variable"
id = Column(Integer, primary_key=True)
key = Column(String(ID_LEN), unique=True)
_val = Column('val', Text)
is_encrypted = Column(Boolean, unique=False, default=False)
def __repr__(self):
# Hiding the value
return '{} : {}'.format(self.key, self._val)
def get_val(self):
log = LoggingMixin().log
if self._val and self.is_encrypted:
try:
fernet = get_fernet()
except Exception:
log.error("Can't decrypt _val for key={}, FERNET_KEY "
"configuration missing".format(self.key))
return None
try:
return fernet.decrypt(bytes(self._val, 'utf-8')).decode()
except cryptography.fernet.InvalidToken:
log.error("Can't decrypt _val for key={}, invalid token "
"or value".format(self.key))
return None
else:
return self._val
def set_val(self, value):
if value:
try:
fernet = get_fernet()
self._val = fernet.encrypt(bytes(value, 'utf-8')).decode()
self.is_encrypted = True
except AirflowException:
self.log.exception(
"Failed to load fernet while encrypting value, "
"using non-encrypted value."
)
self._val = value
self.is_encrypted = False
@declared_attr
def val(cls):
return synonym('_val',
descriptor=property(cls.get_val, cls.set_val))
@classmethod
def setdefault(cls, key, default, deserialize_json=False):
"""
Like a Python builtin dict object, setdefault returns the current value
for a key, and if it isn't there, stores the default value and returns it.
:param key: Dict key for this Variable
:type key: String
:param default: Default value to set and return if the variable
isn't already in the DB
:type default: Mixed
:param deserialize_json: Store this as a JSON encoded value in the DB
and un-encode it when retrieving a value
:return: Mixed
"""
default_sentinel = object()
obj = Variable.get(key, default_var=default_sentinel,
deserialize_json=deserialize_json)
if obj is default_sentinel:
if default is not None:
Variable.set(key, default, serialize_json=deserialize_json)
return default
else:
raise ValueError('Default Value must be set')
else:
return obj
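    # Editor's illustrative usage sketch for the Variable accessors (hypothetical
    # keys and values, not part of the original module):
    #
    #   Variable.set('my_config', {'retries': 3}, serialize_json=True)
    #   cfg = Variable.get('my_config', deserialize_json=True)    # {'retries': 3}
    #   limit = Variable.setdefault('rate_limit', 100)            # stores 100 on first call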
@classmethod
@provide_session
def get(cls, key, default_var=None, deserialize_json=False, session=None):
obj = session.query(cls).filter(cls.key == key).first()
if obj is None:
if default_var is not None:
return default_var
else:
raise KeyError('Variable {} does not exist'.format(key))
else:
if deserialize_json:
return json.loads(obj.val)
else:
return obj.val
@classmethod
@provide_session
def set(cls, key, value, serialize_json=False, session=None):
if serialize_json:
stored_value = json.dumps(value)
else:
stored_value = str(value)
session.query(cls).filter(cls.key == key).delete()
session.add(Variable(key=key, val=stored_value))
session.flush()
class XCom(Base, LoggingMixin):
"""
Base class for XCom objects.
"""
__tablename__ = "xcom"
id = Column(Integer, primary_key=True)
key = Column(String(512))
value = Column(LargeBinary)
timestamp = Column(
DateTime, default=timezone.utcnow, nullable=False)
execution_date = Column(UtcDateTime, nullable=False)
# source information
task_id = Column(String(ID_LEN), nullable=False)
dag_id = Column(String(ID_LEN), nullable=False)
__table_args__ = (
Index('idx_xcom_dag_task_date', dag_id, task_id, execution_date, unique=False),
)
def __repr__(self):
return '<XCom "{key}" ({task_id} @ {execution_date})>'.format(
key=self.key,
task_id=self.task_id,
execution_date=self.execution_date)
@classmethod
@provide_session
def set(
cls,
key,
value,
execution_date,
task_id,
dag_id,
enable_pickling=None,
session=None):
"""
Store an XCom value.
TODO: "pickling" has been deprecated and JSON is preferred. "pickling" will be
removed in Airflow 2.0. :param enable_pickling: If pickling is not enabled, the
XCOM value will be parsed as JSON instead.
:return: None
"""
session.expunge_all()
if enable_pickling is None:
enable_pickling = configuration.conf.getboolean(
'core', 'enable_xcom_pickling'
)
if enable_pickling:
value = pickle.dumps(value)
else:
try:
value = json.dumps(value).encode('UTF-8')
except ValueError:
log = LoggingMixin().log
log.error("Could not serialize the XCOM value into JSON. "
"If you are using pickles instead of JSON "
"for XCOM, then you need to enable pickle "
"support for XCOM in your airflow config.")
raise
# remove any duplicate XComs
session.query(cls).filter(
cls.key == key,
cls.execution_date == execution_date,
cls.task_id == task_id,
cls.dag_id == dag_id).delete()
session.commit()
# insert new XCom
session.add(XCom(
key=key,
value=value,
execution_date=execution_date,
task_id=task_id,
dag_id=dag_id))
session.commit()
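    # Editor's illustrative usage sketch (hypothetical ids and dates, not part of
    # the original module). Values round-trip through JSON unless
    # 'enable_xcom_pickling' is set in the airflow config:
    #
    #   XCom.set(key='rows_loaded', value=42, execution_date=exec_date,
    #            task_id='load', dag_id='etl')
    #   XCom.get_one(key='rows_loaded', execution_date=exec_date,
    #                task_id='load', dag_id='etl')    # -> 42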
@classmethod
@provide_session
def get_one(cls,
execution_date,
key=None,
task_id=None,
dag_id=None,
include_prior_dates=False,
enable_pickling=None,
session=None):
"""
Retrieve an XCom value, optionally meeting certain criteria.
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
:param enable_pickling: If pickling is not enabled,
the XCOM value will be parsed to JSON instead.
:return: XCom value
"""
filters = []
if key:
filters.append(cls.key == key)
if task_id:
filters.append(cls.task_id == task_id)
if dag_id:
filters.append(cls.dag_id == dag_id)
if include_prior_dates:
filters.append(cls.execution_date <= execution_date)
else:
filters.append(cls.execution_date == execution_date)
query = (
session.query(cls.value).filter(and_(*filters))
.order_by(cls.execution_date.desc(), cls.timestamp.desc()))
result = query.first()
if result:
if enable_pickling is None:
enable_pickling = configuration.conf.getboolean(
'core', 'enable_xcom_pickling'
)
if enable_pickling:
return pickle.loads(result.value)
else:
try:
return json.loads(result.value.decode('UTF-8'))
except ValueError:
log = LoggingMixin().log
log.error("Could not serialize the XCOM value into JSON. "
"If you are using pickles instead of JSON "
"for XCOM, then you need to enable pickle "
"support for XCOM in your airflow config.")
raise
@classmethod
@provide_session
def get_many(cls,
execution_date,
key=None,
task_ids=None,
dag_ids=None,
include_prior_dates=False,
limit=100,
enable_pickling=None,
session=None):
"""
Retrieve an XCom value, optionally meeting certain criteria
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
"""
filters = []
if key:
filters.append(cls.key == key)
if task_ids:
filters.append(cls.task_id.in_(as_tuple(task_ids)))
if dag_ids:
filters.append(cls.dag_id.in_(as_tuple(dag_ids)))
if include_prior_dates:
filters.append(cls.execution_date <= execution_date)
else:
filters.append(cls.execution_date == execution_date)
query = (
session.query(cls).filter(and_(*filters))
.order_by(cls.execution_date.desc(), cls.timestamp.desc())
.limit(limit))
results = query.all()
if enable_pickling is None:
enable_pickling = configuration.conf.getboolean(
'core', 'enable_xcom_pickling'
)
for result in results:
if enable_pickling:
result.value = pickle.loads(result.value)
else:
try:
result.value = json.loads(result.value.decode('UTF-8'))
except ValueError:
log = LoggingMixin().log
log.error("Could not serialize the XCOM value into JSON. "
"If you are using pickles instead of JSON "
"for XCOM, then you need to enable pickle "
"support for XCOM in your airflow config.")
raise
return results
@classmethod
@provide_session
def delete(cls, xcoms, session=None):
if isinstance(xcoms, XCom):
xcoms = [xcoms]
for xcom in xcoms:
if not isinstance(xcom, XCom):
raise TypeError(
'Expected XCom; received {}'.format(xcom.__class__.__name__)
)
session.delete(xcom)
session.commit()
class DagStat(Base):
__tablename__ = "dag_stats"
dag_id = Column(String(ID_LEN), primary_key=True)
state = Column(String(50), primary_key=True)
count = Column(Integer, default=0)
dirty = Column(Boolean, default=False)
def __init__(self, dag_id, state, count=0, dirty=False):
self.dag_id = dag_id
self.state = state
self.count = count
self.dirty = dirty
@staticmethod
@provide_session
def set_dirty(dag_id, session=None):
"""
:param dag_id: the dag_id to mark dirty
:param session: database session
:return:
"""
DagStat.create(dag_id=dag_id, session=session)
try:
stats = session.query(DagStat).filter(
DagStat.dag_id == dag_id
).with_for_update().all()
for stat in stats:
stat.dirty = True
session.commit()
except Exception as e:
session.rollback()
log = LoggingMixin().log
log.warning("Could not update dag stats for %s", dag_id)
log.exception(e)
@staticmethod
@provide_session
def update(dag_ids=None, dirty_only=True, session=None):
"""
Updates the stats for dirty/out-of-sync dags
:param dag_ids: dag_ids to be updated
:type dag_ids: list
        :param dirty_only: only update stats for dags marked dirty, defaults to True
:type dirty_only: bool
:param session: db session to use
:type session: Session
"""
try:
qry = session.query(DagStat)
if dag_ids:
qry = qry.filter(DagStat.dag_id.in_(set(dag_ids)))
if dirty_only:
qry = qry.filter(DagStat.dirty == True) # noqa
qry = qry.with_for_update().all()
ids = set([dag_stat.dag_id for dag_stat in qry])
# avoid querying with an empty IN clause
if len(ids) == 0:
session.commit()
return
dagstat_states = set(itertools.product(ids, State.dag_states))
qry = (
session.query(DagRun.dag_id, DagRun.state, func.count('*'))
.filter(DagRun.dag_id.in_(ids))
.group_by(DagRun.dag_id, DagRun.state)
)
counts = {(dag_id, state): count for dag_id, state, count in qry}
for dag_id, state in dagstat_states:
count = 0
if (dag_id, state) in counts:
count = counts[(dag_id, state)]
session.merge(
DagStat(dag_id=dag_id, state=state, count=count, dirty=False)
)
session.commit()
except Exception as e:
session.rollback()
log = LoggingMixin().log
log.warning("Could not update dag stat table")
log.exception(e)
@staticmethod
@provide_session
def create(dag_id, session=None):
"""
        Creates the missing states in the stats table for the specified dag
:param dag_id: dag id of the dag to create stats for
:param session: database session
:return:
"""
# unfortunately sqlalchemy does not know upsert
qry = session.query(DagStat).filter(DagStat.dag_id == dag_id).all()
states = [dag_stat.state for dag_stat in qry]
for state in State.dag_states:
if state not in states:
try:
session.merge(DagStat(dag_id=dag_id, state=state))
session.commit()
except Exception as e:
session.rollback()
log = LoggingMixin().log
log.warning("Could not create stat record")
log.exception(e)
class DagRun(Base, LoggingMixin):
"""
DagRun describes an instance of a Dag. It can be created
by the scheduler (for regular runs) or by an external trigger
"""
__tablename__ = "dag_run"
ID_PREFIX = 'scheduled__'
ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN))
execution_date = Column(UtcDateTime, default=timezone.utcnow)
start_date = Column(UtcDateTime, default=timezone.utcnow)
end_date = Column(UtcDateTime)
_state = Column('state', String(50), default=State.RUNNING)
run_id = Column(String(ID_LEN))
external_trigger = Column(Boolean, default=True)
conf = Column(PickleType)
dag = None
__table_args__ = (
Index('dr_run_id', dag_id, run_id, unique=True),
)
def __repr__(self):
return (
'<DagRun {dag_id} @ {execution_date}: {run_id}, '
'externally triggered: {external_trigger}>'
).format(
dag_id=self.dag_id,
execution_date=self.execution_date,
run_id=self.run_id,
external_trigger=self.external_trigger)
def get_state(self):
return self._state
def set_state(self, state):
if self._state != state:
self._state = state
if self.dag_id is not None:
                # FIXME: Due to the scoped_session factory we don't get a clean
# session here, so something really weird goes on:
# if you try to close the session dag runs will end up detached
session = settings.Session()
DagStat.set_dirty(self.dag_id, session=session)
@declared_attr
def state(self):
return synonym('_state',
descriptor=property(self.get_state, self.set_state))
@classmethod
def id_for_date(cls, date, prefix=ID_FORMAT_PREFIX):
return prefix.format(date.isoformat()[:19])
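    # Example: id_for_date(datetime(2017, 1, 3, 12)) returns
    # 'scheduled__2017-01-03T12:00:00' (the isoformat is truncated to seconds).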
@provide_session
def refresh_from_db(self, session=None):
"""
Reloads the current dagrun from the database
:param session: database session
"""
DR = DagRun
exec_date = func.cast(self.execution_date, DateTime)
dr = session.query(DR).filter(
DR.dag_id == self.dag_id,
func.cast(DR.execution_date, DateTime) == exec_date,
DR.run_id == self.run_id
).one()
self.id = dr.id
self.state = dr.state
@staticmethod
@provide_session
def find(dag_id=None, run_id=None, execution_date=None,
state=None, external_trigger=None, no_backfills=False,
session=None):
"""
Returns a set of dag runs for the given search criteria.
:param dag_id: the dag_id to find dag runs for
:type dag_id: integer, list
        :param run_id: defines the run id for this dag run
:type run_id: string
:param execution_date: the execution date
:type execution_date: datetime
:param state: the state of the dag run
:type state: State
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param no_backfills: return no backfills (True), return all (False).
Defaults to False
:type no_backfills: bool
:param session: database session
:type session: Session
"""
DR = DagRun
qry = session.query(DR)
if dag_id:
qry = qry.filter(DR.dag_id == dag_id)
if run_id:
qry = qry.filter(DR.run_id == run_id)
if execution_date:
if isinstance(execution_date, list):
qry = qry.filter(DR.execution_date.in_(execution_date))
else:
qry = qry.filter(DR.execution_date == execution_date)
if state:
qry = qry.filter(DR.state == state)
if external_trigger is not None:
qry = qry.filter(DR.external_trigger == external_trigger)
if no_backfills:
# in order to prevent a circular dependency
from airflow.jobs import BackfillJob
qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'))
dr = qry.order_by(DR.execution_date).all()
return dr
@provide_session
def get_task_instances(self, state=None, session=None):
"""
Returns the task instances for this dag run
"""
TI = TaskInstance
tis = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date == self.execution_date,
)
if state:
if isinstance(state, six.string_types):
tis = tis.filter(TI.state == state)
else:
# this is required to deal with NULL values
if None in state:
tis = tis.filter(
or_(TI.state.in_(state),
TI.state.is_(None))
)
else:
tis = tis.filter(TI.state.in_(state))
if self.dag and self.dag.partial:
tis = tis.filter(TI.task_id.in_(self.dag.task_ids))
return tis.all()
@provide_session
def get_task_instance(self, task_id, session=None):
"""
Returns the task instance specified by task_id for this dag run
:param task_id: the task id
"""
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date == self.execution_date,
TI.task_id == task_id
).first()
return ti
def get_dag(self):
"""
Returns the Dag associated with this DagRun.
:return: DAG
"""
if not self.dag:
raise AirflowException("The DAG (.dag) for {} needs to be set"
.format(self))
return self.dag
@provide_session
def get_previous_dagrun(self, session=None):
"""The previous DagRun, if there is one"""
return session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date < self.execution_date
).order_by(
DagRun.execution_date.desc()
).first()
@provide_session
def get_previous_scheduled_dagrun(self, session=None):
"""The previous, SCHEDULED DagRun, if there is one"""
dag = self.get_dag()
return session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == dag.previous_schedule(self.execution_date)
).first()
@provide_session
def update_state(self, session=None):
"""
Determines the overall state of the DagRun based on the state
of its TaskInstances.
:return: State
"""
dag = self.get_dag()
tis = self.get_task_instances(session=session)
self.log.info("Updating state for %s considering %s task(s)", self, len(tis))
for ti in list(tis):
# skip in db?
if ti.state == State.REMOVED:
tis.remove(ti)
else:
ti.task = dag.get_task(ti.task_id)
# pre-calculate
# db is faster
start_dttm = timezone.utcnow()
unfinished_tasks = self.get_task_instances(
state=State.unfinished(),
session=session
)
none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
none_task_concurrency = all(t.task.task_concurrency is None
for t in unfinished_tasks)
# small speed up
if unfinished_tasks and none_depends_on_past and none_task_concurrency:
            # todo: this can actually get pretty slow: one task costs between 0.01-0.15s
no_dependencies_met = True
for ut in unfinished_tasks:
# We need to flag upstream and check for changes because upstream
# failures can result in deadlock false positives
old_state = ut.state
deps_met = ut.are_dependencies_met(
dep_context=DepContext(
flag_upstream_failed=True,
ignore_in_retry_period=True),
session=session)
if deps_met or old_state != ut.current_state(session=session):
no_dependencies_met = False
break
duration = (timezone.utcnow() - start_dttm).total_seconds() * 1000
Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration)
# future: remove the check on adhoc tasks (=active_tasks)
if len(tis) == len(dag.active_tasks):
root_ids = [t.task_id for t in dag.roots]
roots = [t for t in tis if t.task_id in root_ids]
# if all roots finished and at least one failed, the run failed
if (not unfinished_tasks and
any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)):
self.log.info('Marking run %s failed', self)
self.state = State.FAILED
dag.handle_callback(self, success=False, reason='task_failure',
session=session)
# if all roots succeeded and no unfinished tasks, the run succeeded
elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED)
for r in roots):
self.log.info('Marking run %s successful', self)
self.state = State.SUCCESS
dag.handle_callback(self, success=True, reason='success', session=session)
# if *all tasks* are deadlocked, the run failed
elif (unfinished_tasks and none_depends_on_past and
none_task_concurrency and no_dependencies_met):
self.log.info('Deadlock; marking run %s failed', self)
self.state = State.FAILED
dag.handle_callback(self, success=False, reason='all_tasks_deadlocked',
session=session)
# finally, if the roots aren't done, the dag is still running
else:
self.state = State.RUNNING
        # todo: determine whether we want to use with_for_update to make sure we lock the run
session.merge(self)
session.commit()
return self.state
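    # Summary of the decision above: the run is marked FAILED when all roots have
    # finished and at least one failed, SUCCESS when all roots finished in
    # SUCCESS/SKIPPED, FAILED on a full deadlock (unfinished tasks but none can
    # run), and otherwise stays RUNNING.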
@provide_session
def verify_integrity(self, session=None):
"""
Verifies the DagRun by checking for removed tasks or tasks that are not in the
database yet. It will set state to removed or add the task if required.
"""
dag = self.get_dag()
tis = self.get_task_instances(session=session)
# check for removed or restored tasks
task_ids = []
for ti in tis:
task_ids.append(ti.task_id)
task = None
try:
task = dag.get_task(ti.task_id)
except AirflowException:
if ti.state == State.REMOVED:
pass # ti has already been removed, just ignore it
                elif self.state != State.RUNNING and not dag.partial:
self.log.warning("Failed to get task '{}' for dag '{}'. "
"Marking it as removed.".format(ti, dag))
Stats.incr(
"task_removed_from_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.REMOVED
is_task_in_dag = task is not None
should_restore_task = is_task_in_dag and ti.state == State.REMOVED
if should_restore_task:
self.log.info("Restoring task '{}' which was previously "
"removed from DAG '{}'".format(ti, dag))
Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.NONE
# check for missing tasks
for task in six.itervalues(dag.task_dict):
if task.adhoc:
continue
if task.task_id not in task_ids:
ti = TaskInstance(task, self.execution_date)
session.add(ti)
session.commit()
@staticmethod
def get_run(session, dag_id, execution_date):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:param execution_date: execution date
:type execution_date: datetime
:return: DagRun corresponding to the given dag_id and execution date
if one exists. None otherwise.
:rtype: DagRun
"""
qry = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.external_trigger == False, # noqa
DagRun.execution_date == execution_date,
)
return qry.first()
@property
def is_backfill(self):
from airflow.jobs import BackfillJob
return self.run_id.startswith(BackfillJob.ID_PREFIX)
@classmethod
@provide_session
def get_latest_runs(cls, session):
"""Returns the latest DagRun for each DAG. """
subquery = (
session
.query(
cls.dag_id,
func.max(cls.execution_date).label('execution_date'))
.group_by(cls.dag_id)
.subquery()
)
dagruns = (
session
.query(cls)
.join(subquery,
and_(cls.dag_id == subquery.c.dag_id,
cls.execution_date == subquery.c.execution_date))
.all()
)
return dagruns
class Pool(Base):
__tablename__ = "slot_pool"
id = Column(Integer, primary_key=True)
pool = Column(String(50), unique=True)
slots = Column(Integer, default=0)
description = Column(Text)
def __repr__(self):
return self.pool
def to_json(self):
return {
'id': self.id,
'pool': self.pool,
'slots': self.slots,
'description': self.description,
}
@provide_session
def used_slots(self, session):
"""
Returns the number of slots used at the moment
"""
running = (
session
.query(TaskInstance)
.filter(TaskInstance.pool == self.pool)
.filter(TaskInstance.state == State.RUNNING)
.count()
)
return running
@provide_session
def queued_slots(self, session):
"""
        Returns the number of slots queued at the moment
"""
return (
session
.query(TaskInstance)
.filter(TaskInstance.pool == self.pool)
.filter(TaskInstance.state == State.QUEUED)
.count()
)
@provide_session
def open_slots(self, session):
"""
Returns the number of slots open at the moment
"""
used_slots = self.used_slots(session=session)
queued_slots = self.queued_slots(session=session)
return self.slots - used_slots - queued_slots
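    # Example: a pool created with slots=5 that currently has 2 RUNNING and
    # 1 QUEUED task instances reports open_slots() == 5 - 2 - 1 == 2.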
class SlaMiss(Base):
"""
Model that stores a history of the SLA that have been missed.
It is used to keep track of SLA failures over time and to avoid double
triggering alert emails.
"""
__tablename__ = "sla_miss"
task_id = Column(String(ID_LEN), primary_key=True)
dag_id = Column(String(ID_LEN), primary_key=True)
execution_date = Column(UtcDateTime, primary_key=True)
email_sent = Column(Boolean, default=False)
timestamp = Column(UtcDateTime)
description = Column(Text)
notification_sent = Column(Boolean, default=False)
def __repr__(self):
return str((
self.dag_id, self.task_id, self.execution_date.isoformat()))
class ImportError(Base):
__tablename__ = "import_error"
id = Column(Integer, primary_key=True)
timestamp = Column(UtcDateTime)
filename = Column(String(1024))
stacktrace = Column(Text)
class KubeResourceVersion(Base):
__tablename__ = "kube_resource_version"
one_row_id = Column(Boolean, server_default=sqltrue(), primary_key=True)
resource_version = Column(String(255))
@staticmethod
@provide_session
def get_current_resource_version(session=None):
(resource_version,) = session.query(KubeResourceVersion.resource_version).one()
return resource_version
@staticmethod
@provide_session
def checkpoint_resource_version(resource_version, session=None):
if resource_version:
session.query(KubeResourceVersion).update({
KubeResourceVersion.resource_version: resource_version
})
session.commit()
@staticmethod
@provide_session
def reset_resource_version(session=None):
session.query(KubeResourceVersion).update({
KubeResourceVersion.resource_version: '0'
})
session.commit()
return '0'
class KubeWorkerIdentifier(Base):
__tablename__ = "kube_worker_uuid"
one_row_id = Column(Boolean, server_default=sqltrue(), primary_key=True)
worker_uuid = Column(String(255))
@staticmethod
@provide_session
def get_or_create_current_kube_worker_uuid(session=None):
(worker_uuid,) = session.query(KubeWorkerIdentifier.worker_uuid).one()
if worker_uuid == '':
worker_uuid = str(uuid.uuid4())
KubeWorkerIdentifier.checkpoint_kube_worker_uuid(worker_uuid, session)
return worker_uuid
@staticmethod
@provide_session
def checkpoint_kube_worker_uuid(worker_uuid, session=None):
if worker_uuid:
session.query(KubeWorkerIdentifier).update({
KubeWorkerIdentifier.worker_uuid: worker_uuid
})
session.commit()
# --- docs/conf.py | repo: arrikto/py2deb @ 945cadc080f6263b72dc9ef9cb1d4b7d68c2ba5b | blob: 062a60aa4ce706cf05420419e558b75e0be34834 | 2,873 bytes | Python | license: MIT | stars: 1 (2021-12-12) ---
"""
Documentation build configuration file for the `py2deb` package.
This Python script contains the Sphinx configuration for building the
documentation of the `py2deb` project. This file is execfile()d with the
current directory set to its containing dir.
"""
import os
import sys
# Add the 'py2deb' source distribution's root directory to the module path.
sys.path.insert(0, os.path.abspath(os.pardir))
# -- General configuration -----------------------------------------------------
# Sphinx extension module names.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'humanfriendly.sphinx',
'property_manager.sphinx',
]
# Configuration for the `autodoc' extension.
autodoc_member_order = 'bysource'
# Paths that contain templates, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'py2deb'
copyright = u'2018, Paylogic International (Arjan Verwer & Peter Odding)'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Find the package version and make it the release.
from py2deb import __version__ as py2deb_version # noqa
# The short X.Y version.
version = '.'.join(py2deb_version.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = py2deb_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Refer to the Python standard library.
# From: http://twistedmatrix.com/trac/ticket/4582.
intersphinx_mapping = {
'debpkgtools': ('https://deb-pkg-tools.readthedocs.io/en/latest', None),
'executor': ('https://executor.readthedocs.io/en/latest', None),
'humanfriendly': ('https://humanfriendly.readthedocs.io/en/latest', None),
'pipaccel': ('https://pip-accel.readthedocs.io/en/latest', None),
'propertymanager': ('https://property-manager.readthedocs.io/en/latest', None),
'python': ('https://docs.python.org/2', None),
'setuptools': ('https://setuptools.readthedocs.io/en/latest', None),
}
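# Editor's illustrative note (not part of the original configuration): with the
# mapping above, cross-references such as :class:`executor.ExternalCommand` or
# references into the Python standard library resolve against the external
# projects' hosted documentation.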
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# --- package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py | repo: DYeag/vCenterShell @ e2e24cd938a92a68f4a8e6a860810d3ef72aae6d | blob: 4ae20d9f2578e9047e17c8b21d1f512bf2ecea0e | 29,054 bytes | Python | license: Apache-2.0 ---
import time
import requests
from pyVmomi import vim
from cloudshell.cp.vcenter.common.utilites.io import get_path_and_name
from cloudshell.cp.vcenter.common.vcenter.vm_location import VMLocation
from cloudshell.cp.vcenter.common.utilites.common_utils import str2bool
from cloudshell.cp.vcenter.common.vcenter.task_waiter import SynchronousTaskWaiter
from cloudshell.cp.vcenter.exceptions.task_waiter import TaskFaultException
class VCenterAuthError(Exception):
def __init__(self, message, original_exception):
"""
:param str message:
        :param original_exception: The original exception that was raised
:return:
"""
super(VCenterAuthError, self).__init__(message)
self.original_exception = original_exception
class pyVmomiService:
# region consts
ChildEntity = 'childEntity'
VM = 'vmFolder'
Network = 'networkFolder'
Datacenter = 'datacenterFolder'
Host = 'hostFolder'
Datastore = 'datastoreFolder'
Cluster = 'cluster'
# endregion
def __init__(self, connect, disconnect, task_waiter, vim_import=None):
"""
:param SynchronousTaskWaiter task_waiter:
:return:
"""
self.pyvmomi_connect = connect
self.pyvmomi_disconnect = disconnect
self.task_waiter = task_waiter
if vim_import is None:
from pyVmomi import vim
self.vim = vim
else:
self.vim = vim_import
def connect(self, address, user, password, port=443):
"""
Connect to vCenter via SSL and return SI object
:param address: vCenter address (host / ip address)
:param user: user name for authentication
        :param password: password for authentication
:param port: port for the SSL connection. Default = 443
"""
        # Disabling urllib3 ssl warnings
        requests.packages.urllib3.disable_warnings()
        # Disabling SSL certificate verification
context = None
import ssl
if hasattr(ssl, 'SSLContext'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
try:
if context:
try:
                    # si = SmartConnect(host=address, user=user, pwd=password, port=port, sslContext=context)
si = self.pyvmomi_connect(host=address, user=user, pwd=password, port=port, sslContext=context)
except ssl.SSLEOFError:
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.verify_mode = ssl.CERT_NONE
si = self.pyvmomi_connect(host=address, user=user, pwd=password, port=port, sslContext=context)
else:
                # si = SmartConnect(host=address, user=user, pwd=password, port=port)
si = self.pyvmomi_connect(host=address, user=user, pwd=password, port=port)
return si
except vim.fault.InvalidLogin as e:
raise VCenterAuthError(e.msg, e)
except IOError as e:
# logger.info("I/O error({0}): {1}".format(e.errno, e.strerror))
raise ValueError('Cannot connect to vCenter, please check that the address is valid')
def disconnect(self, si):
""" Disconnect from vCenter """
self.pyvmomi_disconnect(si)
def find_datacenter_by_name(self, si, path, name):
"""
Finds datacenter in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datacenter name to return
"""
return self.find_obj_by_path(si, path, name, self.Datacenter)
def find_by_uuid(self, si, uuid, is_vm=True, path=None, data_center=None):
"""
        Finds a vm/host by its uuid in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param uuid: the object uuid
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param is_vm: if true, search for virtual machines, otherwise search for hosts
:param data_center:
"""
if uuid is None:
return None
if path is not None:
data_center = self.find_item_in_path_by_type(si, path, vim.Datacenter)
search_index = si.content.searchIndex
return search_index.FindByUuid(data_center, uuid, is_vm)
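    # Editor's illustrative usage sketch (hypothetical uuid and datacenter name,
    # not part of the original module):
    #
    #   vm = service.find_by_uuid(si, '42110bad-0000-0000-0000-000000000000',
    #                             is_vm=True, path='MyDatacenter')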
def find_item_in_path_by_type(self, si, path, obj_type):
"""
This function finds the first item of that type in path
:param ServiceInstance si: pyvmomi ServiceInstance
:param str path: the path to search in
:param type obj_type: the vim type of the object
:return: pyvmomi type instance object or None
"""
if obj_type is None:
return None
search_index = si.content.searchIndex
sub_folder = si.content.rootFolder
if path is None or not path:
return sub_folder
paths = path.split("/")
for currPath in paths:
if currPath is None or not currPath:
continue
manage = search_index.FindChild(sub_folder, currPath)
if isinstance(manage, obj_type):
return manage
return None
def find_host_by_name(self, si, path, name):
"""
        Finds host in the vCenter or returns "None"
        :param si: pyvmomi 'ServiceInstance'
        :param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
        :param name: the host name to return
"""
return self.find_obj_by_path(si, path, name, self.Host)
def find_datastore_by_name(self, si, path, name):
"""
Finds datastore in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datastore name to return
"""
return self.find_obj_by_path(si, path, name, self.Datastore)
def find_portgroup(self, si, dv_switch_path, name):
"""
Returns the portgroup on the dvSwitch
:param name: str
:param dv_switch_path: str
:param si: service instance
"""
dv_switch = self.get_folder(si, dv_switch_path)
if dv_switch and dv_switch.portgroup:
for port in dv_switch.portgroup:
if port.name == name:
return port
return None
def find_network_by_name(self, si, path, name):
"""
Finds network in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
        :param name: the network name to return
"""
return self.find_obj_by_path(si, path, name, self.Network)
def find_vm_by_name(self, si, path, name):
"""
Finds vm in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the vm name to return
"""
return self.find_obj_by_path(si, path, name, self.VM)
def find_obj_by_path(self, si, path, name, type_name):
"""
Finds object in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the object name to return
:param type_name: the name of the type, can be (vm, network, host, datastore)
"""
folder = self.get_folder(si, path)
if folder is None:
raise ValueError('vmomi managed object not found at: {0}'.format(path))
look_in = None
if hasattr(folder, type_name):
look_in = getattr(folder, type_name)
if hasattr(folder, self.ChildEntity):
look_in = folder
if look_in is None:
raise ValueError('vmomi managed object not found at: {0}'.format(path))
search_index = si.content.searchIndex
        # searches for the specific vm in the folder
return search_index.FindChild(look_in, name)
    def find_dvs_by_path(self, si, path):
        """
        Finds the DvSwitch at the given path in the vCenter or raises ValueError
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
"""
dvs = self.get_folder(si, path)
if not dvs:
raise ValueError('Could not find Default DvSwitch in path {0}'.format(path))
elif not isinstance(dvs, vim.dvs.VmwareDistributedVirtualSwitch):
raise ValueError('The object in path {0} is {1} and not a DvSwitch'.format(path, type(dvs)))
return dvs
def get_folder(self, si, path, root=None):
"""
Finds folder in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
"""
search_index = si.content.searchIndex
sub_folder = root if root else si.content.rootFolder
if not path:
return sub_folder
paths = [p for p in path.split("/") if p]
child = None
try:
new_root = search_index.FindChild(sub_folder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
except:
child = None
if child is None and hasattr(sub_folder, self.ChildEntity):
new_root = search_index.FindChild(sub_folder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, self.VM):
new_root = search_index.FindChild(sub_folder.vmFolder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, self.Datastore):
new_root = search_index.FindChild(sub_folder.datastoreFolder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, self.Network):
new_root = search_index.FindChild(sub_folder.networkFolder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, self.Host):
new_root = search_index.FindChild(sub_folder.hostFolder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, self.Datacenter):
new_root = search_index.FindChild(sub_folder.datacenterFolder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, 'resourcePool'):
new_root = search_index.FindChild(sub_folder.resourcePool, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
return child
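    # Editor's note with an illustrative sketch (hypothetical inventory names, not
    # part of the original module): get_folder() walks the slash-separated path one
    # component at a time, trying child entities, vm/datastore/network/host/
    # datacenter folders and resource pools at each level.
    #
    #   cluster = service.get_folder(si, 'MyDatacenter/MyCluster')
    #   folder = service.get_folder(si, 'MyDatacenter/Deployed/Sandboxes')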
def get_network_by_full_name(self, si, default_network_full_name):
"""
Find network by a Full Name
        :param default_network_full_name: <str> Full Network Name - like 'Root/Folder/Network'
:return:
"""
path, name = get_path_and_name(default_network_full_name)
return self.find_network_by_name(si, path, name) if name else None
def get_obj(self, content, vimtype, name):
"""
Return an object by name for a specific type, if name is None the
first found object is returned
:param content: pyvmomi content object
        :param vimtype: the type of object to search for
:param name: the object name to return
"""
obj = None
container = self._get_all_objects_by_type(content, vimtype)
        # If no name was given, return the first object found that matches the given vimtype
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
return obj
@staticmethod
def _get_all_objects_by_type(content, vimtype):
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
return container
@staticmethod
def get_default_from_vcenter_by_type(si, vimtype, accept_multi):
arr_items = pyVmomiService.get_all_items_in_vcenter(si, vimtype)
if arr_items:
if accept_multi or len(arr_items) == 1:
return arr_items[0]
            raise Exception('There is more than one item of the given type')
raise KeyError('Could not find item of the given type')
@staticmethod
def get_all_items_in_vcenter(si, type_filter, root=None):
root = root if root else si.content.rootFolder
container = si.content.viewManager.CreateContainerView(container=root, recursive=True)
return [item for item in container.view if not type_filter or isinstance(item, type_filter)]
class CloneVmParameters:
"""
This is clone_vm method params object
"""
def __init__(self,
si,
template_name,
vm_name,
vm_folder,
datastore_name=None,
cluster_name=None,
resource_pool=None,
power_on=True,
snapshot=''):
"""
Constructor of CloneVmParameters
:param si: pyvmomi 'ServiceInstance'
:param template_name: str: the name of the template/vm to clone
:param vm_name: str: the name that will be given to the cloned vm
:param vm_folder: str: the path to the location of the template/vm to clone
:param datastore_name: str: the name of the datastore
            :param cluster_name: str: the name of the cluster
:param resource_pool: str: the name of the resource pool
:param power_on: bool: turn on the cloned vm
:param snapshot: str: the name of the snapshot to clone from
"""
self.si = si
self.template_name = template_name
self.vm_name = vm_name
self.vm_folder = vm_folder
self.datastore_name = datastore_name
self.cluster_name = cluster_name
self.resource_pool = resource_pool
self.power_on = str2bool(power_on)
self.snapshot = snapshot
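    # Editor's illustrative construction sketch (hypothetical names, not part of
    # the original module):
    #
    #   params = pyVmomiService.CloneVmParameters(
    #       si=si,
    #       template_name='Templates/ubuntu-template',
    #       vm_name='ubuntu-clone-01',
    #       vm_folder='MyDatacenter/Deployed',
    #       datastore_name='datastore1',
    #       power_on=False)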
class CloneVmResult:
"""
Clone vm result object, will contain the cloned vm or error message
"""
def __init__(self, vm=None, error=None):
"""
Constructor receives the cloned vm or the error message
:param vm: cloned vm
:param error: will contain the error message if there is one
"""
self.vm = vm
self.error = error
def clone_vm(self, clone_params, logger, cancellation_context):
"""
        Clone a VM from a template/VM and return the vm object, or raise if an argument is not valid
:param cancellation_context:
        :param clone_params: CloneVmParameters
:param logger:
"""
result = self.CloneVmResult()
if not isinstance(clone_params.si, self.vim.ServiceInstance):
result.error = 'si must be init as ServiceInstance'
return result
if clone_params.template_name is None:
result.error = 'template_name param cannot be None'
return result
if clone_params.vm_name is None:
result.error = 'vm_name param cannot be None'
return result
if clone_params.vm_folder is None:
result.error = 'vm_folder param cannot be None'
return result
datacenter = self.get_datacenter(clone_params)
dest_folder = self._get_destination_folder(clone_params)
vm_location = VMLocation.create_from_full_path(clone_params.template_name)
template = self._get_template(clone_params, vm_location)
snapshot = self._get_snapshot(clone_params, template)
resource_pool, host = self._get_resource_pool(datacenter.name, clone_params)
if not resource_pool and not host:
            raise ValueError('The specified host, cluster or resource pool could not be found')
        # set relo_spec
placement = self.vim.vm.RelocateSpec()
if resource_pool:
placement.pool = resource_pool
if host:
placement.host = host
clone_spec = self.vim.vm.CloneSpec()
if snapshot:
clone_spec.snapshot = snapshot
clone_spec.template = False
placement.diskMoveType = 'createNewChildDiskBacking'
placement.datastore = self._get_datastore(clone_params)
# after deployment the vm must be powered off and will be powered on if needed by orchestration driver
clone_spec.location = placement
        # clone_params.power_on is intentionally ignored here:
        # due to hotfix 1 for release 1.0 the clone is always created powered off
clone_spec.powerOn = False
logger.info("cloning VM...")
try:
task = template.Clone(folder=dest_folder, name=clone_params.vm_name, spec=clone_spec)
vm = self.task_waiter.wait_for_task(task=task, logger=logger, action_name='Clone VM',
cancellation_context=cancellation_context)
except TaskFaultException:
raise
except vim.fault.NoPermission as error:
logger.error("vcenter returned - no permission: {0}".format(error))
            raise Exception('Permissions are not set correctly, please check the log for more info.')
except Exception as e:
logger.error("error deploying: {0}".format(e))
raise Exception('Error has occurred while deploying, please look at the log for more info.')
result.vm = vm
return result
def get_datacenter(self, clone_params):
splited = clone_params.vm_folder.split('/')
root_path = splited[0]
datacenter = self.get_folder(clone_params.si, root_path)
return datacenter
def _get_destination_folder(self, clone_params):
managed_object = self.get_folder(clone_params.si, clone_params.vm_folder)
dest_folder = ''
if isinstance(managed_object, self.vim.Datacenter):
dest_folder = managed_object.vmFolder
elif isinstance(managed_object, self.vim.Folder):
dest_folder = managed_object
if not dest_folder:
raise ValueError('Failed to find folder: {0}'.format(clone_params.vm_folder))
return dest_folder
def _get_template(self, clone_params, vm_location):
template = self.find_vm_by_name(clone_params.si, vm_location.path, vm_location.name)
if not template:
raise ValueError('Virtual Machine Template with name {0} was not found under folder {1}'
.format(vm_location.name, vm_location.path))
return template
def _get_datastore(self, clone_params):
datastore = ''
parts = clone_params.datastore_name.split('/')
if not parts:
            raise ValueError('Datastore name cannot be empty')
name = parts[len(parts) - 1]
if name:
datastore = self.get_obj(clone_params.si.content,
[self.vim.Datastore],
name)
if not datastore:
datastore = self.get_obj(clone_params.si.content,
[self.vim.StoragePod],
name)
if datastore:
datastore = sorted(datastore.childEntity,
key=lambda data: data.summary.freeSpace,
reverse=True)[0]
if not datastore:
raise ValueError('Could not find Datastore: "{0}"'.format(clone_params.datastore_name))
return datastore
def _get_resource_pool(self, datacenter_name, clone_params):
resource_full_path = '{0}/{1}/{2}'.format(datacenter_name,
clone_params.cluster_name,
clone_params.resource_pool)
obj = self.get_folder(clone_params.si, resource_full_path)
resource_pool = None
host = None
if isinstance(obj, self.vim.HostSystem):
host = obj
resource_pool = obj.parent.resourcePool
elif isinstance(obj, self.vim.ResourcePool):
resource_pool = obj
elif isinstance(obj, self.vim.ClusterComputeResource):
resource_pool = obj.resourcePool
return resource_pool, host
def destroy_vm(self, vm, logger):
"""
destroy the given vm
        :param vm: virtual machine pyvmomi object
:param logger:
"""
self.power_off_before_destroy(logger, vm)
logger.info(("Destroying VM {0}".format(vm.name)))
task = vm.Destroy_Task()
return self.task_waiter.wait_for_task(task=task, logger=logger, action_name="Destroy VM")
def power_off_before_destroy(self, logger, vm):
if vm.runtime.powerState == 'poweredOn':
logger.info(("The current powerState is: {0}. Attempting to power off {1}"
.format(vm.runtime.powerState, vm.name)))
task = vm.PowerOffVM_Task()
self.task_waiter.wait_for_task(task=task, logger=logger, action_name="Power Off Before Destroy")
def destroy_vm_by_name(self, si, vm_name, vm_path, logger):
"""
destroy the given vm
:param si: pyvmomi 'ServiceInstance'
        :param vm_name: str name of the vm to be destroyed
:param vm_path: str path to the vm that will be destroyed
:param logger:
"""
if vm_name is not None:
vm = self.find_vm_by_name(si, vm_path, vm_name)
if vm:
return self.destroy_vm(vm, logger)
raise ValueError('vm not found')
def destroy_vm_by_uuid(self, si, vm_uuid, vm_path, logger):
"""
destroy the given vm
:param si: pyvmomi 'ServiceInstance'
        :param vm_uuid: str uuid of the vm to be destroyed
:param vm_path: str path to the vm that will be destroyed
:param logger:
"""
if vm_uuid is not None:
            vm = self.find_by_uuid(si, vm_uuid, path=vm_path)
if vm:
return self.destroy_vm(vm, logger)
        # raise instead of returning 'vm not found',
        # to keep the same interface as 'destroy_vm_by_name'
raise ValueError('vm not found')
def get_vm_by_uuid(self, si, vm_uuid):
return self.find_by_uuid(si, vm_uuid, True)
def get_network_by_name_from_vm(self, vm, network_name):
for network in vm.network:
if network_name == network.name:
return network
return None
def get_network_by_key_from_vm(self, vm, network_key):
for network in vm.network:
if hasattr(network, 'key') and network_key == network.key:
return network
return
def get_network_by_mac_address(self, vm, mac_address):
backing = [device.backing for device in vm.config.hardware.device
if isinstance(device, vim.vm.device.VirtualEthernetCard)
and hasattr(device, 'macAddress')
and device.macAddress == mac_address]
if backing:
back = backing[0]
if hasattr(back, 'network'):
return back.network
if hasattr(back, 'port'):
return back.port
return None
def get_vnic_by_mac_address(self, vm, mac_address):
for device in vm.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualEthernetCard) \
and hasattr(device, 'macAddress') and device.macAddress == mac_address:
# mac address is unique
return device
return None
@staticmethod
def vm_reconfig_task(vm, device_change):
"""
Create Task for VM re-configure
:param vm: <vim.vm obj> VM which will be re-configure
:param device_change:
:return: Task
"""
config_spec = vim.vm.ConfigSpec(deviceChange=device_change)
task = vm.ReconfigVM_Task(config_spec)
return task
@staticmethod
def vm_get_network_by_name(vm, network_name):
"""
Try to find Network scanning all attached to VM networks
:param vm: <vim.vm>
:param network_name: <str> name of network
:return: <vim.vm.Network or None>
"""
# return None
for network in vm.network:
if hasattr(network, "name") and network_name == network.name:
return network
return None
@staticmethod
def _get_snapshot(clone_params, template):
snapshot_name = getattr(clone_params, 'snapshot', None)
if not snapshot_name:
return None
        if not hasattr(template, 'snapshot') or not hasattr(template.snapshot, 'rootSnapshotList'):
raise ValueError('The given vm does not have any snapshots')
paths = snapshot_name.split('/')
temp_snap = template.snapshot
for path in paths:
if path:
root = getattr(temp_snap, 'rootSnapshotList', getattr(temp_snap, 'childSnapshotList', None))
if not root:
temp_snap = None
break
temp = pyVmomiService._get_snapshot_from_root_snapshot(path, root)
if not temp:
temp_snap = None
break
else:
temp_snap = temp
if temp_snap:
return temp_snap.snapshot
raise ValueError('Could not find snapshot in vm')
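    # Example: snapshot='base/patched' first searches the template's
    # rootSnapshotList for 'base', then that snapshot's childSnapshotList for
    # 'patched', taking the newest snapshot with a matching name at each level.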
@staticmethod
def _get_snapshot_from_root_snapshot(name, root_snapshot):
sorted_by_creation = sorted(root_snapshot, key=lambda x: x.createTime, reverse=True)
for snapshot_header in sorted_by_creation:
if snapshot_header.name == name:
return snapshot_header
return None
def get_folder_contents(self, folder, recursive=False):
vms = []
folders = []
for item in folder.childEntity:
if isinstance(item, self.vim.VirtualMachine):
vms.append(item)
elif isinstance(item, self.vim.Folder):
folders.append(item)
if recursive:
v, f = self.get_folder_contents(item, recursive)
vms.extend(v)
folders.extend(f)
return vms, folders
def get_vm_full_path(self, si, vm):
"""
:param vm: vim.VirtualMachine
:return:
"""
folder_name = None
folder = vm.parent
if folder:
folder_name = folder.name
folder_parent = folder.parent
while folder_parent and folder_parent.name and folder_parent != si.content.rootFolder and not isinstance(folder_parent, vim.Datacenter):
folder_name = folder_parent.name + '/' + folder_name
try:
folder_parent = folder_parent.parent
except Exception:
break
# at this stage we receive a path like this: vm/FOLDER1/FOLDER2;
# we're not interested in the "vm" part, so we throw that away
folder_name = '/'.join(folder_name.split('/')[1:])
# ok, now we're adding the vm name; btw, if there is no folder, that's cool, just return vm.name
return VMLocation.combine([folder_name, vm.name]) if folder_name else vm.name
def vm_has_no_vnics(vm):
    # Return True if the VM has no network devices (vNICs) attached
return next((False for device in vm.config.hardware.device
if isinstance(device, vim.vm.device.VirtualEthernetCard) and hasattr(device, 'macAddress')), True)
# --- Data Scientist Career Path/5. Data Manipulation with Pandas/2. Hands On with Pandas/2. Modifying/4. column.py | repo: myarist/Codecademy @ 2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb | blob: 0c63585ed731440a08d3d7043a34890879e10bf8 | 309 bytes | Python | license: MIT | stars: 23 ---
import codecademylib
import pandas as pd
df = pd.DataFrame([
['JOHN SMITH', 'john.smith@gmail.com'],
['Jane Doe', 'jdoe@yahoo.com'],
['joe schmo', 'joeschmo@hotmail.com']
],
columns=['Name', 'Email'])
# Add columns here
df['Lowercase Name'] = df['Name'].apply(str.lower)
print(df)
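# Illustrative extension (not part of the original exercise): apply() accepts any
# one-argument callable, so a lambda works the same way. The 'Email Provider' column
# name below is made up for demonstration.
df['Email Provider'] = df['Email'].apply(lambda email: email.split('@')[-1])
print(df)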
| 22.071429
| 46
| 0.673139
|
f65852654674bacf17c20ac2e29797f4a70c07a2
| 21,586
|
py
|
Python
|
src/python/tests/core/local/butler/deploy_test.py
|
robertswiecki/clusterfuzz
|
3c28bad3d49c95a3cc36efc86e33656e9a02dbf1
|
[
"Apache-2.0"
] | 1
|
2020-05-21T18:47:06.000Z
|
2020-05-21T18:47:06.000Z
|
src/python/tests/core/local/butler/deploy_test.py
|
robertswiecki/clusterfuzz
|
3c28bad3d49c95a3cc36efc86e33656e9a02dbf1
|
[
"Apache-2.0"
] | null | null | null |
src/python/tests/core/local/butler/deploy_test.py
|
robertswiecki/clusterfuzz
|
3c28bad3d49c95a3cc36efc86e33656e9a02dbf1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deploy tests."""
# pylint: disable=protected-access
import datetime
import json
import mock
import os
import sys
import unittest
from pyfakefs import fake_filesystem_unittest
import yaml
from local.butler import deploy
from tests.test_libs import helpers
from tests.test_libs import test_utils
@mock.patch('local.butler.deploy.RETRY_WAIT_SECONDS', 0)
class DeployTest(fake_filesystem_unittest.TestCase):
"""Deploy tests."""
def setUp(self):
real_cwd = os.path.realpath(os.getcwd())
test_utils.set_up_pyfakefs(self)
self.fs.add_real_directory(
os.path.join(real_cwd, 'src', 'appengine'), read_only=False)
self.fs.add_real_directory(
os.path.join(real_cwd, 'src', 'go', 'server'), read_only=False)
helpers.patch_environ(self)
helpers.patch(self, [
'local.butler.common.execute',
'local.butler.common.Gcloud.run',
'local.butler.common.has_file_in_path',
'local.butler.deploy.now',
'os.remove',
])
self.mock.execute.side_effect = self._mock_execute
self.mock.has_file_in_path.return_value = True
self.deploy_failure_count = 0
os.environ['ROOT_DIR'] = '.'
self.mock.now.return_value = datetime.datetime(2017, 1, 3, 12, 1)
self.manifest_target = 'clusterfuzz-source.manifest'
if sys.version_info.major == 3:
self.manifest_target += '.3'
def _check_env_variables(self, yaml_paths):
"""Check that environment variables are written to yaml paths."""
for yaml_path in yaml_paths:
with open(yaml_path) as f:
data = yaml.safe_load(f)
self.assertIn('env_variables', data)
env_variables = data['env_variables']
self.assertEqual('test-clusterfuzz', env_variables['APPLICATION_ID'])
self.assertEqual('test-project', env_variables['PROJECT_NAME'])
self.assertEqual('test-corpus-bucket', env_variables['CORPUS_BUCKET'])
self.assertEqual('test-quarantine-bucket',
env_variables['QUARANTINE_BUCKET'])
self.assertEqual('test-shared-corpus-bucket',
env_variables['SHARED_CORPUS_BUCKET'])
def _check_no_env_variables(self, yaml_paths):
"""Check that environment variables are not written to yaml paths."""
for yaml_path in yaml_paths:
with open(yaml_path) as f:
data = yaml.safe_load(f)
self.assertNotIn('env_variables', data)
# pylint: disable=unused-argument
def _mock_execute(self, command, *args, **kwargs):
"""Mock execute."""
if 'app deploy' in command:
if self.deploy_failure_count == 0:
return (0, b'ok')
self.deploy_failure_count -= 1
return (1, b'failure')
if 'app describe' in command:
return (0, b'us-central')
if 'describe redis-instance' in command:
return (0, b'redis-ip')
if 'describe' in command:
return (1, b'')
if 'versions list' in command:
return (0,
json.dumps([
{
'id': 'v1',
'last_deployed_time': {
'year': 2017,
'month': 1,
'day': 2,
'hour': 0,
'minute': 0,
'second': 0,
},
'traffic_split': 0.0,
},
{
'id': 'v2',
'last_deployed_time': {
'year': 2017,
'month': 1,
'day': 3,
'hour': 0,
'minute': 0,
'second': 0,
},
'traffic_split': 0.0,
},
{
'id': 'current',
'last_deployed_time': {
'year': 2017,
'month': 1,
'day': 3,
'hour': 12,
'minute': 0,
'second': 1,
},
'traffic_split': 1.0,
},
]).encode())
return (0, b'')
def test_app(self):
"""Test deploy app."""
deploy._prod_deployment_helper(
'/config_dir', ['/windows.zip', '/mac.zip', '/linux.zip'],
deploy_go=True)
self.mock.run.assert_has_calls([
mock.call(mock.ANY, 'deployment-manager', 'deployments', 'update',
'pubsub', '--config=./configs/test/pubsub/queues.yaml'),
mock.call(mock.ANY, 'deployment-manager', 'deployments', 'update',
'bigquery', '--config=./configs/test/bigquery/datasets.yaml'),
])
self.mock.execute.assert_has_calls([
mock.call(
'gcloud app deploy --no-stop-previous-version --quiet '
'--project=test-clusterfuzz '
'src/appengine/index.yaml '
'src/appengine/app.yaml '
'src/appengine/cron.yaml '
'src/appengine/cron-service.yaml '
'src/go/server/go-cron-service.yaml',
exit_on_error=False),
mock.call('gcloud app versions list --format=json '
'--project=test-clusterfuzz --service=default'),
mock.call(
'gcloud app versions delete --quiet --project=test-clusterfuzz '
'--service=default v1'),
mock.call('gcloud app versions list --format=json '
'--project=test-clusterfuzz --service=cron-service'),
mock.call(
'gcloud app versions delete --quiet --project=test-clusterfuzz '
'--service=cron-service v1'),
mock.call('gcloud app versions list --format=json '
'--project=test-clusterfuzz --service=go-cron-service'),
mock.call(
'gcloud app versions delete --quiet --project=test-clusterfuzz '
'--service=go-cron-service v1'),
mock.call('gsutil cp /windows.zip gs://test-deployment-bucket/'
'windows.zip'),
mock.call('gsutil cp /mac.zip gs://test-deployment-bucket/'
'mac.zip'),
mock.call('gsutil cp /linux.zip gs://test-deployment-bucket/'
'linux.zip'),
mock.call('gsutil cp -a public-read src/appengine/resources/'
'clusterfuzz-source.manifest '
'gs://test-deployment-bucket/' + self.manifest_target),
mock.call('python butler.py run setup --config-dir /config_dir '
'--non-dry-run'),
])
self._check_env_variables([
'src/appengine/app.yaml', 'src/appengine/cron-service.yaml',
'src/go/server/go-cron-service.yaml'
])
self._check_no_env_variables(
['src/appengine/cron.yaml', 'src/appengine/index.yaml'])
def test_app_without_go(self):
"""Test deploy app without go."""
deploy._prod_deployment_helper(
'/config_dir', ['/windows.zip', '/mac.zip', '/linux.zip'],
deploy_go=False)
self.mock.run.assert_has_calls([
mock.call(mock.ANY, 'deployment-manager', 'deployments', 'update',
'pubsub', '--config=./configs/test/pubsub/queues.yaml'),
mock.call(mock.ANY, 'deployment-manager', 'deployments', 'update',
'bigquery', '--config=./configs/test/bigquery/datasets.yaml'),
])
self.mock.execute.assert_has_calls([
mock.call(
'gcloud app deploy --no-stop-previous-version --quiet '
'--project=test-clusterfuzz '
'src/appengine/index.yaml '
'src/appengine/app.yaml '
'src/appengine/cron.yaml '
'src/appengine/cron-service.yaml',
exit_on_error=False),
mock.call('gcloud app versions list --format=json '
'--project=test-clusterfuzz --service=default'),
mock.call(
'gcloud app versions delete --quiet --project=test-clusterfuzz '
'--service=default v1'),
mock.call('gcloud app versions list --format=json '
'--project=test-clusterfuzz --service=cron-service'),
mock.call(
'gcloud app versions delete --quiet --project=test-clusterfuzz '
'--service=cron-service v1'),
mock.call('gsutil cp /windows.zip gs://test-deployment-bucket/'
'windows.zip'),
mock.call('gsutil cp /mac.zip gs://test-deployment-bucket/'
'mac.zip'),
mock.call('gsutil cp /linux.zip gs://test-deployment-bucket/'
'linux.zip'),
mock.call('gsutil cp -a public-read src/appengine/resources/'
'clusterfuzz-source.manifest '
'gs://test-deployment-bucket/' + self.manifest_target),
mock.call('python butler.py run setup --config-dir /config_dir '
'--non-dry-run'),
])
self._check_env_variables(
['src/appengine/app.yaml', 'src/appengine/cron-service.yaml'])
self._check_no_env_variables(
['src/appengine/cron.yaml', 'src/appengine/index.yaml'])
def test_app_staging(self):
"""Test deploy app to staging."""
deploy._staging_deployment_helper(deploy_go=True)
self.mock.execute.assert_has_calls([
mock.call(
'gcloud app deploy --stop-previous-version --quiet '
'--project=test-clusterfuzz '
'src/appengine/staging.yaml '
'src/go/server/go-cron-service-staging.yaml',
exit_on_error=False),
mock.call('gcloud app versions list --format=json '
'--project=test-clusterfuzz --service=staging'),
mock.call(
'gcloud app versions delete --quiet --project=test-clusterfuzz '
'--service=staging v1 v2'),
mock.call(
'gcloud app versions list --format=json '
'--project=test-clusterfuzz --service=go-cron-service-staging'),
mock.call(
'gcloud app versions delete --quiet --project=test-clusterfuzz '
'--service=go-cron-service-staging v1 v2'),
])
self._check_env_variables(['src/appengine/staging.yaml'])
def test_app_retry(self):
"""Test deploy app with retries."""
self.deploy_failure_count = 1
deploy._prod_deployment_helper(
'/config_dir', ['/windows.zip', '/mac.zip', '/linux.zip'],
deploy_go=True)
self.mock.run.assert_has_calls([
mock.call(mock.ANY, 'deployment-manager', 'deployments', 'update',
'pubsub', '--config=./configs/test/pubsub/queues.yaml'),
mock.call(mock.ANY, 'deployment-manager', 'deployments', 'update',
'bigquery', '--config=./configs/test/bigquery/datasets.yaml'),
])
self.mock.execute.assert_has_calls([
mock.call(
'gcloud app deploy --no-stop-previous-version --quiet '
'--project=test-clusterfuzz '
'src/appengine/index.yaml '
'src/appengine/app.yaml '
'src/appengine/cron.yaml '
'src/appengine/cron-service.yaml '
'src/go/server/go-cron-service.yaml',
exit_on_error=False),
mock.call(
'gcloud app deploy --no-stop-previous-version --quiet '
'--project=test-clusterfuzz '
'src/appengine/index.yaml '
'src/appengine/app.yaml '
'src/appengine/cron.yaml '
'src/appengine/cron-service.yaml '
'src/go/server/go-cron-service.yaml',
exit_on_error=False),
mock.call('gcloud app versions list --format=json '
'--project=test-clusterfuzz --service=default'),
mock.call(
'gcloud app versions delete --quiet --project=test-clusterfuzz '
'--service=default v1'),
mock.call('gcloud app versions list --format=json '
'--project=test-clusterfuzz --service=cron-service'),
mock.call(
'gcloud app versions delete --quiet --project=test-clusterfuzz '
'--service=cron-service v1'),
mock.call('gcloud app versions list --format=json '
'--project=test-clusterfuzz --service=go-cron-service'),
mock.call(
'gcloud app versions delete --quiet --project=test-clusterfuzz '
'--service=go-cron-service v1'),
mock.call('gsutil cp /windows.zip gs://test-deployment-bucket/'
'windows.zip'),
mock.call('gsutil cp /mac.zip gs://test-deployment-bucket/'
'mac.zip'),
mock.call('gsutil cp /linux.zip gs://test-deployment-bucket/'
'linux.zip'),
mock.call('gsutil cp -a public-read src/appengine/resources/'
'clusterfuzz-source.manifest '
'gs://test-deployment-bucket/' + self.manifest_target),
mock.call('python butler.py run setup --config-dir /config_dir '
'--non-dry-run'),
])
self._check_env_variables([
'src/appengine/app.yaml', 'src/appengine/cron-service.yaml',
'src/go/server/go-cron-service.yaml'
])
self._check_no_env_variables(
['src/appengine/cron.yaml', 'src/appengine/index.yaml'])
def test_app_retry_failure(self):
"""Test deploy app with retries (failure)."""
self.deploy_failure_count = 4
with self.assertRaises(SystemExit):
deploy._prod_deployment_helper(
'/config_dir', ['/windows.zip', '/mac.zip', '/linux.zip'],
deploy_go=True)
self.mock.run.assert_has_calls([
mock.call(mock.ANY, 'deployment-manager', 'deployments', 'update',
'pubsub', '--config=./configs/test/pubsub/queues.yaml'),
mock.call(mock.ANY, 'deployment-manager', 'deployments', 'update',
'bigquery', '--config=./configs/test/bigquery/datasets.yaml'),
])
self.mock.execute.assert_has_calls([
mock.call(
'gcloud app deploy --no-stop-previous-version --quiet '
'--project=test-clusterfuzz '
'src/appengine/index.yaml '
'src/appengine/app.yaml '
'src/appengine/cron.yaml '
'src/appengine/cron-service.yaml '
'src/go/server/go-cron-service.yaml',
exit_on_error=False),
mock.call(
'gcloud app deploy --no-stop-previous-version --quiet '
'--project=test-clusterfuzz '
'src/appengine/index.yaml '
'src/appengine/app.yaml '
'src/appengine/cron.yaml '
'src/appengine/cron-service.yaml '
'src/go/server/go-cron-service.yaml',
exit_on_error=False),
mock.call(
'gcloud app deploy --no-stop-previous-version --quiet '
'--project=test-clusterfuzz '
'src/appengine/index.yaml '
'src/appengine/app.yaml '
'src/appengine/cron.yaml '
'src/appengine/cron-service.yaml '
'src/go/server/go-cron-service.yaml',
exit_on_error=False),
mock.call(
'gcloud app deploy --no-stop-previous-version --quiet '
'--project=test-clusterfuzz '
'src/appengine/index.yaml '
'src/appengine/app.yaml '
'src/appengine/cron.yaml '
'src/appengine/cron-service.yaml '
'src/go/server/go-cron-service.yaml',
exit_on_error=False),
])
class FindFileExceedingLimitTest(fake_filesystem_unittest.TestCase):
"""Test finding files exceeding limit."""
def setUp(self):
test_utils.set_up_pyfakefs(self)
self.fs.create_file('/test/small1', contents='aaa')
self.fs.create_file('/test/small2', contents='aaa')
self.fs.create_file('/test/dir1/small3', contents='aaa')
self.fs.create_file('/test/dir1/small4', contents='aaa')
self.fs.create_file('/test/dir1/dir1/small5', contents='aaa')
self.fs.create_file('/test/dir2/small6', contents='aaa')
def test_get_too_large_file(self):
"""Test getting a too large file."""
self.fs.create_file('/test/dir1/dir1/too_large', contents='aaaaaa')
self.assertEqual('/test/dir1/dir1/too_large',
deploy.find_file_exceeding_limit('/test', 5))
def test_get_none(self):
"""Test when there's no too large file."""
self.assertIsNone(deploy.find_file_exceeding_limit('/test', 10))
class GetRemoteShaTest(unittest.TestCase):
"""Test get_remote_sha."""
def setUp(self):
helpers.patch(self, ['local.butler.common.execute'])
def test_get(self):
"""Test get_remote_sha."""
self.mock.execute.return_value = (
0, b'cbb7f93c7ddc1c3a3c98f45ebf5c3490a0c38e95 refs/heads/master')
self.assertEqual(b'cbb7f93c7ddc1c3a3c98f45ebf5c3490a0c38e95',
deploy.get_remote_sha())
class IsDiffOriginMasterTest(unittest.TestCase):
"""Test is_diff_origin_master."""
def setUp(self):
helpers.patch(
self,
['local.butler.common.execute', 'local.butler.deploy.get_remote_sha'])
self.head = ''
self.diff = ''
def execute(cmd):
if cmd == 'git fetch':
return (0, '')
elif cmd == 'git rev-parse HEAD':
return (0, self.head)
elif cmd == 'git diff origin/master --stat':
return (0, self.diff)
else:
raise Exception()
self.mock.execute.side_effect = execute
def test_good(self):
"""Test good."""
self.diff = ''
self.mock.get_remote_sha.return_value = 'sha'
self.head = 'sha'
self.assertFalse(deploy.is_diff_origin_master())
def test_diff(self):
"""Test diff."""
self.diff = 'something'
self.mock.get_remote_sha.return_value = 'sha'
self.head = 'sha'
self.assertTrue(deploy.is_diff_origin_master())
def test_diff_sha(self):
"""Test different sha."""
self.diff = ''
self.mock.get_remote_sha.return_value = 'sha'
self.head = 'sha2'
self.assertTrue(deploy.is_diff_origin_master())
class VersionsToDeleteTest(unittest.TestCase):
"""Test _versions_to_delete."""
def setUp(self):
helpers.patch(self, [
'local.butler.deploy.now',
])
def test_single_version(self):
"""Test single revision."""
self.mock.now.return_value = datetime.datetime(2017, 1, 1, 0, 0)
to_delete = deploy._versions_to_delete([
deploy.Version('1', datetime.datetime(2017, 1, 1, 0, 0), 1.0),
], 24 * 60)
self.assertEqual([], to_delete)
def test_two_revisions(self):
"""Test two revision."""
self.mock.now.return_value = datetime.datetime(2017, 1, 1, 0, 0)
to_delete = deploy._versions_to_delete([
deploy.Version('1', datetime.datetime(2016, 1, 1, 0, 0), 0.0),
deploy.Version('2', datetime.datetime(2017, 1, 1, 0, 0), 1.0),
], 24 * 60)
self.assertEqual([], to_delete)
def test_cutoff(self):
"""Test various cutoffs."""
self.mock.now.return_value = datetime.datetime(2017, 1, 30, 0, 0)
to_delete = deploy._versions_to_delete([
deploy.Version('1', datetime.datetime(2016, 1, 1, 0, 0), 0.0),
deploy.Version('2', datetime.datetime(2017, 1, 28, 23, 59), 0.0),
deploy.Version('3', datetime.datetime(2017, 1, 29, 0, 0), 0.0),
deploy.Version('4', datetime.datetime(2017, 1, 30, 0, 0), 1.0),
], 24 * 60)
self.assertEqual([
deploy.Version('1', datetime.datetime(2016, 1, 1, 0, 0), 0.0),
deploy.Version('2', datetime.datetime(2017, 1, 28, 23, 59), 0.0),
], to_delete)
to_delete = deploy._versions_to_delete([
deploy.Version('1', datetime.datetime(2016, 1, 1, 0, 0), 0.0),
deploy.Version('2', datetime.datetime(2017, 1, 28, 23, 59), 0.0),
deploy.Version('3', datetime.datetime(2017, 1, 29, 0, 1), 0.0),
deploy.Version('4', datetime.datetime(2017, 1, 30, 0, 0), 1.0),
], 24 * 60)
self.assertEqual([
deploy.Version('1', datetime.datetime(2016, 1, 1, 0, 0), 0.0),
], to_delete)
to_delete = deploy._versions_to_delete([
deploy.Version('1', datetime.datetime(2016, 1, 1, 0, 0), 0.0),
deploy.Version('2', datetime.datetime(2017, 1, 29, 0, 1), 0.0),
deploy.Version('3', datetime.datetime(2017, 1, 29, 0, 2), 0.0),
deploy.Version('4', datetime.datetime(2017, 1, 30, 0, 0), 1.0),
], 24 * 60)
self.assertEqual([], to_delete)
# Latest version should never be deleted.
to_delete = deploy._versions_to_delete([
deploy.Version('1', datetime.datetime(2016, 1, 1, 0, 0), 0.0),
deploy.Version('2', datetime.datetime(2016, 1, 29, 0, 1), 0.0),
deploy.Version('3', datetime.datetime(2016, 1, 29, 0, 2), 0.0),
deploy.Version('4', datetime.datetime(2016, 1, 30, 0, 0), 1.0),
], 24 * 60)
self.assertEqual([
deploy.Version('1', datetime.datetime(2016, 1, 1, 0, 0), 0.0),
deploy.Version('2', datetime.datetime(2016, 1, 29, 0, 1), 0.0),
deploy.Version('3', datetime.datetime(2016, 1, 29, 0, 2), 0.0),
], to_delete)
| 38.34103
| 80
| 0.585101
|
34a74d7ca6b43b84e6253310c794575c08a3adb1
| 97
|
py
|
Python
|
tests/__init__.py
|
biracruz/rasterrgb
|
35bbda4ba27b10bf5deebda0f93393af7c9c3ff3
|
[
"MIT"
] | 1
|
2018-01-03T20:35:01.000Z
|
2018-01-03T20:35:01.000Z
|
tests/__init__.py
|
biracruz/rasterrgb
|
35bbda4ba27b10bf5deebda0f93393af7c9c3ff3
|
[
"MIT"
] | 328
|
2017-02-07T03:00:57.000Z
|
2022-03-27T18:45:20.000Z
|
tests/__init__.py
|
biracruz/rasterrgb
|
35bbda4ba27b10bf5deebda0f93393af7c9c3ff3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from tests import test_rasterrgb
import numpy as np
import gdal, ogr, osr
| 24.25
| 32
| 0.721649
|
71203c36639965d4bd044f40c18e2c787add83ce
| 8,540
|
py
|
Python
|
gammapy/maps/tests/test_geom.py
|
mealworm/gammapy
|
a838b2ca347dd6321f8da4e4097a33150d7b9be6
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/maps/tests/test_geom.py
|
mealworm/gammapy
|
a838b2ca347dd6321f8da4e4097a33150d7b9be6
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/maps/tests/test_geom.py
|
mealworm/gammapy
|
a838b2ca347dd6321f8da4e4097a33150d7b9be6
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from collections import OrderedDict
import numpy as np
from numpy.testing import assert_allclose
from astropy.coordinates import SkyCoord, Angle
from regions import CircleSkyRegion
from ..geom import MapAxis, MapCoord
pytest.importorskip('scipy')
mapaxis_geoms = [
(np.array([0.25, 0.75, 1.0, 2.0]), 'lin'),
(np.array([0.25, 0.75, 1.0, 2.0]), 'log'),
(np.array([0.25, 0.75, 1.0, 2.0]), 'sqrt'),
]
mapaxis_geoms_node_type = [
(np.array([0.25, 0.75, 1.0, 2.0]), 'lin', 'edge'),
(np.array([0.25, 0.75, 1.0, 2.0]), 'log', 'edge'),
(np.array([0.25, 0.75, 1.0, 2.0]), 'sqrt', 'edge'),
(np.array([0.25, 0.75, 1.0, 2.0]), 'lin', 'center'),
(np.array([0.25, 0.75, 1.0, 2.0]), 'log', 'center'),
(np.array([0.25, 0.75, 1.0, 2.0]), 'sqrt', 'center'),
]
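# Illustrative note (not part of the original test module): the two node types
# interpret the same array differently. With the values above,
#
#     MapAxis([0.25, 0.75, 1.0, 2.0], node_type='edge')   -> 3 bins (values are bin edges)
#     MapAxis([0.25, 0.75, 1.0, 2.0], node_type='center') -> 4 bins (values are bin centers)
#
# which matches the bin counts asserted by the parametrized tests in this file.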
@pytest.mark.parametrize(('edges', 'interp'),
mapaxis_geoms)
def test_mapaxis_init_from_edges(edges, interp):
axis = MapAxis(edges, interp=interp)
assert_allclose(axis.edges, edges)
assert_allclose(axis.nbin, len(edges) - 1)
@pytest.mark.parametrize(('nodes', 'interp'),
mapaxis_geoms)
def test_mapaxis_from_nodes(nodes, interp):
axis = MapAxis.from_nodes(nodes, interp=interp)
assert_allclose(axis.center, nodes)
assert_allclose(axis.nbin, len(nodes))
@pytest.mark.parametrize(('nodes', 'interp'),
mapaxis_geoms)
def test_mapaxis_from_bounds(nodes, interp):
axis = MapAxis.from_bounds(nodes[0], nodes[-1], 3,
interp=interp)
assert_allclose(axis.edges[0], nodes[0])
assert_allclose(axis.edges[-1], nodes[-1])
assert_allclose(axis.nbin, 3)
@pytest.mark.parametrize(('nodes', 'interp', 'node_type'),
mapaxis_geoms_node_type)
def test_mapaxis_pix_to_coord(nodes, interp, node_type):
axis = MapAxis(nodes, interp=interp, node_type=node_type)
assert_allclose(axis.center,
axis.pix_to_coord(np.arange(axis.nbin, dtype=float)))
assert_allclose(np.arange(axis.nbin + 1, dtype=float) - 0.5,
axis.coord_to_pix(axis.edges))
@pytest.mark.parametrize(('nodes', 'interp', 'node_type'),
mapaxis_geoms_node_type)
def test_mapaxis_coord_to_idx(nodes, interp, node_type):
axis = MapAxis(nodes, interp=interp, node_type=node_type)
assert_allclose(np.arange(axis.nbin, dtype=int),
axis.coord_to_idx(axis.center))
@pytest.mark.parametrize(('nodes', 'interp', 'node_type'),
mapaxis_geoms_node_type)
def test_mapaxis_slice(nodes, interp, node_type):
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(1, 3))
assert_allclose(saxis.nbin, 2)
assert_allclose(saxis.center, axis.center[slice(1, 3)])
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(1, None))
assert_allclose(saxis.nbin, axis.nbin - 1)
assert_allclose(saxis.center, axis.center[slice(1, None)])
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(None, 2))
assert_allclose(saxis.nbin, 2)
assert_allclose(saxis.center, axis.center[slice(None, 2)])
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(None, -1))
assert_allclose(saxis.nbin, axis.nbin - 1)
assert_allclose(saxis.center, axis.center[slice(None, -1)])
def test_mapcoords_create():
# From existing MapCoord
coords_cel = MapCoord.create((0.0, 1.0), coordsys='CEL')
coords_gal = MapCoord.create(coords_cel, coordsys='GAL')
assert_allclose(coords_gal.lon, coords_cel.skycoord.galactic.l.deg)
assert_allclose(coords_gal.lat, coords_cel.skycoord.galactic.b.deg)
# 2D Tuple of scalars
coords = MapCoord.create((0.0, 1.0))
assert_allclose(coords.lon, 0.0)
assert_allclose(coords.lat, 1.0)
assert_allclose(coords[0], 0.0)
assert_allclose(coords[1], 1.0)
assert coords.coordsys is None
assert coords.ndim == 2
# 3D Tuple of scalars
coords = MapCoord.create((0.0, 1.0, 2.0))
assert_allclose(coords[0], 0.0)
assert_allclose(coords[1], 1.0)
assert_allclose(coords[2], 2.0)
assert coords.coordsys is None
assert coords.ndim == 3
# 2D Tuple w/ NaN coordinates
coords = MapCoord.create((np.nan, np.nan))
# 2D Tuple w/ NaN coordinates
lon, lat = np.array([np.nan, 1.0]), np.array([np.nan, 3.0])
coords = MapCoord.create((lon, lat))
assert_allclose(coords.lon, lon)
assert_allclose(coords.lat, lat)
# 2D Tuple w/ SkyCoord
lon, lat = np.array([0.0, 1.0]), np.array([2.0, 3.0])
energy = np.array([100., 1000.])
skycoord_cel = SkyCoord(lon, lat, unit='deg', frame='icrs')
skycoord_gal = SkyCoord(lon, lat, unit='deg', frame='galactic')
coords = MapCoord.create((skycoord_cel,))
assert_allclose(coords.lon, lon)
assert_allclose(coords.lat, lat)
assert coords.coordsys == 'CEL'
assert coords.ndim == 2
coords = MapCoord.create((skycoord_gal,))
assert_allclose(coords.lon, lon)
assert_allclose(coords.lat, lat)
assert coords.coordsys == 'GAL'
assert coords.ndim == 2
# SkyCoord
coords = MapCoord.create(skycoord_cel)
assert_allclose(coords.lon, lon)
assert_allclose(coords.lat, lat)
assert coords.coordsys == 'CEL'
assert coords.ndim == 2
coords = MapCoord.create(skycoord_gal)
assert_allclose(coords.lon, lon)
assert_allclose(coords.lat, lat)
assert coords.coordsys == 'GAL'
assert coords.ndim == 2
# 2D Dict w/ vectors
coords = MapCoord.create(dict(lon=lon, lat=lat))
assert_allclose(coords.lon, lon)
assert_allclose(coords.lat, lat)
assert coords.ndim == 2
# 3D Dict w/ vectors
coords = MapCoord.create(dict(lon=lon, lat=lat, energy=energy))
assert_allclose(coords.lon, lon)
assert_allclose(coords.lat, lat)
assert_allclose(coords['energy'], energy)
assert coords.ndim == 3
# 3D Dict w/ SkyCoord
coords = MapCoord.create(dict(skycoord=skycoord_cel, energy=energy))
assert_allclose(coords.lon, lon)
assert_allclose(coords.lat, lat)
assert_allclose(coords['energy'], energy)
assert coords.ndim == 3
# 3D OrderedDict w/ vectors
coords = MapCoord.create(OrderedDict([('energy', energy),
('lat', lat), ('lon', lon)]))
assert_allclose(coords.lon, lon)
assert_allclose(coords.lat, lat)
assert_allclose(coords['energy'], energy)
assert_allclose(coords[0], energy)
assert_allclose(coords[1], lat)
assert_allclose(coords[2], lon)
assert coords.ndim == 3
def test_mapcoords_to_coordsys():
lon, lat = np.array([0.0, 1.0]), np.array([2.0, 3.0])
energy = np.array([100., 1000.])
skycoord_cel = SkyCoord(lon, lat, unit='deg', frame='icrs')
skycoord_gal = SkyCoord(lon, lat, unit='deg', frame='galactic')
coords = MapCoord.create(
dict(lon=lon, lat=lat, energy=energy), coordsys='CEL')
assert coords.coordsys == 'CEL'
assert_allclose(coords.skycoord.transform_to(
'icrs').ra.deg, skycoord_cel.ra.deg)
assert_allclose(coords.skycoord.transform_to(
'icrs').dec.deg, skycoord_cel.dec.deg)
coords = coords.to_coordsys('GAL')
assert coords.coordsys == 'GAL'
assert_allclose(coords.skycoord.transform_to(
'galactic').l.deg, skycoord_cel.galactic.l.deg)
assert_allclose(coords.skycoord.transform_to(
'galactic').b.deg, skycoord_cel.galactic.b.deg)
coords = MapCoord.create(
dict(lon=lon, lat=lat, energy=energy), coordsys='GAL')
assert coords.coordsys == 'GAL'
assert_allclose(coords.skycoord.transform_to(
'galactic').l.deg, skycoord_gal.l.deg)
assert_allclose(coords.skycoord.transform_to(
'galactic').b.deg, skycoord_gal.b.deg)
coords = coords.to_coordsys('CEL')
assert coords.coordsys == 'CEL'
assert_allclose(coords.skycoord.transform_to(
'icrs').ra.deg, skycoord_gal.icrs.ra.deg)
assert_allclose(coords.skycoord.transform_to(
'icrs').dec.deg, skycoord_gal.icrs.dec.deg)
def test_mapaxis_repr():
axis = MapAxis([1, 2, 3], name='test')
assert 'MapAxis' in repr(axis)
def test_mapcoord_repr():
coord = MapCoord({'lon': 0, 'lat': 0, 'energy': 5})
assert 'MapCoord' in repr(coord)
| 36.652361
| 82
| 0.662178
|
5532e3a74862687d88008ed24cb88ab6e544223f
| 28,548
|
py
|
Python
|
Lib/distutils/command/install.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | 2
|
2018-12-11T16:35:20.000Z
|
2019-01-23T16:42:17.000Z
|
Lib/distutils/command/install.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | 1
|
2018-12-28T21:11:50.000Z
|
2018-12-28T21:11:50.000Z
|
Lib/distutils/command/install.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | 3
|
2018-01-21T17:53:17.000Z
|
2021-09-08T10:22:05.000Z
|
"""distutils.command.install
Implements the Distutils 'install' command."""
import sys
import os
from distutils import log
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.sysconfig import get_config_vars
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
from distutils.util import get_platform
from distutils.errors import DistutilsOptionError
from site import USER_BASE
from site import USER_SITE
HAS_USER_SITE = True
WINDOWS_SCHEME = {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
INSTALL_SCHEMES = {
'unix_prefix': {
'purelib': '$base/lib/python$py_version_short/site-packages',
'platlib': '$platbase/lib/python$py_version_short/site-packages',
'headers': '$base/include/python$py_version_short$abiflags/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_local': {
'purelib': '$base/local/lib/python$py_version_short/dist-packages',
'platlib': '$platbase/local/lib/python$py_version_short/dist-packages',
'headers': '$base/local/include/python$py_version_short/$dist_name',
'scripts': '$base/local/bin',
'data' : '$base/local',
},
'deb_system': {
'purelib': '$base/lib/python3/dist-packages',
'platlib': '$platbase/lib/python3/dist-packages',
'headers': '$base/include/python$py_version_short/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_home': {
'purelib': '$base/lib/python',
'platlib': '$base/lib/python',
'headers': '$base/include/python/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'nt': WINDOWS_SCHEME,
}
# user site schemes
if HAS_USER_SITE:
INSTALL_SCHEMES['nt_user'] = {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/Python$py_version_nodot/Include/$dist_name',
'scripts': '$userbase/Python$py_version_nodot/Scripts',
'data' : '$userbase',
}
INSTALL_SCHEMES['unix_user'] = {
'purelib': '$usersite',
'platlib': '$usersite',
'headers':
'$userbase/include/python$py_version_short$abiflags/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
}
# The keys to an installation scheme; if any new types of files are to be
# installed, be sure to add an entry to every installation scheme above,
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
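# Illustrative expansion (not part of the original module): select_scheme() copies the
# template strings above into the install_* attributes, and expand_dirs() later
# substitutes the config_vars via distutils.util.subst_vars(). For example, under the
# hypothetical settings base='/usr' and py_version_short='3.6', the 'unix_prefix'
# purelib entry
#
#     '$base/lib/python$py_version_short/site-packages'
#
# expands to '/usr/lib/python3.6/site-packages'.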
class install(Command):
description = "install everything from build directory"
user_options = [
# Select installation scheme and set base director(y|ies)
('prefix=', None,
"installation prefix"),
('exec-prefix=', None,
"(Unix only) prefix for platform-specific files"),
('home=', None,
"(Unix only) home directory to install under"),
# Or, just set the base director(y|ies)
('install-base=', None,
"base installation directory (instead of --prefix or --home)"),
('install-platbase=', None,
"base installation directory for platform-specific files " +
"(instead of --exec-prefix or --home)"),
('root=', None,
"install everything relative to this alternate root directory"),
# Or, explicitly set the installation scheme
('install-purelib=', None,
"installation directory for pure Python module distributions"),
('install-platlib=', None,
"installation directory for non-pure module distributions"),
('install-lib=', None,
"installation directory for all module distributions " +
"(overrides --install-purelib and --install-platlib)"),
('install-headers=', None,
"installation directory for C/C++ headers"),
('install-scripts=', None,
"installation directory for Python scripts"),
('install-data=', None,
"installation directory for data files"),
# Byte-compilation options -- see install_lib.py for details, as
# these are duplicated from there (but only install_lib does
# anything with them).
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
# Miscellaneous control options
('force', 'f',
"force installation (overwrite any existing files)"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
# Where to install documentation (eventually!)
#('doc-format=', None, "format of documentation to generate"),
#('install-man=', None, "directory for Unix man pages"),
#('install-html=', None, "directory for HTML documentation"),
#('install-info=', None, "directory for GNU info files"),
('record=', None,
"filename in which to record list of installed files"),
('install-layout=', None,
"installation layout to choose (known values: deb, unix)"),
]
boolean_options = ['compile', 'force', 'skip-build']
if HAS_USER_SITE:
user_options.append(('user', None,
"install in user site-package '%s'" % USER_SITE))
boolean_options.append('user')
negative_opt = {'no-compile' : 'compile'}
def initialize_options(self):
"""Initializes options."""
# High-level options: these select both an installation base
# and scheme.
self.prefix = None
self.exec_prefix = None
self.home = None
self.user = 0
self.prefix_option = None
# These select only the installation base; it's up to the user to
# specify the installation scheme (currently, that means supplying
# the --install-{platlib,purelib,scripts,data} options).
self.install_base = None
self.install_platbase = None
self.root = None
# These options are the actual installation directories; if not
# supplied by the user, they are filled in using the installation
# scheme implied by prefix/exec-prefix/home and the contents of
# that installation scheme.
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_userbase = USER_BASE
self.install_usersite = USER_SITE
# enable custom installation, known values: deb
self.install_layout = None
self.multiarch = None
self.compile = None
self.optimize = None
# These two are for putting non-packagized distributions into their
# own directory and creating a .pth file if it makes sense.
# 'extra_path' comes from the setup file; 'install_path_file' can
# be turned off if it makes no sense to install a .pth file. (But
# better to install it uselessly than to guess wrong and not
# install it when it's necessary and would be used!) Currently,
# 'install_path_file' is always true unless some outsider meddles
# with it.
self.extra_path = None
self.install_path_file = 1
# 'force' forces installation, even if target files are not
# out-of-date. 'skip_build' skips running the "build" command,
# handy if you know it's not necessary. 'warn_dir' (which is *not*
# a user option, it's just there so the bdist_* commands can turn
# it off) determines whether we warn about installing to a
# directory not in sys.path.
self.force = 0
self.skip_build = 0
self.warn_dir = 1
# These are only here as a conduit from the 'build' command to the
# 'install_*' commands that do the real work. ('build_base' isn't
# actually used anywhere, but it might be useful in future.) They
# are not user options, because if the user told the install
# command where the build directory is, that wouldn't affect the
# build command.
self.build_base = None
self.build_lib = None
# Not defined yet because we don't know anything about
# documentation yet.
#self.install_man = None
#self.install_html = None
#self.install_info = None
self.record = None
# -- Option finalizing methods -------------------------------------
# (This is rather more involved than for most commands,
# because this is where the policy for installing third-
# party Python modules on various platforms given a wide
# array of user input is decided. Yes, it's quite complex!)
def finalize_options(self):
"""Finalizes options."""
# This method (and its pliant slaves, like 'finalize_unix()',
# 'finalize_other()', and 'select_scheme()') is where the default
# installation directories for modules, extension modules, and
# anything else we care to install from a Python module
# distribution. Thus, this code makes a pretty important policy
# statement about how third-party stuff is added to a Python
# installation! Note that the actual work of installation is done
# by the relatively simple 'install_*' commands; they just take
# their orders from the installation directory options determined
# here.
# Check for errors/inconsistencies in the options; first, stuff
# that's wrong on any platform.
if ((self.prefix or self.exec_prefix or self.home) and
(self.install_base or self.install_platbase)):
raise DistutilsOptionError(
"must supply either prefix/exec-prefix/home or " +
"install-base/install-platbase -- not both")
if self.home and (self.prefix or self.exec_prefix):
raise DistutilsOptionError(
"must supply either home or prefix/exec-prefix -- not both")
if self.user and (self.prefix or self.exec_prefix or self.home or
self.install_base or self.install_platbase):
raise DistutilsOptionError("can't combine user with prefix, "
"exec_prefix/home, or install_(plat)base")
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
self.warn("exec-prefix option ignored on this platform")
self.exec_prefix = None
# Now the interesting logic -- so interesting that we farm it out
# to other methods. The goal of these methods is to set the final
# values for the install_{lib,scripts,data,...} options, using as
# input a heady brew of prefix, exec_prefix, home, install_base,
# install_platbase, user-supplied versions of
# install_{purelib,platlib,lib,scripts,data,...}, and the
# INSTALL_SCHEME dictionary above. Phew!
self.dump_dirs("pre-finalize_{unix,other}")
if os.name == 'posix':
self.finalize_unix()
else:
self.finalize_other()
self.dump_dirs("post-finalize_{unix,other}()")
# Expand configuration variables, tilde, etc. in self.install_base
# and self.install_platbase -- that way, we can use $base or
# $platbase in the other installation directories and not worry
# about needing recursive variable expansion (shudder).
py_version = sys.version.split()[0]
(prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
try:
abiflags = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
abiflags = ''
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
'abiflags': abiflags,
}
if HAS_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
self.expand_basedirs()
self.dump_dirs("post-expand_basedirs()")
# Now define config vars for the base directories so we can expand
# everything else.
self.config_vars['base'] = self.install_base
self.config_vars['platbase'] = self.install_platbase
if DEBUG:
from pprint import pprint
print("config vars:")
pprint(self.config_vars)
# Expand "~" and configuration variables in the installation
# directories.
self.expand_dirs()
self.dump_dirs("post-expand_dirs()")
# Create directories in the home dir:
if self.user:
self.create_home_path()
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
# already specified install_lib, use their selection.
if self.install_lib is None:
if self.distribution.ext_modules: # has extensions: non-pure
self.install_lib = self.install_platlib
else:
self.install_lib = self.install_purelib
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
'scripts', 'data', 'headers',
'userbase', 'usersite')
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
# non-packagized module distributions (hello, Numerical Python!) to
# get their own directories.
self.handle_extra_path()
self.install_libbase = self.install_lib # needed for .pth file
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
# If a new root directory was supplied, make all the installation
# dirs relative to it.
if self.root is not None:
self.change_roots('libbase', 'lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
self.dump_dirs("after prepending root")
# Find out the build directories, ie. where to install from.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# Punt on doc directories for now -- after all, we're punting on
# documentation completely!
def dump_dirs(self, msg):
"""Dumps the list of user options."""
if not DEBUG:
return
from distutils.fancy_getopt import longopt_xlate
log.debug(msg + ":")
for opt in self.user_options:
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
if opt_name in self.negative_opt:
opt_name = self.negative_opt[opt_name]
opt_name = opt_name.translate(longopt_xlate)
val = not getattr(self, opt_name)
else:
opt_name = opt_name.translate(longopt_xlate)
val = getattr(self, opt_name)
log.debug(" %s: %s" % (opt_name, val))
def finalize_unix(self):
"""Finalizes options for posix platforms."""
if self.install_base is not None or self.install_platbase is not None:
if ((self.install_lib is None and
self.install_purelib is None and
self.install_platlib is None) or
self.install_headers is None or
self.install_scripts is None or
self.install_data is None):
raise DistutilsOptionError(
"install-base or install-platbase supplied, but "
"installation scheme is incomplete")
return
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme("unix_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
self.prefix_option = self.prefix
if self.prefix is None:
if self.exec_prefix is not None:
raise DistutilsOptionError(
"must not supply exec-prefix without prefix")
self.prefix = os.path.normpath(sys.prefix)
self.exec_prefix = os.path.normpath(sys.exec_prefix)
else:
if self.exec_prefix is None:
self.exec_prefix = self.prefix
self.install_base = self.prefix
self.install_platbase = self.exec_prefix
if self.install_layout:
if self.install_layout.lower() in ['deb']:
import sysconfig
self.multiarch = sysconfig.get_config_var('MULTIARCH')
self.select_scheme("deb_system")
elif self.install_layout.lower() in ['unix']:
self.select_scheme("unix_prefix")
else:
raise DistutilsOptionError(
"unknown value for --install-layout")
elif ((self.prefix_option and
os.path.normpath(self.prefix) != '/usr/local')
or sys.base_prefix != sys.prefix
or 'PYTHONUSERBASE' in os.environ
or 'VIRTUAL_ENV' in os.environ
or 'real_prefix' in sys.__dict__):
self.select_scheme("unix_prefix")
else:
if os.path.normpath(self.prefix) == '/usr/local':
self.prefix = self.exec_prefix = '/usr'
self.install_base = self.install_platbase = '/usr'
self.select_scheme("unix_local")
def finalize_other(self):
"""Finalizes options for non-posix platforms"""
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme(os.name + "_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
self.prefix = os.path.normpath(sys.prefix)
self.install_base = self.install_platbase = self.prefix
try:
self.select_scheme(os.name)
except KeyError:
raise DistutilsPlatformError(
"I don't know how to install stuff on '%s'" % os.name)
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data',])
def convert_paths(self, *names):
"""Call `convert_path` over `names`."""
for name in names:
attr = "install_" + name
setattr(self, attr, convert_path(getattr(self, attr)))
def handle_extra_path(self):
"""Set `path_file` and `extra_dirs` using `extra_path`."""
if self.extra_path is None:
self.extra_path = self.distribution.extra_path
if self.extra_path is not None:
if isinstance(self.extra_path, str):
self.extra_path = self.extra_path.split(',')
if len(self.extra_path) == 1:
path_file = extra_dirs = self.extra_path[0]
elif len(self.extra_path) == 2:
path_file, extra_dirs = self.extra_path
else:
raise DistutilsOptionError(
"'extra_path' option must be a list, tuple, or "
"comma-separated string with 1 or 2 elements")
# convert to local form in case Unix notation used (as it
# should be in setup scripts)
extra_dirs = convert_path(extra_dirs)
else:
path_file = None
extra_dirs = ''
# XXX should we warn if path_file and not extra_dirs? (in which
# case the path file would be harmless but pointless)
self.path_file = path_file
self.extra_dirs = extra_dirs
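# Worked example (illustrative only): for the hypothetical value
# extra_path = 'Numeric,Numeric-lib', the code above yields path_file == 'Numeric'
# (the .pth file later written next to the installed packages by create_path_file())
# and extra_dirs == 'Numeric-lib' (the subdirectory appended to install_lib in
# finalize_options). A single-element value such as 'Numeric' is used for both.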
def change_roots(self, *names):
"""Change the install directories pointed by name using root."""
for name in names:
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in self.config_vars.items():
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
# -- Command execution methods -------------------------------------
def run(self):
"""Runs the command."""
# Obviously have to build before we can install
if not self.skip_build:
self.run_command('build')
# If we built for any other platform, we can't install.
build_plat = self.distribution.get_command_obj('build').plat_name
# check warn_dir - it is a clue that the 'install' is happening
# internally, and not to sys.path, so we don't check the platform
# matches what we are running.
if self.warn_dir and build_plat != get_platform():
raise DistutilsPlatformError("Can't install when "
"cross-compiling")
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.path_file:
self.create_path_file()
# write list of installed files, if requested.
if self.record:
outputs = self.get_outputs()
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
self.execute(write_file,
(self.record, outputs),
"writing list of installed files to '%s'" %
self.record)
sys_path = map(os.path.normpath, sys.path)
sys_path = map(os.path.normcase, sys_path)
install_lib = os.path.normcase(os.path.normpath(self.install_lib))
if (self.warn_dir and
not (self.path_file and self.install_path_file) and
install_lib not in sys_path):
log.debug(("modules installed to '%s', which is not in "
"Python's module search path (sys.path) -- "
"you'll have to change the search path yourself"),
self.install_lib)
def create_path_file(self):
"""Creates the .pth file"""
filename = os.path.join(self.install_libbase,
self.path_file + ".pth")
if self.install_path_file:
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
else:
self.warn("path file '%s' not created" % filename)
# -- Reporting methods ---------------------------------------------
def get_outputs(self):
"""Assembles the outputs of all the sub-commands."""
outputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
# Add the contents of cmd.get_outputs(), ensuring
# that outputs doesn't contain duplicate entries
for filename in cmd.get_outputs():
if filename not in outputs:
outputs.append(filename)
if self.path_file and self.install_path_file:
outputs.append(os.path.join(self.install_libbase,
self.path_file + ".pth"))
return outputs
def get_inputs(self):
"""Returns the inputs of all the sub-commands"""
# XXX gee, this looks familiar ;-(
inputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
inputs.extend(cmd.get_inputs())
return inputs
# -- Predicates for sub-command list -------------------------------
def has_lib(self):
"""Returns true if the current distribution has any Python
modules to install."""
return (self.distribution.has_pure_modules() or
self.distribution.has_ext_modules())
def has_headers(self):
"""Returns true if the current distribution has any headers to
install."""
return self.distribution.has_headers()
def has_scripts(self):
"""Returns true if the current distribution has any scripts to.
install."""
return self.distribution.has_scripts()
def has_data(self):
"""Returns true if the current distribution has any data to.
install."""
return self.distribution.has_data_files()
# 'sub_commands': a list of commands this command might have to run to
# get its work done. See cmd.py for more info.
sub_commands = [('install_lib', has_lib),
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
('install_egg_info', lambda self:True),
]
| 41.076259
| 81
| 0.586626
|
f9c95e8753ec7b04987486a022d5e6c1778d771e
| 22,237
|
py
|
Python
|
archived/bio_007_world_database_on_protected_areas/contents/src/__init__.py
|
resource-watch/nrt-scripts
|
8b6230bdce676032a0d23028d42ebef44709c5f2
|
[
"MIT"
] | 6
|
2018-07-11T15:10:46.000Z
|
2021-08-30T12:50:22.000Z
|
archived/bio_007_world_database_on_protected_areas/contents/src/__init__.py
|
resource-watch/nrt-scripts
|
8b6230bdce676032a0d23028d42ebef44709c5f2
|
[
"MIT"
] | 36
|
2017-05-08T14:09:59.000Z
|
2021-12-01T04:19:49.000Z
|
archived/bio_007_world_database_on_protected_areas/contents/src/__init__.py
|
resource-watch/nrt-scripts
|
8b6230bdce676032a0d23028d42ebef44709c5f2
|
[
"MIT"
] | 5
|
2017-12-21T20:22:13.000Z
|
2020-03-09T18:25:48.000Z
|
import logging
import sys
import os
from collections import OrderedDict
import cartosql
import requests
import datetime
import copy
import time
import numpy as np
import urllib
import zipfile
import pandas as pd
import shutil
# do you want to delete everything currently in the Carto table when you run this script?
CLEAR_TABLE_FIRST = False
# name of data directory in Docker container
DATA_DIR = 'data'
# Carto username and API key for account where we will store the data
CARTO_USER = os.getenv('CARTO_USER')
CARTO_KEY = os.getenv('CARTO_KEY')
# do you want to update all the entries in the table when you run this script?
# True - update entire table
# False - just check for new areas added or areas deleted
# for now, we will replace everything in the table because there is no way to see if an area has been updated
REPLACE_ALL = True
# name of table in Carto where we will upload the data
CARTO_TABLE = 'bio_007_world_database_on_protected_areas'
# column of table that can be used as a unique ID (UID)
UID_FIELD = 'wdpa_id'
# column names and types for data table
# column names should be lowercase
# column types should be one of the following: geometry, text, numeric, timestamp
CARTO_SCHEMA = OrderedDict([
("the_geom", "geometry"),
("name", "text"),
("orig_name", "text"),
("wdpa_id", "numeric"),
("marine", "text"),
("rep_m_area", "numeric"),
("rep_area", "numeric"),
("mang_plan", "text"),
("own_type", "text"),
("country_name", "text"),
("iso3", "text"),
("iucn_cat", "text"),
("desig", "text"),
("desig_type", "text"),
("no_take", "text"),
("no_tk_area", "numeric"),
("status", "text"),
("mang_auth", "text"),
("gov_type", "text"),
("link", "text"),
("legal_status_updated_at", "timestamp"),
("status_yr", "numeric"),
])
# column names and paths to find them in the json returned by the source
JSON_LOC = {
"the_geom": ["geojson", "geometry"],
"name": ["name"],
"orig_name": ["original_name"],
"wdpa_id": ["id"],
"marine": ["marine"],
"rep_m_area": ["reported_marine_area"],
"rep_area": ["reported_area"],
"mang_plan": ["management_plan"],
"is_green_list": ["is_green_list"],
"own_type": ["owner_type"],
"country_name": ["countries", 0, "name"],
"iso3": ["countries", 0, "iso_3"],
"iucn_cat": ["iucn_category", "name"],
"desig": ["designation", "name"],
"desig_type": ["designation", "jurisdiction", "name"],
"no_take": ["no_take_status", "name"],
"no_tk_area": ["no_take_status", "area"],
"status": ["legal_status", "name"],
"mang_auth": ["management_authority", "name"],
"gov_type": ["governance", "governance_type"],
"link": ["links", "protected_planet"],
"legal_status_updated_at": ["legal_status_updated_at"],
"status_yr": ["legal_status_updated_at"],
}
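# Illustrative sketch (not part of the original script): each JSON_LOC entry is a list
# of keys/indices to walk into the nested JSON returned by the source API. A minimal,
# hypothetical extractor could look like this:
#
#     def value_at(data, path):
#         # walk one key or list index at a time; return None if a step is missing
#         for step in path:
#             try:
#                 data = data[step]
#             except (KeyError, IndexError, TypeError):
#                 return None
#         return data
#
#     # e.g. value_at(area_json, JSON_LOC['iso3']) -> area_json['countries'][0]['iso_3']
#
# The extraction logic actually used by this script lives in functions outside the
# section reproduced here.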
# Resource Watch dataset API ID
# Important! Before testing this script:
# Please change this ID OR comment out the getLayerIDs(DATASET_ID) function in the script below
# Failing to do so will overwrite the last update date on a different dataset on Resource Watch
DATASET_ID = '2442891a-157a-40e6-9092-ee596e6d30ba'
'''
FUNCTIONS FOR ALL DATASETS
The functions below must go in every near real-time script.
Their format should not need to be changed.
'''
def lastUpdateDate(dataset, date):
'''
Given a Resource Watch dataset's API ID and a datetime,
this function will update the dataset's 'last update date' on the API with the given datetime
INPUT dataset: Resource Watch API dataset ID (string)
date: date to set as the 'last update date' for the input dataset (datetime)
'''
# generate the API url for this dataset
apiUrl = f'http://api.resourcewatch.org/v1/dataset/{dataset}'
# create headers to send with the request to update the 'last update date'
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
# create the json data to send in the request
body = {
"dataLastUpdated": date.isoformat() # date should be a string in the format 'YYYY-MM-DDTHH:MM:SS'
}
# send the request
try:
r = requests.patch(url = apiUrl, json = body, headers = headers)
logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
return 0
except Exception as e:
logging.error('[lastUpdated]: '+str(e))
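# Illustrative call (not taken from this file): a plausible invocation after a
# successful update would be
#
#     lastUpdateDate(DATASET_ID, datetime.datetime.utcnow())
#
# The exact call site and timestamp logic live in main(), outside the portion of the
# file reproduced here.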
'''
FUNCTIONS FOR CARTO DATASETS
The functions below must go in every near real-time script for a Carto dataset.
Their format should not need to be changed.
'''
def checkCreateTable(table, schema, id_field, time_field=''):
'''
Create the table if it does not exist, and pull list of IDs already in the table if it does
INPUT table: Carto table to check or create (string)
schema: dictionary of column names and types, used if we are creating the table for the first time (dictionary)
id_field: name of column that we want to use as a unique ID for this table; this will be used to compare the
source data to our table each time we run the script so that we only have to pull data we
haven't previously uploaded (string)
time_field: optional, name of column that will store datetime information (string)
RETURN list of existing IDs in the table, pulled from the id_field column (list of strings)
'''
# check if the table already exists in Carto
if cartosql.tableExists(table, user=CARTO_USER, key=CARTO_KEY):
# if the table does exist, get a list of all the values in the id_field column
logging.info('Fetching existing IDs')
r = cartosql.getFields(id_field, table, f='csv', post=True, user=CARTO_USER, key=CARTO_KEY)
# turn the response into a list of strings, removing the first and last entries (header and an empty space at end)
return r.text.split('\r\n')[1:-1]
else:
# if the table does not exist, create it with columns based on the schema input
logging.info('Table {} does not exist, creating'.format(table))
cartosql.createTable(table, schema, user=CARTO_USER, key=CARTO_KEY)
# if a unique ID field is specified, set it as a unique index in the Carto table; when you upload data, Carto
# will ensure no two rows have the same entry in this column and return an error if you try to upload a row with
# a duplicate unique ID
if id_field:
cartosql.createIndex(table, id_field, unique=True, user=CARTO_USER, key=CARTO_KEY)
# if a time_field is specified, set it as an index in the Carto table; this is not a unique index
if time_field:
cartosql.createIndex(table, time_field, user=CARTO_USER, key=CARTO_KEY)
# return an empty list because there are no IDs in the new table yet
return []
def delete_local():
'''
Delete all files and folders in Docker container's data directory
'''
try:
# for each object in the data directory
for f in os.listdir(DATA_DIR):
# try to remove it as a file
try:
logging.info('Removing {}'.format(f))
os.remove(DATA_DIR+'/'+f)
# if it is not a file, remove it as a folder
except:
                shutil.rmtree(DATA_DIR+'/'+f, ignore_errors=True)
except NameError:
logging.info('No local files to clean.')
'''
FUNCTIONS FOR THIS DATASET
The functions below have been tailored to this specific dataset.
They should all be checked because their format likely will need to be changed.
'''
def fetch_ids(existing_ids_int):
'''
Get a list of WDPA IDs in the version of the dataset we are pulling
INPUT existing_ids_int: list of WDPA IDs that we already have in our Carto table (list of integers)
RETURN new_ids: list of IDs in the WDPA table that we don't already have in our existing IDs (list of strings)
all_ids: list of all IDs in the WDPA table (list of strings)
'''
# pull current csv containing WDPA IDs
# note: IDs are pulled from this csv and not the API because querying the API is very slow, so it is much faster
    # to get a list of all the IDs from this csv
filename_csv = 'WDPA_Apr2021_Public_csv'
url_csv = f'http://d1gam3xoknrgr2.cloudfront.net/current/{filename_csv}.zip'
urllib.request.urlretrieve(url_csv, DATA_DIR + '/' + filename_csv + '.zip')
# unzip file containing csv
zip_ref = zipfile.ZipFile(DATA_DIR + '/' + filename_csv + '.zip', 'r')
zip_ref.extractall(DATA_DIR + '/' + filename_csv)
zip_ref.close()
# read in WDPA csv as a pandas dataframe
filename = DATA_DIR + '/' + filename_csv + '/' + filename_csv + '.csv'
wdpa_df = pd.read_csv(filename, low_memory=False)
# get a list of all IDs in the table
all_ids = np.unique(wdpa_df.WDPAID.to_list()).tolist()
logging.info('found {} ids'.format(len(all_ids)))
# get a list of the IDs in the table that we don't already have in our existing IDs
new_ids = np.unique(np.setdiff1d(all_ids, existing_ids_int)).tolist()
logging.info('{} new ids'.format(len(new_ids)))
return new_ids, all_ids
def delete_carto_entries(id_list, column):
'''
Delete entries in Carto table based on values in a specified column
INPUT id_list: list of column values for which you want to delete entries in table (list of strings)
column: column name where you should search for these values (string)
'''
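    # e.g. for id_list ['1', '2'] and column 'wdpa_id', the WHERE clause built below becomes
    # "wdpa_id = 1 OR wdpa_id = 2" (values here are purely illustrative)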
# generate empty variable to store WHERE clause of SQL query we will send
where = None
# go through each ID in the list to be deleted
for delete_id in id_list:
# if we already have values in the SQL query, add the new value with an OR before it
if where:
where += f' OR {column} = {delete_id}'
# if the SQL query is empty, create the start of the WHERE clause
else:
where = f'{column} = {delete_id}'
# if where statement is long or we are on the last id, delete rows
# the length of 15000 was chosen arbitrarily - all the IDs to be deleted could not be sent at once, but no
# testing was done to optimize this value
if len(where) > 15000 or delete_id == id_list[-1]:
cartosql.deleteRows(CARTO_TABLE, where=where, user=CARTO_USER,
key=CARTO_KEY)
# after we have deleted a set of rows, start over with a blank WHERE clause for the SQL query so we don't
# try to delete rows we have already deleted
where = None
def processData(existing_ids):
'''
Fetch, process, upload, and clean new data
INPUT existing_ids: list of WDPA IDs that we already have in our Carto table (list of strings)
RETURN num_new: number of rows of data sent to Carto table (integer)
'''
# turn list of existing ids from strings into integers
existing_ids_int = [int(i) for i in existing_ids]
# fetch list of WDPA IDs (list of all IDs and list of new ones) so that we can pull info from the API about each area
new_ids, all_ids = fetch_ids(existing_ids_int)
# if we have designated that we want to replace all the ids, then the list of IDs we will query (id_list) will
# include all the IDs available; otherwise, we will just pull the new IDs
if REPLACE_ALL==True:
id_list = all_ids
else:
id_list = new_ids
# create empty list to store IDs for rows we want to send to Carto so that we can delete any current entries before
# sending new data
send_list=[]
# create empty lists to store data we will be sending to Carto table
new_data = []
# go through and fetch information for each of the ids
for id in id_list:
# set try number to 0 for this area's ID because this will be our first try fetching the data
try_num=0
# generate the url to pull data for this area from the WDPA API
# WDPA API Reference document: https://api.protectedplanet.net/documentation#get-v3protectedareas
url = "https://api.protectedplanet.net/v3/protected_areas/{}?token={}".format(id, os.getenv('WDPA_key'))
        # try up to 3 times to fetch the data for this area from the source
        while try_num < 3:
            try:
                r = requests.get(url)
                # stop retrying once the request succeeds
                break
            except:
                # if the API call fails, wait 60 seconds before the next attempt to fetch the data
                time.sleep(60)
                try_num += 1
        else:
            # after 3 failures to fetch data for this ID, log that the data could not be fetched
            logging.info(f'Could not fetch {id}')
# process the retrieved data
try:
# pull data from request response json
data = r.json()['protected_area']
# create an empty list to store the processed data for this row that we will send to Carto
row = []
# go through each column in the Carto table
for key in CARTO_SCHEMA.keys():
# find the location in the json where you can find this column's data
location = JSON_LOC[key]
# make a copy of the data that we can modify
key_data = copy.copy(data)
# if we are fetching data for the country_name column and there is more than one country,
# we will need to process this entry
if key == 'country_name' and len(key_data['countries']) > 1:
# get the list of countries
countries = key_data["countries"]
# make a list of the country names
c_list=[]
for country in countries:
c_list.append(country["name"])
                    # turn this list into a single string with the country names listed, separated by a semicolon
key_data = '; '.join(c_list)
# we will also need to process the iso3 data if there is more than one country
elif key == 'iso3' and len(key_data['countries']) > 1:
# get the list of countries
countries= key_data["countries"]
# make a list of the country iso3 values
c_list=[]
for country in countries:
c_list.append(country["iso_3"])
                    # turn this list into a single string with the country iso3 codes listed, separated by a semicolon
key_data = '; '.join(c_list)
# for any other column, no special processing is required at this point, just pull out the data from
# the correct location in the json
else:
# go through each nested name
for sub in location:
# try to pull out the data from that name
try:
key_data = key_data[sub]
                        # if the data is a string, remove any leading or trailing whitespace
if type(key_data)==str:
key_data = key_data.rstrip()
# if we aren't able to find the data for this column, set the data as a None value and move
# on to the next column
except (TypeError, IndexError):
key_data=None
break
# if we were able to successfully find the value for the column, do any additional required processing
if key_data:
# pull the year from the data from the 'legal status updated at' field
if key == 'status_yr':
key_data=int(key_data[-4:])
# turn the wdpa_id into an integer
if key == 'wdpa_id':
# pull it from the API entry, if possible
if key_data:
key_data = int(key_data)
# otherwise just use the id from the list of ids we are going through (some entries are missing
# this field on the API)
else:
key_data=int(id)
# add this ID to the list of IDs we are sending new data for
send_list.append(key_data)
# turn these columns into float data
if key == 'no_tk_area' or key == 'rep_area' or key == 'rep_m_area':
key_data=float(key_data)
# turn the legal_status_updated_at column into a datetime
if key == 'legal_status_updated_at':
key_data=datetime.datetime.strptime(key_data, '%m/%d/%Y')
# if no data was found for this column, make sure the entry is None
else:
key_data=None
# add this value to the row data
row.append(key_data)
# if this ID's row of data was processed, add it to the new data to be sent to Carto
if len(row):
new_data.append(row)
# if we failed to process this data, log an error
except Exception as e:
logging.error('error pulling {}'.format(id))
# send data
# for every 1000 rows processed, send the data to Carto
if (id_list.index(id) % 1000)==0 and id_list.index(id)>1:
logging.info('{} records processed.'.format(id_list.index(id)))
num_new = len(new_data)
if num_new:
# delete the old entries in the Carto table for the IDs we have processed
logging.info('Deleting old records in this batch')
delete_carto_entries(send_list, 'wdpa_id')
# push new data rows to Carto
logging.info('Adding {} new records.'.format(num_new))
# Carto does not accept Nans for numeric columns; convert them to None
for row in new_data:
row = [None if x is np.nan else x for x in row]
insert_exception = None
# maximum attempts to make
n_tries = 5
# sleep time between each attempt
retry_wait_time = 6
for i in range(n_tries):
try:
cartosql.blockInsertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), [row], user=CARTO_USER, key=CARTO_KEY)
except Exception as e: # if there's an exception do this
insert_exception = e
logging.warning('Attempt #{} to upload unsuccessful. Trying again after {} seconds'.format(i, retry_wait_time))
logging.debug('Exception encountered during upload attempt: '+ str(e))
time.sleep(retry_wait_time)
else: # if no exception do this
break # break this for loop, because we don't need to try again
else:
# this happens if the for loop completes, ie if it attempts to insert row n_tries times
logging.error('Upload of row has failed after {} attempts'.format(n_tries))
logging.error('Raising exception encountered during last upload attempt')
logging.error(insert_exception)
raise insert_exception
# start with empty lists again to process the next batch of data
new_data = []
send_list = []
# delete rows for areas that are no longer in the WDPA dataset
logging.info('Deleting records that are no longer in the database.')
# get a list of IDs that are in the Carto table but not in the most recent WDPA dataset
deleted_ids = np.setdiff1d(existing_ids_int, id_list)
# delete these rows from the Carto table
delete_carto_entries(deleted_ids, 'wdpa_id')
logging.info('{} ids deleted'.format(len(deleted_ids)))
return(num_new)
def updateResourceWatch(num_new):
'''
This function should update Resource Watch to reflect the new data.
This may include updating the 'last update date' and updating any dates on layers
'''
# If there are new entries in the Carto table
if num_new>0:
# Update dataset's last update date on Resource Watch
most_recent_date = datetime.datetime.utcnow()
lastUpdateDate(DATASET_ID, most_recent_date)
# Update the dates on layer legends - TO BE ADDED IN FUTURE
def main():
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logging.info('STARTING')
# clear the table before starting, if specified
if CLEAR_TABLE_FIRST:
logging.info('Clearing Table')
# if the table exists
if cartosql.tableExists(CARTO_TABLE, user=CARTO_USER, key=CARTO_KEY):
# delete all the rows
cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=CARTO_USER, key=CARTO_KEY)
# note: we do not delete the entire table because this will cause the dataset visualization on Resource Watch
# to disappear until we log into Carto and open the table again. If we simply delete all the rows, this
# problem does not occur
# Check if table exists, create it if it does not
logging.info('Checking if table exists and getting existing IDs.')
existing_ids = checkCreateTable(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD)
# Fetch, process, and upload the new data
logging.info('Fetching new data')
num_new = processData(existing_ids)
logging.info('Previous rows: {}, New rows: {}'.format(len(existing_ids), num_new))
# Update Resource Watch
updateResourceWatch(num_new)
# Delete local files in Docker container
delete_local()
logging.info('SUCCESS')
| 47.012685
| 148
| 0.620362
|
fe22fad34894bcce704e2b82b6da6080f6de0303
| 1,916
|
py
|
Python
|
example/example.py
|
RomainCendre/misvm
|
37bca2aafe884d648ddcb8839dc56e3b06e95364
|
[
"BSD-3-Clause"
] | 225
|
2015-01-04T15:37:04.000Z
|
2022-03-21T23:01:31.000Z
|
example/example.py
|
RomainCendre/misvm
|
37bca2aafe884d648ddcb8839dc56e3b06e95364
|
[
"BSD-3-Clause"
] | 22
|
2015-11-24T15:23:54.000Z
|
2021-07-07T10:28:58.000Z
|
example/example.py
|
RomainCendre/misvm
|
37bca2aafe884d648ddcb8839dc56e3b06e95364
|
[
"BSD-3-Clause"
] | 89
|
2015-01-19T23:07:58.000Z
|
2021-09-18T02:54:09.000Z
|
#!/usr/bin/env python
from __future__ import print_function, division
import numpy as np
from misvmio import parse_c45, bag_set
import misvm
def main():
# Load list of C4.5 Examples
example_set = parse_c45('musk1')
# Get stats to normalize data
raw_data = np.array(example_set.to_float())
data_mean = np.average(raw_data, axis=0)
data_std = np.std(raw_data, axis=0)
data_std[np.nonzero(data_std == 0.0)] = 1.0
def normalizer(ex):
ex = np.array(ex)
normed = ((ex - data_mean) / data_std)
        # The [2:-1] slice removes the first two values and the last value,
        # which are the bag/instance ids and the class label, as part of the
        # normalization process
return normed[2:-1]
# Group examples into bags
bagset = bag_set(example_set)
# Convert bags to NumPy arrays
bags = [np.array(b.to_float(normalizer)) for b in bagset]
labels = np.array([b.label for b in bagset], dtype=float)
# Convert 0/1 labels to -1/1 labels
labels = 2 * labels - 1
    # Split the dataset arbitrarily into train/test sets
train_bags = bags[10:]
train_labels = labels[10:]
test_bags = bags[:10]
test_labels = labels[:10]
# Construct classifiers
classifiers = {}
classifiers['MissSVM'] = misvm.MissSVM(kernel='linear', C=1.0, max_iters=20)
classifiers['sbMIL'] = misvm.sbMIL(kernel='linear', eta=0.1, C=1e2)
classifiers['SIL'] = misvm.SIL(kernel='linear', C=1.0)
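    # note: C is the usual SVM regularization weight; eta in sbMIL roughly encodes the expected
    # fraction of positive instances inside positive bags (the values above are just example settings)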
# Train/Evaluate classifiers
accuracies = {}
for algorithm, classifier in classifiers.items():
classifier.fit(train_bags, train_labels)
predictions = classifier.predict(test_bags)
accuracies[algorithm] = np.average(test_labels == np.sign(predictions))
for algorithm, accuracy in accuracies.items():
print('\n%s Accuracy: %.1f%%' % (algorithm, 100 * accuracy))
if __name__ == '__main__':
main()
| 31.409836
| 80
| 0.654489
|
5606a42ce474bb55533331643079b3c3e2ca085d
| 3,672
|
py
|
Python
|
render_demo.py
|
ArjanJawahier/nerf
|
baae9a419cb82939189cd5a5754d9ad3211b76e4
|
[
"MIT"
] | null | null | null |
render_demo.py
|
ArjanJawahier/nerf
|
baae9a419cb82939189cd5a5754d9ad3211b76e4
|
[
"MIT"
] | null | null | null |
render_demo.py
|
ArjanJawahier/nerf
|
baae9a419cb82939189cd5a5754d9ad3211b76e4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os, sys
# os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import numpy as np
import imageio
import json
import random
import time
import pprint
import matplotlib.pyplot as plt
import run_nerf
from load_llff import load_llff_data
from load_deepvoxels import load_dv_data
from load_blender import load_blender_data
# In[2]:
basedir = './logs'
# expname = 'fern_example'
expname = 'fern_test'
config = os.path.join(basedir, expname, 'config.txt')
print('Args:')
print(open(config, 'r').read())
parser = run_nerf.config_parser()
# VERY IMPORTANT:
print("Keep in mind the model number of iterations has changed from 200000 to 40000.")
n_iterations = 40000
n_iterations = str(n_iterations).zfill(6)
args = parser.parse_args('--config {} --ft_path {}'.format(config, os.path.join(basedir, expname, f'model_{n_iterations}.npy')))
print('loaded args')
images, poses, bds, render_poses, i_test = load_llff_data(args.datadir, args.factor,
recenter=True, bd_factor=.75,
spherify=args.spherify)
H, W, focal = poses[0,:3,-1].astype(np.float32)
H = int(H)
W = int(W)
hwf = [H, W, focal]
images = images.astype(np.float32)
poses = poses.astype(np.float32)
if args.no_ndc:
near = tf.reduce_min(bds) * .9
far = tf.reduce_max(bds) * 1.
else:
near = 0.
far = 1.
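# (in the NDC parameterization used for LLFF scenes, depth along each ray is remapped into [0, 1], hence near=0., far=1.)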
# In[3]:
# Create nerf model
_, render_kwargs_test, start, grad_vars, models = run_nerf.create_nerf(args)
bds_dict = {
'near' : tf.cast(near, tf.float32),
'far' : tf.cast(far, tf.float32),
}
render_kwargs_test.update(bds_dict)
print('Render kwargs:')
pprint.pprint(render_kwargs_test)
down = 4
render_kwargs_fast = {k : render_kwargs_test[k] for k in render_kwargs_test}
render_kwargs_fast['N_importance'] = 0
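# N_importance is the number of extra samples drawn for the hierarchical "fine" pass; setting it to 0 skips
# that pass entirely, trading image quality for render speed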
c2w = np.eye(4)[:3,:4].astype(np.float32) # identity pose matrix
test = run_nerf.render(H//down, W//down, focal/down, c2w=c2w, **render_kwargs_fast)
img = np.clip(test[0],0,1)
# plt.imshow(img)
# plt.show()
# In[9]:
# down started at 8
down = 1 # trade off resolution+aliasing for render speed to make this video faster
frames = []
for i, c2w in enumerate(render_poses):
if i%8==0: print(i)
test = run_nerf.render(H//down, W//down, focal/down, c2w=c2w[:3,:4], **render_kwargs_fast)
frames.append((255*np.clip(test[0],0,1)).astype(np.uint8))
print('done, saving')
f = f'logs/{expname}/video.mp4'
imageio.mimwrite(f, frames, fps=30, quality=8)
from IPython.display import Video
Video(f, height=320)
print("Video has been saved. Good enough for my purposes here.")
print(f"filename: logs/{expname}/video_{n_iterations}.mp4")
# Video has been saved. Good enough for my purposes here.
# # In[4]:
# get_ipython().run_line_magic('matplotlib', 'inline')
# from ipywidgets import interactive, widgets
# import matplotlib.pyplot as plt
# import numpy as np
# def f(x, y, z):
# c2w = tf.convert_to_tensor([
# [1,0,0,x],
# [0,1,0,y],
# [0,0,1,z],
# [0,0,0,1],
# ], dtype=tf.float32)
# test = run_nerf.render(H//down, W//down, focal/down, c2w=c2w, **render_kwargs_fast)
# img = np.clip(test[0],0,1)
# plt.figure(2, figsize=(20,6))
# plt.imshow(img)
# plt.show()
# sldr = lambda : widgets.FloatSlider(
# value=0.,
# min=-1.,
# max=1.,
# step=.01,
# )
# names = ['x', 'y', 'z']
# interactive_plot = interactive(f, **{n : sldr() for n in names})
# interactive_plot
# In[ ]:
| 22.95
| 128
| 0.651416
|
2834696992cf8a73343d4895f98404b69bf93115
| 4,255
|
py
|
Python
|
kettle/stream_test.py
|
AradhanaSingh/test-infra
|
62a6014039806230c3fd481edf6291cf8bea1be2
|
[
"Apache-2.0"
] | 5
|
2019-11-23T21:06:39.000Z
|
2021-01-19T21:37:07.000Z
|
kettle/stream_test.py
|
AradhanaSingh/test-infra
|
62a6014039806230c3fd481edf6291cf8bea1be2
|
[
"Apache-2.0"
] | 433
|
2019-09-26T06:52:31.000Z
|
2022-03-24T21:18:01.000Z
|
kettle/stream_test.py
|
AradhanaSingh/test-infra
|
62a6014039806230c3fd481edf6291cf8bea1be2
|
[
"Apache-2.0"
] | 49
|
2019-10-03T12:31:35.000Z
|
2021-09-22T20:36:38.000Z
|
#!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
import unittest
import stream
import make_db_test
import model
class FakeSub:
def __init__(self, pulls):
self.pulls = pulls
self.trace = []
def pull(self, return_immediately=False, **_kwargs):
self.trace.append(['pull', return_immediately])
return self.pulls.pop(0)
def acknowledge(self, acks):
self.trace.append(['ack', acks])
def modify_ack_deadline(self, acks, time):
self.trace.append(['modify-ack', acks, time])
class FakeTable:
def __init__(self, name, schema, trace=None):
self.name = name
self.schema = schema
self.trace = [] if trace is None else trace
def insert_data(self, *args, **kwargs):
self.trace.append(['insert-data', args, kwargs])
return []
class Attrs:
def __init__(self, attributes):
self.attributes = attributes
class FakeSchemaField:
def __init__(self, **kwargs):
self.__dict__ = kwargs
class StreamTest(unittest.TestCase):
def test_main(self):
# It's easier to run a full integration test with stubbed-out
# external interfaces and validate the trace than it is to test
# each individual piece.
# The components are mostly tested in make_*_test.py.
db = model.Database(':memory:')
fakesub = FakeSub([
[
('a', Attrs({'eventType': 'OBJECT_DELETE'})),
],
[
('b', Attrs({
'eventType': 'OBJECT_FINALIZE',
'objectId': 'logs/fake/123/finished.json',
'bucketId': 'kubernetes-jenkins'})),
],
[],
[
('c', Attrs({
'eventType': 'OBJECT_FINALIZE',
'objectId': 'logs/fake/123/finished.json',
'bucketId': 'kubernetes-jenkins'})),
],
[],
[
('d', Attrs({
'eventType': 'OBJECT_FINALIZE',
'objectId': 'logs/fake/124/started.json'})),
],
[],
])
faketable = FakeTable('day', stream.load_schema(FakeSchemaField), fakesub.trace)
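        # the fake table shares fakesub.trace, so insert-data calls are recorded in order alongside the pub/sub calls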
tables = {'day': (faketable, 'incr')}
stream.main(
db, fakesub, tables, make_db_test.MockedClient, [1, 0, 0, 0].pop)
# uncomment if the trace changes
# import pprint; pprint.pprint(fakesub.trace)
# self.maxDiff = 3000
now = make_db_test.MockedClient.NOW
self.assertEqual(
fakesub.trace,
[['pull', False], ['pull', True], ['pull', True],
['ack', ['a']],
['modify-ack', ['b'], 180],
['ack', ['b']],
['insert-data',
([[5,
now - 5,
now,
True,
'SUCCESS',
None,
'gs://kubernetes-jenkins/logs/fake/123',
'fake',
123,
[],
[{'name': 'Foo', 'time': 3.0},
{'failed': True,
'failure_text': 'stacktrace',
'name': 'Bad',
'time': 4.0}],
2,
1,
None]],
[1]),
{'skip_invalid_rows': True}],
['pull', False], ['pull', True],
['modify-ack', ['c'], 180],
['ack', ['c']],
['pull', False], ['pull', True],
['ack', ['d']]])
if __name__ == '__main__':
unittest.main()
| 29.755245
| 88
| 0.510223
|
6ec39df896d3b8fa7015475540c9870822f95454
| 11,696
|
py
|
Python
|
test/test_mapper.py
|
bjintechinsight/kepler-mapper
|
dee61ed78a7d2d1606d22f8b71df429544749b90
|
[
"MIT"
] | 5
|
2018-03-21T21:54:48.000Z
|
2021-09-20T11:47:20.000Z
|
test/test_mapper.py
|
bjintechinsight/kepler-mapper
|
dee61ed78a7d2d1606d22f8b71df429544749b90
|
[
"MIT"
] | null | null | null |
test/test_mapper.py
|
bjintechinsight/kepler-mapper
|
dee61ed78a7d2d1606d22f8b71df429544749b90
|
[
"MIT"
] | 4
|
2018-02-28T11:06:18.000Z
|
2021-10-01T13:37:28.000Z
|
import pytest
import numpy as np
import warnings
from kmapper import KeplerMapper, Cover, cluster
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import Lasso
from sklearn.manifold import MDS
from scipy import sparse
from scipy.spatial import distance
from sklearn import neighbors
class TestLogging():
""" Simple tests that confirm map completes at each logging level
"""
def test_runs_with_logging_0(self, capsys):
mapper = KeplerMapper(verbose=0)
data = np.random.rand(100, 2)
graph = mapper.map(data)
captured = capsys.readouterr()
assert captured[0] == ""
def test_runs_with_logging_1(self):
mapper = KeplerMapper(verbose=1)
data = np.random.rand(100, 2)
graph = mapper.map(data)
def test_runs_with_logging_2(self):
mapper = KeplerMapper(verbose=2)
data = np.random.rand(100, 2)
graph = mapper.map(data)
def test_logging_in_project(self, capsys):
mapper = KeplerMapper(verbose=2)
data = np.random.rand(100, 2)
lens = mapper.project(data)
captured = capsys.readouterr()
assert "Projecting on" in captured[0]
def test_logging_in_fit_transform(self, capsys):
mapper = KeplerMapper(verbose=2)
data = np.random.rand(100, 2)
lens = mapper.fit_transform(data)
captured = capsys.readouterr()
assert "Composing projection pipeline of length 1" in captured[0]
class TestDataAccess:
def test_members_from_id(self):
mapper = KeplerMapper(verbose=1)
data = np.random.rand(100, 2)
ids = np.random.choice(10, 100)
data[ids] = 2
graph = mapper.map(data)
graph['nodes']['new node'] = ids
mems = mapper.data_from_cluster_id('new node', graph, data)
np.testing.assert_array_equal(data[ids], mems)
def test_wrong_id(self):
mapper = KeplerMapper(verbose=1)
data = np.random.rand(100, 2)
graph = mapper.map(data)
mems = mapper.data_from_cluster_id('new node', graph, data)
np.testing.assert_array_equal(mems, np.array([]))
class TestMap:
def test_simplices(self):
mapper = KeplerMapper()
X = np.random.rand(100, 2)
lens = mapper.fit_transform(X)
graph = mapper.map(lens, X=X, cover=Cover(n_cubes=3, perc_overlap=1.5), clusterer=cluster.DBSCAN(metric='euclidean', min_samples=3))
assert max([len(s) for s in graph["simplices"]]) <= 2
nodes = [n for n in graph['simplices'] if len(n) == 1]
edges = [n for n in graph['simplices'] if len(n) == 2]
assert len(nodes) == 3
assert len(edges) == 3
def test_precomputed(self):
mapper = KeplerMapper()
X = np.random.rand(100, 2)
X_pdist = distance.squareform(distance.pdist(X, metric='euclidean'))
lens = mapper.fit_transform(X_pdist)
graph = mapper.map(lens, X=X_pdist, cover=Cover(n_cubes=10, perc_overlap=0.8), clusterer=cluster.DBSCAN(metric='precomputed', min_samples=3), precomputed=True)
graph2 = mapper.map(lens, X=X, cover=Cover(n_cubes=10, perc_overlap=0.8), clusterer=cluster.DBSCAN(metric='euclidean', min_samples=3))
assert graph['links'] == graph2['links']
assert graph['nodes'] == graph2['nodes']
assert graph['simplices'] == graph2['simplices']
def test_precomputed_with_knn_lens(self):
mapper = KeplerMapper()
X = np.random.rand(100, 5)
lens = mapper.fit_transform(X, projection="knn_distance_3", distance_matrix="chebyshev")
assert lens.shape == (100, 1)
def test_affinity_prop_clustering(self):
mapper = KeplerMapper()
X = np.random.rand(100, 2)
lens = mapper.fit_transform(X)
graph = mapper.map(lens, X,
clusterer=cluster.AffinityPropagation())
class TestLens():
# TODO: most of these tests only accommodate the default option. They need to be extended to incorporate all possible transforms.
# one test for each option supported
def test_str_options(self):
mapper = KeplerMapper()
data = np.random.rand(100, 10)
options = [
['sum', np.sum],
['mean', np.mean],
['median', np.median],
['max', np.max],
['min', np.min],
['std', np.std],
['l2norm', np.linalg.norm],
]
first_point = data[0]
last_point = data[-1]
for tag, func in options:
lens = mapper.fit_transform(data, projection=tag, scaler=None)
np.testing.assert_almost_equal(lens[0][0], func(first_point))
np.testing.assert_almost_equal(lens[-1][0], func(last_point))
# For dist_mean, just make sure the code runs without breaking, not sure how to test this best
lens = mapper.fit_transform(data, projection="dist_mean", scaler=None)
def test_knn_distance(self):
mapper = KeplerMapper()
data = np.random.rand(100, 5)
lens = mapper.project(data, projection="knn_distance_4", scaler=None)
nn = neighbors.NearestNeighbors(n_neighbors=4)
nn.fit(data)
lens_confirm = np.sum(nn.kneighbors(data, n_neighbors=4, return_distance=True)[0], axis=1).reshape((-1,1))
assert lens.shape == (100, 1)
np.testing.assert_array_equal(lens, lens_confirm)
def test_distance_matrix(self):
# todo, test other distance_matrix functions
mapper = KeplerMapper(verbose=4)
X = np.random.rand(100, 10)
lens = mapper.fit_transform(X, distance_matrix='euclidean')
X_pdist = distance.squareform(distance.pdist(X, metric='euclidean'))
lens2 = mapper.fit_transform(X_pdist)
np.testing.assert_array_equal(lens, lens2)
def test_sparse_array(self):
mapper = KeplerMapper()
data = sparse.random(100, 10)
lens = mapper.fit_transform(data)
def test_lens_size(self):
mapper = KeplerMapper()
data = np.random.rand(100, 10)
lens = mapper.fit_transform(data)
assert lens.shape[0] == data.shape[0]
def test_map_custom_lens(self):
# I think that map currently requires fit_transform to be called first
mapper = KeplerMapper()
data = np.random.rand(100, 2)
graph = mapper.map(data)
assert graph["meta_data"]["projection"] == "custom"
assert graph["meta_data"]["scaler"] == "None"
def test_project_sklearn_class(self):
mapper = KeplerMapper()
data = np.random.rand(100, 5)
lens = mapper.project(data, projection=PCA(
n_components=1), scaler=None)
pca = PCA(n_components=1)
lens_confirm = pca.fit_transform(data)
assert lens.shape == (100, 1)
np.testing.assert_array_equal(lens, lens_confirm)
def test_tuple_projection(self):
mapper = KeplerMapper()
data = np.random.rand(100, 5)
y = np.random.rand(100, 1)
lasso = Lasso()
lasso.fit(data, y)
lens = mapper.project(data, projection=(lasso, data), scaler=None)
# hard to test this, at least it doesn't fail
assert lens.shape == (100, 1)
np.testing.assert_array_equal(
lens, lasso.predict(data).reshape((100, 1)))
def test_tuple_projection_fit(self):
mapper = KeplerMapper()
data = np.random.rand(100, 5)
y = np.random.rand(100, 1)
lens = mapper.project(data, projection=(Lasso(), data, y), scaler=None)
# hard to test this, at least it doesn't fail
assert lens.shape == (100, 1)
def test_projection_without_pipeline(self):
        # accommodate scaling: values are in (0,1), but will be scaled slightly
atol = 0.1
mapper = KeplerMapper(verbose=1)
data = np.random.rand(100, 5)
lens = mapper.project(data, projection=[0, 1])
np.testing.assert_allclose(lens, data[:, :2], atol=atol)
lens = mapper.project(data, projection=[0])
np.testing.assert_allclose(lens, data[:, :1], atol=atol)
def test_pipeline(self):
# TODO: break this test into many smaller ones.
input_data = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], np.float64)
atol_big = 0.1
atol_small = 0.001
mapper = KeplerMapper()
lens_1 = mapper.fit_transform(input_data,
projection=[[0, 1], "sum"],
scaler=None)
expected_output_1 = np.array([[3], [7], [11], [15]])
lens_2 = mapper.fit_transform(input_data,
projection=[[0, 1], "sum"])
expected_output_2 = np.array([[0], [0.33], [0.66], [1.]])
lens_3 = mapper.fit_transform(input_data,
projection=[[0, 1], "mean"],
scaler=None)
expected_output_3 = np.array([[1.5], [3.5], [5.5], [7.5]])
lens_4 = mapper.fit_transform(input_data,
projection=[[1], "mean"],
scaler=None)
expected_output_4 = np.array([[2], [4], [6], [8]])
lens_5 = mapper.fit_transform(input_data,
projection=[[0, 1], "l2norm"],
scaler=None,
distance_matrix=[False, "pearson"])
expected_output_5 = np.array([[2.236], [5.], [7.81], [10.630]])
lens_6 = mapper.fit_transform(input_data,
projection=[[0, 1], [0, 1]],
scaler=None,
distance_matrix=[False, "cosine"])
expected_output_6 = np.array([[0., 0.016],
[0.016, 0.], [0.026, 0.0013], [0.032, 0.0028]])
lens_7 = mapper.fit_transform(input_data,
projection=[[0, 1], "l2norm"],
scaler=None,
distance_matrix=[False, "cosine"])
expected_output_7 = np.array(
[[0.044894], [0.01643], [0.026617], [0.032508]])
lens_8 = mapper.fit_transform(input_data, projection=[[0, 1], "sum"])
lens_9 = mapper.fit_transform(input_data, projection="sum")
lens_10 = mapper.fit_transform(input_data, projection="sum",
scaler=StandardScaler())
lens_11 = mapper.fit_transform(input_data, projection=[[0, 1], "sum"],
scaler=[None, StandardScaler()])
expected_output_10 = np.array(
[[-1.341641], [-0.447214], [0.447214], [1.341641]])
np.testing.assert_array_equal(lens_1, expected_output_1)
np.testing.assert_allclose(lens_2, expected_output_2, atol=atol_big)
np.testing.assert_array_equal(lens_3, expected_output_3)
np.testing.assert_array_equal(lens_4, expected_output_4)
np.testing.assert_allclose(lens_5, expected_output_5, atol=atol_small)
np.testing.assert_allclose(lens_6, expected_output_6, atol=atol_small)
np.testing.assert_allclose(lens_7, expected_output_7, atol=atol_small)
np.testing.assert_allclose(lens_8, lens_9, atol=atol_small)
np.testing.assert_allclose(
lens_10, expected_output_10, atol=atol_small)
np.testing.assert_array_equal(lens_10, lens_11)
assert not np.array_equal(lens_10, lens_2)
assert not np.array_equal(lens_10, lens_1)
| 37.367412
| 167
| 0.594049
|
04c46d8bf1d396461e3870e5fcbccc9b907fa852
| 628
|
py
|
Python
|
scripts/move_turtle_by_param.py
|
leeeju/R.O.S-
|
66910c0ec7265bbfe147aad9f1cc87cd2515eabf
|
[
"Python-2.0",
"OLDAP-2.7"
] | 2
|
2021-07-25T23:12:45.000Z
|
2021-07-25T23:12:46.000Z
|
scripts/move_turtle_by_param.py
|
leeeju/R.O.S-
|
66910c0ec7265bbfe147aad9f1cc87cd2515eabf
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
scripts/move_turtle_by_param.py
|
leeeju/R.O.S-
|
66910c0ec7265bbfe147aad9f1cc87cd2515eabf
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
def move_turtle():
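    # publish a single velocity command to turtlesim; turtlesim is 2-D, so only
    # linear.x and angular.z have a visible effect (angular.y is effectively ignored)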
pb = rospy.Publisher("/turtle1/cmd_vel", Twist, queue_size=20)
tw = Twist()
tw.linear.x = 1.00
tw.angular.z = 0.50
tw.angular.y = 0.80
pb.publish(tw)
if __name__ == '__main__':
try:
rospy.init_node('move_by_param')
while not rospy.is_shutdown():
param = rospy.get_param("/turtle1/go_turtle")
if param is True:
move_turtle()
else: pass
except rospy.ROSInterruptException: pass
| 24.153846
| 66
| 0.555732
|
9c35b8e3fcda54e096588898ecf5fc4aaa25fc2c
| 6,381
|
py
|
Python
|
flaskr/table_init.py
|
Ln-Yangzl/yukiyu-webpage
|
f9aaf71dca18067ecbe43faccb74a7f8d4cf56b7
|
[
"Apache-2.0"
] | null | null | null |
flaskr/table_init.py
|
Ln-Yangzl/yukiyu-webpage
|
f9aaf71dca18067ecbe43faccb74a7f8d4cf56b7
|
[
"Apache-2.0"
] | null | null | null |
flaskr/table_init.py
|
Ln-Yangzl/yukiyu-webpage
|
f9aaf71dca18067ecbe43faccb74a7f8d4cf56b7
|
[
"Apache-2.0"
] | 2
|
2021-03-23T12:22:04.000Z
|
2021-05-24T13:56:26.000Z
|
# Table creation statements for all the tables
# This module can be invoked to initialize the tables
import pymysql
import traceback
from viewAndTrigger_init import create_view_detail_info
from db_bangumi_insert import insert_bangumi
from dbBangumiInfoInsert import insert_bangumi_info
# bangumi_list master table
def create_table_bangumi_list(db):
cursor=db.cursor()
sql = """
create table if not exists bangumi_list(
bangumi_id int not null,
name varchar(80) not null,
img varchar(100) not null,
primary key (bangumi_id))ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
try:
print('start to execute:')
print(sql)
cursor.execute(sql)
print('create success !')
except:
print('create table bangumi_list error!')
traceback.print_exc()
# per-site anime (bangumi) table
def create_table_bangumi(db, table_name):
cursor=db.cursor()
sql = """CREATE TABLE if not exists %s(
bangumi_id int not NULL,
title varchar(50) not NULL,
play_url varchar(50) not NULL,
episode varchar(50) not NULL,
last_update date not NULL,
PRIMARY KEY (bangumi_id),
foreign key (bangumi_id) references bangumi_list(bangumi_id)
on update cascade
on delete cascade)ENGINE=InnoDB DEFAULT CHARSET=utf8;"""% \
(table_name)
try:
print('start to execute:')
print(sql)
cursor.execute(sql)
print('create success !')
except:
print('create table %s error!'%(table_name))
traceback.print_exc()
# voice actor (cast) table
def create_table_cast(db):
cursor=db.cursor()
table_name="bangumi_cast"
sql="""CREATE TABLE if not exists %s(
bangumi_id int not null,
actor varchar(50) not null,
primary key (bangumi_id, actor),
foreign key (bangumi_id) references bangumi_list(bangumi_id)
on update cascade
on delete cascade) ENGINE=InnoDB DEFAULT CHARSET=utf8;"""% \
(table_name)
try:
print('start to execute:')
print(sql)
cursor.execute(sql)
print('create success !')
except:
print('create table %s error!'%(table_name))
traceback.print_exc()
# production company table
def create_table_company(db):
cursor=db.cursor()
table_name="company"
sql="""
create table %s(
company_id int primary key auto_increment,
company_name varchar(50) not null,
masterpiece varchar(50)) ENGINE=InnoDB DEFAULT CHARSET=utf8;"""% \
(table_name)
sql2="""drop table if exists bangumi_company;"""
sql3="drop table if exists %s;"%(table_name)
try:
print('start to execute:')
print(sql)
cursor.execute(sql2)
cursor.execute(sql3)
cursor.execute(sql)
print('create success !')
except:
print('create table %s error!'%(table_name))
traceback.print_exc()
# director (conduct) table
def create_table_conduct(db):
cursor=db.cursor()
table_name="conduct"
sql1="drop table if exists bangumi_conduct;"
sql2="drop table if exists conduct;"
sql3="""create table if not exists conduct(
conduct_id int primary key auto_increment,
conduct_name varchar(50) not null,
masterpiece varchar(50))ENGINE=InnoDB DEFAULT CHARSET=utf8;"""
try:
print('start to execute:')
print(sql3)
cursor.execute(sql1)
cursor.execute(sql2)
cursor.execute(sql3)
print('create success !')
except:
print('create table %s error!'%(table_name))
traceback.print_exc()
# anime-company relation table
def create_table_bangumi_company(db):
cursor=db.cursor()
table_name="bangumi_company"
sql="""create table if not exists %s(
bangumi_id int not null,
company_id int not null,
primary key (bangumi_id),
foreign key (bangumi_id) references bangumi_list(bangumi_id)
on update cascade
on delete cascade,
foreign key (company_id) references company(company_id)
on update cascade
on delete cascade) ENGINE=InnoDB DEFAULT CHARSET=utf8;"""%\
(table_name)
try:
print('start to execute:')
print(sql)
cursor.execute(sql)
print('create success !')
except:
print('create table %s error!'%(table_name))
traceback.print_exc()
# anime-director relation table
def create_table_bangumi_conduct(db):
cursor=db.cursor()
table_name="bangumi_conduct"
sql="""create table if not exists %s(
bangumi_id int not null,
conduct_id int not null,
primary key (bangumi_id),
foreign key (bangumi_id) references bangumi_list(bangumi_id)
on update cascade
on delete cascade,
foreign key (conduct_id) references conduct(conduct_id)
on update cascade
on delete cascade) ENGINE=InnoDB DEFAULT CHARSET=utf8;"""% \
(table_name)
try:
print('start to execute:')
print(sql)
cursor.execute(sql)
print('create success !')
except:
print('create table %s error!'%(table_name))
traceback.print_exc()
# create the user table
def create_table_user(db):
cursor=db.cursor()
sql = """
create table if not exists user_list(
if_manager enum('Y','N') not null default 'N',
user_id int auto_increment,
name varchar(20) ,
password varchar(128) not null,
privilege char(4) not null default 'YNNN',
primary key(user_id),
unique key(name)
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
try:
print('start to execute:')
print(sql)
cursor.execute(sql)
print('create success !')
except:
print('create table error!')
traceback.print_exc()
# create the production-related tables
def initProduceTbale(db):
create_table_conduct(db)
create_table_company(db)
create_table_bangumi_company(db)
create_table_bangumi_conduct(db)
create_table_cast(db)
if __name__ == '__main__':
db = pymysql.connect(host="localhost", port=3306, db="yukiyu", user="jhchen", password="123456",charset='utf8')
# create_table_bangumi_list(db)
# create_table_bangumi(db,'bilibili')
# create_table_bangumi(db,"acfun")
# initProduceTbale(db)
# create_table_user(db)
# create_view_detail_info(db)
insert_bangumi(db)
insert_bangumi_info(db)
db.close()
| 30.099057
| 115
| 0.625764
|
12a708a0d1dea6789fb8125a0dc4160d0546528c
| 19,707
|
py
|
Python
|
digits/model/images/classification/views.py
|
ZeusNightBolt/DIGITS
|
3450cc683143415418af5ecdb1b17b02da3e2c79
|
[
"BSD-3-Clause"
] | 2
|
2017-04-24T10:16:15.000Z
|
2019-02-26T09:36:27.000Z
|
digits/model/images/classification/views.py
|
ZeusNightBolt/DIGITS
|
3450cc683143415418af5ecdb1b17b02da3e2c79
|
[
"BSD-3-Clause"
] | 1
|
2016-08-30T23:48:17.000Z
|
2016-08-30T23:48:17.000Z
|
digits/model/images/classification/views.py
|
ZeusNightBolt/DIGITS
|
3450cc683143415418af5ecdb1b17b02da3e2c79
|
[
"BSD-3-Clause"
] | 3
|
2017-04-24T10:16:15.000Z
|
2019-02-26T09:36:49.000Z
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os
import re
import tempfile
import random
import flask
import werkzeug.exceptions
import numpy as np
import digits
from digits.config import config_value
from digits import utils
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import app, scheduler, autodoc
from digits.dataset import ImageClassificationDatasetJob
from digits import frameworks
from forms import ImageClassificationModelForm
from job import ImageClassificationModelJob
from digits.status import Status
from digits.utils.forms import fill_form_if_cloned, save_form_to_job
from digits.utils import filesystem as fs
NAMESPACE = '/models/images/classification'
@app.route(NAMESPACE + '/new', methods=['GET'])
@autodoc('models')
def image_classification_model_new():
"""
Return a form for a new ImageClassificationModelJob
"""
form = ImageClassificationModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = get_standard_networks()
form.standard_networks.default = get_default_standard_network()
form.previous_networks.choices = get_previous_networks()
prev_network_snapshots = get_previous_network_snapshots()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
return flask.render_template('models/images/classification/new.html',
form = form,
frameworks = frameworks.get_frameworks(),
previous_network_snapshots = prev_network_snapshots,
previous_networks_fullinfo = get_previous_networks_fulldetails(),
multi_gpu = config_value('caffe_root')['multi_gpu'],
)
@app.route(NAMESPACE + '.json', methods=['POST'])
@app.route(NAMESPACE, methods=['POST'])
@autodoc(['models', 'api'])
def image_classification_model_create():
"""
Create a new ImageClassificationModelJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = ImageClassificationModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = get_standard_networks()
form.standard_networks.default = get_default_standard_network()
form.previous_networks.choices = get_previous_networks()
prev_network_snapshots = get_previous_network_snapshots()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
if not form.validate_on_submit():
if request_wants_json():
return flask.jsonify({'errors': form.errors}), 400
else:
return flask.render_template('models/images/classification/new.html',
form = form,
frameworks = frameworks.get_frameworks(),
previous_network_snapshots = prev_network_snapshots,
previous_networks_fullinfo = get_previous_networks_fulldetails(),
multi_gpu = config_value('caffe_root')['multi_gpu'],
), 400
datasetJob = scheduler.get_job(form.dataset.data)
if not datasetJob:
raise werkzeug.exceptions.BadRequest(
'Unknown dataset job_id "%s"' % form.dataset.data)
job = None
try:
job = ImageClassificationModelJob(
name = form.model_name.data,
dataset_id = datasetJob.id(),
)
# get handle to framework object
fw = frameworks.get_framework_by_id(form.framework.data)
pretrained_model = None
if form.method.data == 'standard':
found = False
# can we find it in standard networks?
network_desc = fw.get_standard_network_desc(form.standard_networks.data)
if network_desc:
found = True
network = fw.get_network_from_desc(network_desc)
if not found:
raise werkzeug.exceptions.BadRequest(
'Unknown standard model "%s"' % form.standard_networks.data)
elif form.method.data == 'previous':
old_job = scheduler.get_job(form.previous_networks.data)
if not old_job:
raise werkzeug.exceptions.BadRequest(
'Job not found: %s' % form.previous_networks.data)
network = fw.get_network_from_previous(old_job.train_task().network)
for choice in form.previous_networks.choices:
if choice[0] == form.previous_networks.data:
epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
if epoch == 0:
pass
elif epoch == -1:
pretrained_model = old_job.train_task().pretrained_model
else:
for filename, e in old_job.train_task().snapshots:
if e == epoch:
pretrained_model = filename
break
if pretrained_model is None:
raise werkzeug.exceptions.BadRequest(
"For the job %s, selected pretrained_model for epoch %d is invalid!"
% (form.previous_networks.data, epoch))
if not (os.path.exists(pretrained_model)):
raise werkzeug.exceptions.BadRequest(
"Pretrained_model for the selected epoch doesn't exists. May be deleted by another user/process. Please restart the server to load the correct pretrained_model details")
break
elif form.method.data == 'custom':
network = fw.get_network_from_desc(form.custom_network.data)
pretrained_model = form.custom_network_snapshot.data.strip()
else:
raise werkzeug.exceptions.BadRequest(
'Unrecognized method: "%s"' % form.method.data)
policy = {'policy': form.lr_policy.data}
if form.lr_policy.data == 'fixed':
pass
elif form.lr_policy.data == 'step':
policy['stepsize'] = form.lr_step_size.data
policy['gamma'] = form.lr_step_gamma.data
elif form.lr_policy.data == 'multistep':
policy['stepvalue'] = form.lr_multistep_values.data
policy['gamma'] = form.lr_multistep_gamma.data
elif form.lr_policy.data == 'exp':
policy['gamma'] = form.lr_exp_gamma.data
elif form.lr_policy.data == 'inv':
policy['gamma'] = form.lr_inv_gamma.data
policy['power'] = form.lr_inv_power.data
elif form.lr_policy.data == 'poly':
policy['power'] = form.lr_poly_power.data
elif form.lr_policy.data == 'sigmoid':
policy['stepsize'] = form.lr_sigmoid_step.data
policy['gamma'] = form.lr_sigmoid_gamma.data
else:
raise werkzeug.exceptions.BadRequest(
'Invalid learning rate policy')
if config_value('caffe_root')['multi_gpu']:
if form.select_gpus.data:
selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
gpu_count = None
elif form.select_gpu_count.data:
gpu_count = form.select_gpu_count.data
selected_gpus = None
else:
gpu_count = 1
selected_gpus = None
else:
if form.select_gpu.data == 'next':
gpu_count = 1
selected_gpus = None
else:
selected_gpus = [str(form.select_gpu.data)]
gpu_count = None
# Python Layer File may be on the server or copied from the client.
fs.copy_python_layer_file(
bool(form.python_layer_from_client.data),
job.dir(),
(flask.request.files[form.python_layer_client_file.name]
if form.python_layer_client_file.name in flask.request.files
else ''), form.python_layer_server_file.data)
job.tasks.append(fw.create_train_task(
job_dir = job.dir(),
dataset = datasetJob,
train_epochs = form.train_epochs.data,
snapshot_interval = form.snapshot_interval.data,
learning_rate = form.learning_rate.data,
lr_policy = policy,
gpu_count = gpu_count,
selected_gpus = selected_gpus,
batch_size = form.batch_size.data,
val_interval = form.val_interval.data,
pretrained_model= pretrained_model,
crop_size = form.crop_size.data,
use_mean = form.use_mean.data,
network = network,
random_seed = form.random_seed.data,
solver_type = form.solver_type.data,
shuffle = form.shuffle.data,
)
)
## Save form data with the job so we can easily clone it later.
save_form_to_job(job, form)
scheduler.add_job(job)
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for('models_show', job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
def show(job):
"""
Called from digits.model.views.models_show()
"""
return flask.render_template('models/images/classification/show.html', job=job, framework_ids = [fw.get_id() for fw in frameworks.get_frameworks()])
@app.route(NAMESPACE + '/large_graph', methods=['GET'])
@autodoc('models')
def image_classification_model_large_graph():
"""
Show the loss/accuracy graph, but bigger
"""
job = job_from_request()
return flask.render_template('models/images/classification/large_graph.html', job=job)
@app.route(NAMESPACE + '/classify_one.json', methods=['POST'])
@app.route(NAMESPACE + '/classify_one', methods=['POST', 'GET'])
@autodoc(['models', 'api'])
def image_classification_model_classify_one():
"""
Classify one image and return the top 5 classifications
Returns JSON when requested: {predictions: {category: confidence,...}}
"""
job = job_from_request()
image = None
if 'image_url' in flask.request.form and flask.request.form['image_url']:
image = utils.image.load_image(flask.request.form['image_url'])
elif 'image_file' in flask.request.files and flask.request.files['image_file']:
outfile = tempfile.mkstemp(suffix='.bin')
flask.request.files['image_file'].save(outfile[1])
image = utils.image.load_image(outfile[1])
os.close(outfile[0])
os.remove(outfile[1])
else:
raise werkzeug.exceptions.BadRequest('must provide image_url or image_file')
# resize image
db_task = job.train_task().dataset.train_db_task()
height = db_task.image_dims[0]
width = db_task.image_dims[1]
image = utils.image.resize_image(image, height, width,
channels = db_task.image_dims[2],
resize_mode = db_task.resize_mode,
)
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
layers = 'none'
if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
layers = 'all'
predictions, visualizations = None, None
predictions, visualizations = job.train_task().infer_one(image, snapshot_epoch=epoch, layers=layers)
# take top 5
if predictions:
predictions = [(p[0], round(100.0*p[1],2)) for p in predictions[:5]]
if request_wants_json():
return flask.jsonify({'predictions': predictions})
else:
return flask.render_template('models/images/classification/classify_one.html',
job = job,
image_src = utils.image.embed_image_html(image),
predictions = predictions,
visualizations = visualizations,
total_parameters= sum(v['param_count'] for v in visualizations if v['vis_type'] == 'Weights'),
)
@app.route(NAMESPACE + '/classify_many.json', methods=['POST'])
@app.route(NAMESPACE + '/classify_many', methods=['POST', 'GET'])
@autodoc(['models', 'api'])
def image_classification_model_classify_many():
"""
Classify many images and return the top 5 classifications for each
Returns JSON when requested: {classifications: {filename: [[category,confidence],...],...}}
"""
job = job_from_request()
image_list = flask.request.files.get('image_list')
if not image_list:
raise werkzeug.exceptions.BadRequest('image_list is a required field')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
paths = []
images = []
ground_truths = []
dataset = job.train_task().dataset
for line in image_list.readlines():
line = line.strip()
if not line:
continue
path = None
# might contain a numerical label at the end
match = re.match(r'(.*\S)\s+(\d+)$', line)
if match:
path = match.group(1)
ground_truth = int(match.group(2))
else:
path = line
ground_truth = None
try:
image = utils.image.load_image(path)
image = utils.image.resize_image(image,
dataset.image_dims[0], dataset.image_dims[1],
channels = dataset.image_dims[2],
resize_mode = dataset.resize_mode,
)
paths.append(path)
images.append(image)
ground_truths.append(ground_truth)
except utils.errors.LoadImageError as e:
print e
if not len(images):
raise werkzeug.exceptions.BadRequest(
'Unable to load any images from the file')
labels, scores = job.train_task().infer_many(images, snapshot_epoch=epoch)
if scores is None:
        raise RuntimeError('An error occurred while processing the images')
# take top 5
indices = (-scores).argsort()[:, :5]
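    # argsort on the negated scores yields column indices in descending-confidence order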
classifications = []
for image_index, index_list in enumerate(indices):
result = []
for i in index_list:
# `i` is a category in labels and also an index into scores
result.append((labels[i], round(100.0*scores[image_index, i],2)))
classifications.append(result)
# replace ground truth indices with labels
ground_truths = [labels[x] if x is not None and (0 <= x < len(labels)) else None for x in ground_truths]
if request_wants_json():
joined = dict(zip(paths, classifications))
return flask.jsonify({'classifications': joined})
else:
return flask.render_template('models/images/classification/classify_many.html',
job = job,
paths = paths,
classifications = classifications,
show_ground_truth= not(ground_truths == [None]*len(ground_truths)),
ground_truths = ground_truths
)
@app.route(NAMESPACE + '/top_n', methods=['POST'])
@autodoc('models')
def image_classification_model_top_n():
"""
Classify many images and show the top N images per category by confidence
"""
job = job_from_request()
image_list = flask.request.files['image_list']
if not image_list:
raise werkzeug.exceptions.BadRequest('File upload not found')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
if 'top_n' in flask.request.form and flask.request.form['top_n'].strip():
top_n = int(flask.request.form['top_n'])
else:
top_n = 9
if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
num_images = int(flask.request.form['num_test_images'])
else:
num_images = None
paths = []
for line in image_list.readlines():
line = line.strip()
if not line:
continue
path = None
# might contain a numerical label at the end
match = re.match(r'(.*\S)\s+\d+$', line)
if match:
path = match.group(1)
else:
path = line
paths.append(path)
random.shuffle(paths)
images = []
dataset = job.train_task().dataset
for path in paths:
try:
image = utils.image.load_image(path)
image = utils.image.resize_image(image,
dataset.image_dims[0], dataset.image_dims[1],
channels = dataset.image_dims[2],
resize_mode = dataset.resize_mode,
)
images.append(image)
if num_images and len(images) >= num_images:
break
except utils.errors.LoadImageError as e:
print e
if not len(images):
raise werkzeug.exceptions.BadRequest(
'Unable to load any images from the file')
labels, scores = job.train_task().infer_many(images, snapshot_epoch=epoch)
if scores is None:
        raise RuntimeError('An error occurred while processing the images')
indices = (-scores).argsort(axis=0)[:top_n]
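    # for each category (column), take the indices of the top_n highest-scoring images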
results = []
for i in xrange(indices.shape[1]):
result_images = []
for j in xrange(top_n):
result_images.append(images[indices[j][i]])
results.append((
labels[i],
utils.image.embed_image_html(
utils.image.vis_square(np.array(result_images))
)
))
return flask.render_template('models/images/classification/top_n.html',
job=job,
results=results,
)
def get_datasets():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationDatasetJob) and (j.status.is_running() or j.status == Status.DONE)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_standard_networks():
return [
('lenet', 'LeNet'),
('alexnet', 'AlexNet'),
#('vgg-16', 'VGG (16-layer)'), #XXX model won't learn
('googlenet', 'GoogLeNet'),
]
def get_default_standard_network():
return 'alexnet'
def get_previous_networks():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationModelJob)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_previous_networks_fulldetails():
return [(j) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, ImageClassificationModelJob)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_previous_network_snapshots():
prev_network_snapshots = []
for job_id, _ in get_previous_networks():
job = scheduler.get_job(job_id)
e = [(0, 'None')] + [(epoch, 'Epoch #%s' % epoch)
for _, epoch in reversed(job.train_task().snapshots)]
if job.train_task().pretrained_model:
e.insert(0, (-1, 'Previous pretrained model'))
prev_network_snapshots.append(e)
return prev_network_snapshots
| 38.19186
| 205
| 0.603694
|
cf4fcac1013baff313bfa43d715b93455fa2ab6d
| 1,325
|
py
|
Python
|
Main.py
|
ashikaranth8400/Github-api-Project
|
ccf0a805c92393646447d15f3e05c7f2f046a965
|
[
"MIT"
] | 3
|
2021-01-19T06:23:09.000Z
|
2021-09-07T06:50:11.000Z
|
Main.py
|
ashikaranth8400/Github-api-Project
|
ccf0a805c92393646447d15f3e05c7f2f046a965
|
[
"MIT"
] | null | null | null |
Main.py
|
ashikaranth8400/Github-api-Project
|
ccf0a805c92393646447d15f3e05c7f2f046a965
|
[
"MIT"
] | 4
|
2020-06-05T07:24:07.000Z
|
2021-10-19T09:18:14.000Z
|
from selenium import webdriver
from time import sleep
# `pass` is a Python keyword, so the credentials module cannot be imported with
# `from pass import ...`; load it via importlib instead (assumes a local pass.py defining pw and uname)
import importlib
_creds = importlib.import_module('pass')
pw, uname = _creds.pw, _creds.uname
import sys
class github:
def __init__(self, username, pw, pname):
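        # sign in to GitHub, create a repository named `pname` through the web UI, then sign out
        # (the hard-coded XPaths below are brittle and may need updating if the page layout changes)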
self.driver = webdriver.Chrome()
self.username = username
self.driver.get("https://github.com/login")
sleep(2)
self.driver.find_element_by_id("login_field")\
.send_keys(username)
sleep(2)
self.driver.find_element_by_id("password")\
.send_keys(pw)
self.driver.find_element_by_xpath("/html/body/div[3]/main/div/form/div[4]/input[9]")\
.click()
sleep(2)
self.driver.find_element_by_xpath("/html/body/div[4]/div/aside[1]/div[2]/div[2]/div/h2/a")\
.click()
sleep(2)
self.driver.find_element_by_xpath("/html/body/div[4]/main/div/form/div[2]/auto-check/dl/dd/input")\
.send_keys(pname)
sleep(2)
self.driver.find_element_by_xpath("/html/body/div[4]/main/div/form/div[3]/button")\
.click()
sleep(2)
self.driver.find_element_by_class_name("Header-link")\
.click()
sleep(2)
self.driver.find_element_by_class_name("dropdown-item dropdown-signout")\
.click()
sleep(10)
pname = sys.argv[1]
github(uname,pw,pname)
| 32.317073
| 107
| 0.6
|
102eaa02475aacdfd4f2f515320e0430c63d07fd
| 729
|
py
|
Python
|
test/optim_rmsprop_test.py
|
jdsgomes/ClassyVision-1
|
309d4f12431c6b4d8540010a781dc2aa25fe88e7
|
[
"MIT"
] | null | null | null |
test/optim_rmsprop_test.py
|
jdsgomes/ClassyVision-1
|
309d4f12431c6b4d8540010a781dc2aa25fe88e7
|
[
"MIT"
] | null | null | null |
test/optim_rmsprop_test.py
|
jdsgomes/ClassyVision-1
|
309d4f12431c6b4d8540010a781dc2aa25fe88e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.optim.rmsprop import RMSProp
from test.generic.optim_test_util import TestOptimizer
class TestRMSPropOptimizer(TestOptimizer, unittest.TestCase):
def _get_config(self):
return {
"name": "rmsprop",
"num_epochs": 90,
"lr": 0.1,
"momentum": 0.9,
"weight_decay": 0.0001,
"alpha": 0.9,
"eps": 1e-8,
"centered": False,
}
def _instance_to_test(self):
return RMSProp
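# A minimal sketch (assumes torch is installed) of the torch.optim.RMSprop optimizer
# that the config above maps onto; "name" and "num_epochs" are presumably consumed by
# the Classy Vision framework itself rather than passed to RMSprop.
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(2, 2))]
    optimizer = torch.optim.RMSprop(
        params, lr=0.1, momentum=0.9, weight_decay=0.0001,
        alpha=0.9, eps=1e-8, centered=False,
    )
    print(optimizer)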
| 26.035714
| 65
| 0.620027
|
2135993d8b08845c1803281d9aba241765d23f4d
| 2,132
|
py
|
Python
|
iris_sklearn_shared_folder.py
|
CortanaIntelligenceGallery/iris-test3
|
b7e919f5a1738ad1490e6803ae34123f505d6320
|
[
"MIT"
] | null | null | null |
iris_sklearn_shared_folder.py
|
CortanaIntelligenceGallery/iris-test3
|
b7e919f5a1738ad1490e6803ae34123f505d6320
|
[
"MIT"
] | null | null | null |
iris_sklearn_shared_folder.py
|
CortanaIntelligenceGallery/iris-test3
|
b7e919f5a1738ad1490e6803ae34123f505d6320
|
[
"MIT"
] | null | null | null |
# Please make sure scikit-learn is included the conda_dependencies.yml file.
import pickle
import sys
import os
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from azureml.logging import get_azureml_logger
# initialize the logger
run_logger = get_azureml_logger()
# create the outputs folder
os.makedirs('./outputs', exist_ok=True)
print ('Python version: {}'.format(sys.version))
print ()
# load Iris dataset
iris = load_iris()
print ('Iris dataset shape: {}'.format(iris.data.shape))
# load features and labels
X, Y = iris.data, iris.target
# change regularization rate and you will likely get a different accuracy.
reg = 0.01
# load regularization rate from argument if present
if len(sys.argv) > 1:
reg = float(sys.argv[1])
print("Regularization rate is {}".format(reg))
# log the regularization rate
run_logger.log("Regularization Rate", reg)
# train a logistic regression model
clf1 = LogisticRegression(C=1/reg).fit(X, Y)
print (clf1)
accuracy = clf1.score(X, Y)
print ("Accuracy is {}".format(accuracy))
# log accuracy
run_logger.log("Accuracy", accuracy)
print("")
print("==========================================")
print("Serialize and deserialize using the native share folder: {0}".format(os.environ['AZUREML_NATIVE_SHARE_DIRECTORY']))
print("")
# serialize the model on disk in the private share folder.
# note this folder is NOT tracked by run history, but it survives across runs on the same compute context.
print ("Export the model to model.pkl in the native shared folder")
f = open(os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'] + 'model.pkl', 'wb')
pickle.dump(clf1, f)
f.close()
# load the model back from the private share folder into memory
print("Import the model from model.pkl in the native shared folder")
f2 = open(os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'] + 'model.pkl', 'rb')
clf2 = pickle.load(f2)
# predict a new sample
X_new = [[3.0, 3.6, 1.3, 0.25]]
print ('New sample: {}'.format(X_new))
pred = clf2.predict(X_new)
print('Predicted class is {}'.format(pred))
| 30.457143
| 123
| 0.705441
|
d6a452a965ee46c0b54773c508cf899b09aa34e1
| 537
|
py
|
Python
|
hcphotonics/setup.py
|
sunjerry019/photonLauncher
|
5abbe6f1fcd8ba5e8a0f7790ffd0b724248292cb
|
[
"Apache-2.0"
] | 6
|
2015-11-26T15:03:38.000Z
|
2020-10-05T14:08:54.000Z
|
hcphotonics/setup.py
|
sunjerry019/photonLauncher
|
5abbe6f1fcd8ba5e8a0f7790ffd0b724248292cb
|
[
"Apache-2.0"
] | 7
|
2015-12-09T06:44:34.000Z
|
2021-12-14T15:51:28.000Z
|
hcphotonics/setup.py
|
sunjerry019/photonLauncher
|
5abbe6f1fcd8ba5e8a0f7790ffd0b724248292cb
|
[
"Apache-2.0"
] | 3
|
2016-07-25T10:43:21.000Z
|
2021-12-07T14:12:47.000Z
|
from setuptools import setup
from setuptools import find_packages
setup(name='hcphotonics',
version='0.1',
description='Utilities and tools for HC Photonics Lab',
url='https://github.com/sunjerry019/photonLauncher',
author='HC Photonics',
author_email='hcphotonics@gmail.com',
license='Apache 2.0',
packages=find_packages(),
install_requires=[
'matplotlib',
'numpy',
'pyserial',
'python-usbtmc',
'Gnuplot',
'libusb1'
]
)
| 25.571429
| 61
| 0.60149
|
fa87336d86e452ded371ba8c44bd0ab02407f061
| 1,715
|
py
|
Python
|
lib/python/flame/registry/dummy.py
|
GaoxiangLuo/flame
|
16bd1715a545421d45ea0fc32544e448389de49c
|
[
"Apache-2.0"
] | 6
|
2022-03-30T23:37:05.000Z
|
2022-03-31T17:29:14.000Z
|
lib/python/flame/registry/dummy.py
|
GaoxiangLuo/flame
|
16bd1715a545421d45ea0fc32544e448389de49c
|
[
"Apache-2.0"
] | 10
|
2022-03-31T00:03:58.000Z
|
2022-03-31T07:15:06.000Z
|
lib/python/flame/registry/dummy.py
|
GaoxiangLuo/flame
|
16bd1715a545421d45ea0fc32544e448389de49c
|
[
"Apache-2.0"
] | 2
|
2022-03-30T23:25:15.000Z
|
2022-03-30T23:55:47.000Z
|
# Copyright 2022 Cisco Systems, Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
"""Dummmy registry client."""
from typing import Any, Optional
from .abstract import AbstractRegistryClient
class DummyRegistryClient(AbstractRegistryClient):
"""Dummy registry client."""
def __call__(self, uri: str, job_id: str) -> None:
"""Initialize the instance."""
pass
def setup_run(self, name: str) -> None:
"""Set up a run."""
pass
def save_metrics(self, epoch: int, metrics: Optional[dict[str,
float]]) -> None:
"""Save metrics in a model registry."""
pass
def save_params(self, hyperparameters: Optional[dict[str, float]]) -> None:
"""Save hyperparameters in a model registry."""
pass
def cleanup(self) -> None:
"""Clean up resources."""
pass
def save_model(self, name: str, model: Any) -> None:
"""Save a model in a model registry."""
pass
def load_model(self, name: str, version: int) -> object:
"""Load a model from a model registry."""
pass
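# A minimal usage sketch (assumption: AbstractRegistryClient declares no abstract
# methods beyond the ones overridden here): every call is a no-op, so the dummy client
# can be dropped in wherever a registry client is expected without side effects.
if __name__ == "__main__":
    client = DummyRegistryClient()
    client(uri="dummy://registry", job_id="job-0")   # hypothetical values
    client.setup_run(name="example-run")
    client.save_metrics(epoch=0, metrics={"loss": 0.0})
    client.cleanup()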
| 31.759259
| 79
| 0.641983
|
53b531fab15214529e2173da33e6be8a20b7dea9
| 440
|
py
|
Python
|
interviews/iqiyi/9.12/2.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
interviews/iqiyi/9.12/2.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
interviews/iqiyi/9.12/2.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
n = int(input())
result = []
max_iter = int((2 * n) ** 0.5)
for i in range(max_iter, 1, -1):
    if i & 1:
        center = n // i
        if center * i == n and center - i // 2 >= 1:
            result.append(range(center - i // 2, center + i // 2 + 1))
    else:
        center = n // i + 1
        if n % i * 2 == i and center - i // 2 >= 1:
            result.append(range(center - i // 2, center + i // 2))
for line in result:
print(" ".join(map(str, line)))
| 24.444444
| 58
| 0.486364
|
ed395823992d00253e3e55b659dc28a4f3b0e47f
| 299
|
py
|
Python
|
scrapers/exceptions.py
|
PatrickSpieker/pricesleuth
|
e9a9e5a1737c20773a2b317292b4801e41583693
|
[
"MIT"
] | null | null | null |
scrapers/exceptions.py
|
PatrickSpieker/pricesleuth
|
e9a9e5a1737c20773a2b317292b4801e41583693
|
[
"MIT"
] | null | null | null |
scrapers/exceptions.py
|
PatrickSpieker/pricesleuth
|
e9a9e5a1737c20773a2b317292b4801e41583693
|
[
"MIT"
] | null | null | null |
class MissingAttributeException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class MissingPriceException(MissingAttributeException):
def __init__(self, *args, **kwargs):
MissingAttributeException.__init__(self, *args, **kwargs)
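# A minimal usage sketch: callers can catch the broader MissingAttributeException to
# handle any missing-field case, or MissingPriceException for price-specific handling.
if __name__ == "__main__":
    try:
        raise MissingPriceException("no price found on the product page")
    except MissingAttributeException as exc:
        print(type(exc).__name__, exc)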
| 37.375
| 65
| 0.729097
|
a307d8ff5899ecdce52a55253d033cd1f9b1a206
| 1,822
|
py
|
Python
|
azuresite/production.py
|
limdave/djangoapp
|
236f2da256ad2150b21e21bbc2adeac9fe6251f9
|
[
"MIT"
] | null | null | null |
azuresite/production.py
|
limdave/djangoapp
|
236f2da256ad2150b21e21bbc2adeac9fe6251f9
|
[
"MIT"
] | null | null | null |
azuresite/production.py
|
limdave/djangoapp
|
236f2da256ad2150b21e21bbc2adeac9fe6251f9
|
[
"MIT"
] | null | null | null |
from .settings import *
# Configure default domain name
ALLOWED_HOSTS = [os.environ['WEBSITE_SITE_NAME'] + '.azurewebsites.net', '127.0.0.1'] if 'WEBSITE_SITE_NAME' in os.environ else []
# WhiteNoise configuration
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
# Add whitenoise middleware after the security middleware
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Configure Postgres database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ['DBNAME'],
'HOST': os.environ['DBHOST'],
'USER': os.environ['DBUSER'],
'PASSWORD': os.environ['DBPASS']
}
}
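# Deployment sketch (assumptions: the project package is named "azuresite" as the file
# path suggests, and the platform injects the required environment variables):
#   DJANGO_SETTINGS_MODULE=azuresite.production
#   DBNAME, DBHOST, DBUSER and DBPASS must be set (they are read unconditionally above);
#   WEBSITE_SITE_NAME is optional and only extends ALLOWED_HOSTS, e.g.
#   DBNAME=mydb DBHOST=mydb.postgres.database.azure.com DBUSER=admin DBPASS=... \
#       python manage.py collectstatic --no-input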
| 55.212121
| 130
| 0.469265
|
18abe1de0da7d77526fff877d9b5aef7dc46e289
| 446
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/layout/_uirevision.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/layout/_uirevision.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/layout/_uirevision.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="uirevision", parent_name="layout", **kwargs):
super(UirevisionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| 34.307692
| 81
| 0.656951
|
c2c96c3705fce82eebf439a2db3b0a90cf860567
| 1,258
|
py
|
Python
|
django-server/climate_commander/jobs/migrations/0020_auto_20160916_0053.py
|
jrising/climate-commander
|
123cf5a07b87eb1a3bdb44378ee27712b6563ec3
|
[
"MIT"
] | null | null | null |
django-server/climate_commander/jobs/migrations/0020_auto_20160916_0053.py
|
jrising/climate-commander
|
123cf5a07b87eb1a3bdb44378ee27712b6563ec3
|
[
"MIT"
] | 1
|
2016-08-03T21:49:58.000Z
|
2016-08-03T21:49:58.000Z
|
django-server/climate_commander/jobs/migrations/0020_auto_20160916_0053.py
|
jrising/climate-commander
|
123cf5a07b87eb1a3bdb44378ee27712b6563ec3
|
[
"MIT"
] | 1
|
2016-07-13T18:19:56.000Z
|
2016-07-13T18:19:56.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('jobs', '0019_auto_20160824_1516'),
]
operations = [
migrations.AddField(
model_name='server',
name='crdntl_instanceip',
field=models.GenericIPAddressField(null=True),
),
migrations.AddField(
model_name='server',
name='crdntl_loginnode',
field=models.CharField(max_length=40, null=True),
),
migrations.AlterField(
model_name='server',
name='crdntl_domain',
field=models.CharField(max_length=50, null=True),
),
migrations.AlterField(
model_name='server',
name='crdntl_password',
field=models.CharField(max_length=20, null=True),
),
migrations.AlterField(
model_name='server',
name='crdntl_user',
field=models.CharField(max_length=50, null=True),
),
migrations.AlterField(
model_name='server',
name='server_cpus',
field=models.IntegerField(null=True),
),
]
| 27.955556
| 61
| 0.565978
|
da1a337efcbed13cbd657058e2abc544247b6cbd
| 560
|
py
|
Python
|
polls/urls.py
|
victoralvess/getting_started_django
|
109d480d978d9c1c9b84048aa98b4e475c28cf9a
|
[
"MIT"
] | null | null | null |
polls/urls.py
|
victoralvess/getting_started_django
|
109d480d978d9c1c9b84048aa98b4e475c28cf9a
|
[
"MIT"
] | null | null | null |
polls/urls.py
|
victoralvess/getting_started_django
|
109d480d978d9c1c9b84048aa98b4e475c28cf9a
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
# 'app_name' is used to distinguish different app templates in the project
app_name = 'polls'
# The 'name' value will be used by the {% url %} template tag
urlpatterns = [
path('', views.index, name='index'),
path('hello/<name>', views.say_hello, name='hello'),
path('goodbye/<name>', views.say_goodbye, name='goodbye'),
path('<int:question_id>/', views.detail, name='detail'),
path('<int:question_id>/results/', views.results, name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
]
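# A minimal sketch of the view signatures this URLconf implies (hypothetical bodies;
# the real implementations live in polls/views.py). Django passes the path converters
# as keyword arguments: <name> arrives as a str, <int:question_id> as an int.
#
#   from django.http import HttpResponse
#
#   def index(request):
#       return HttpResponse("polls index")
#
#   def say_hello(request, name):
#       return HttpResponse("Hello, {}".format(name))
#
#   def detail(request, question_id):
#       return HttpResponse("Question {}".format(question_id))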
| 40
| 74
| 0.698214
|
c30c76facd4d19c7bade5aab4b6160cdaa6af8d3
| 4,651
|
py
|
Python
|
dialogue/pytorch/seq2seq/model.py
|
ishine/nlp-dialogue
|
d47c1438cb5c45c2c2aebfb82fea92bef4c3d65c
|
[
"Apache-2.0"
] | 59
|
2021-02-15T09:15:12.000Z
|
2022-03-29T01:48:24.000Z
|
dialogue/pytorch/seq2seq/model.py
|
ishine/nlp-dialogue
|
d47c1438cb5c45c2c2aebfb82fea92bef4c3d65c
|
[
"Apache-2.0"
] | null | null | null |
dialogue/pytorch/seq2seq/model.py
|
ishine/nlp-dialogue
|
d47c1438cb5c45c2c2aebfb82fea92bef4c3d65c
|
[
"Apache-2.0"
] | 15
|
2021-02-15T09:16:04.000Z
|
2022-02-21T07:18:02.000Z
|
# Copyright 2021 DengBoCong. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""seq2seq的Pytorch实现核心core
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from typing import Tuple
from dialogue.pytorch.layers import BahdanauAttention
class Encoder(nn.Module):
""" seq2seq的encoder """
def __init__(self, vocab_size: int, embedding_dim: int, enc_units: int, num_layers: int,
dropout: float, cell_type: str = "lstm", if_bidirectional: bool = True) -> None:
"""
:param vocab_size: vocabulary size
:param embedding_dim: word embedding dimension
:param enc_units: number of encoder units
:param num_layers: number of stacked RNN layers inside the encoder
:param dropout: dropout rate
:param if_bidirectional: whether the RNN is bidirectional
:param cell_type: cell type, lstm/gru, default lstm
:return: the Seq2Seq Encoder
"""
super(Encoder, self).__init__()
self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim)
if cell_type == "lstm":
self.rnn = nn.LSTM(input_size=embedding_dim, hidden_size=enc_units,
num_layers=num_layers, bidirectional=if_bidirectional)
elif cell_type == "gru":
self.rnn = nn.GRU(input_size=embedding_dim, hidden_size=enc_units,
num_layers=num_layers, bidirectional=if_bidirectional)
self.dropout = nn.Dropout(p=dropout)
def forward(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param inputs: input to the encoder
"""
inputs = self.embedding(inputs)
dropout = self.dropout(inputs)
outputs, (state, _) = self.rnn(dropout)
# A bidirectional RNN is used here, so the feature layers from both directions are concatenated and the dimension becomes units * 2
state = torch.cat((state[-2, :, :], state[-1, :, :]), dim=1)
return outputs, state
class Decoder(nn.Module):
""" seq2seq的decoder
:param vocab_size: 词汇量大小
:param embedding_dim: 词嵌入维度
:param enc_units: encoder单元大小
:param dec_units: decoder单元大小
:param num_layers: encoder中内部RNN层数
:param dropout: 采样率
:param cell_type: cell类型,lstm/gru, 默认lstm
:param if_bidirectional: 是否双向
:return: Seq2Seq的Encoder
"""
def __init__(self, vocab_size: int, embedding_dim: int, enc_units: int, dec_units: int, num_layers: int,
dropout: float, cell_type: str = "lstm", if_bidirectional: bool = True) -> None:
super(Decoder, self).__init__()
self.vocab_size = vocab_size
self.attention = BahdanauAttention(enc_units=enc_units, dec_units=dec_units)
self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim)
if cell_type == "lstm":
self.rnn = nn.LSTM(input_size=enc_units * 2 + embedding_dim, hidden_size=dec_units,
num_layers=num_layers, bidirectional=if_bidirectional)
elif cell_type == "gru":
self.rnn = nn.GRU(input_size=enc_units * 2 + embedding_dim, hidden_size=dec_units,
num_layers=num_layers, bidirectional=if_bidirectional)
self.fc = nn.Linear(in_features=2 * enc_units + 2 * dec_units + embedding_dim, out_features=vocab_size)
self.dropout = nn.Dropout(dropout)
def forward(self, inputs: torch.Tensor, hidden: torch.Tensor,
enc_output: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param inputs: input to the decoder
:param hidden: hidden state from the encoder
:param enc_output: output of the encoder
"""
embedding = self.embedding(inputs)
embedding = self.dropout(embedding)
context_vector, attention_weights = self.attention(hidden, enc_output)
rnn_input = torch.cat((embedding, torch.unsqueeze(context_vector, dim=0)), dim=-1)
rnn_output, (dec_state, _) = self.rnn(rnn_input)
output = self.fc(torch.cat((embedding, context_vector.unsqueeze(dim=0), rnn_output), dim=-1))
return output, dec_state.squeeze(0)
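# A minimal shape-check sketch for the Encoder above (assumption: inputs are shaped
# (seq_len, batch) because the RNNs are built without batch_first=True):
if __name__ == "__main__":
    encoder = Encoder(vocab_size=1000, embedding_dim=32, enc_units=64,
                      num_layers=1, dropout=0.1)
    tokens = torch.randint(0, 1000, (10, 4))   # 10 time steps, batch of 4
    enc_output, enc_state = encoder(tokens)
    print(enc_output.shape)                    # torch.Size([10, 4, 128]): 2 * enc_units
    print(enc_state.shape)                     # torch.Size([4, 128]): both directions concatenated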
| 41.900901
| 111
| 0.657063
|
021c39199fba542fa60f71820b31916acab77c1f
| 2,803
|
py
|
Python
|
mne/viz/montage.py
|
DraganaMana/mne-python
|
83d48ec9e93bc176ae7fb8d000521ba3bd6b4c3c
|
[
"BSD-3-Clause"
] | 1
|
2020-11-05T21:30:15.000Z
|
2020-11-05T21:30:15.000Z
|
mne/viz/montage.py
|
DraganaMana/mne-python
|
83d48ec9e93bc176ae7fb8d000521ba3bd6b4c3c
|
[
"BSD-3-Clause"
] | 2
|
2016-02-27T13:43:15.000Z
|
2018-07-18T19:44:45.000Z
|
mne/viz/montage.py
|
DraganaMana/mne-python
|
83d48ec9e93bc176ae7fb8d000521ba3bd6b4c3c
|
[
"BSD-3-Clause"
] | 1
|
2017-03-05T20:44:07.000Z
|
2017-03-05T20:44:07.000Z
|
"""Functions to plot EEG sensor montages or digitizer montages."""
from copy import deepcopy
import numpy as np
from ..utils import check_version, logger
from . import plot_sensors
def plot_montage(montage, scale_factor=20, show_names=True, kind='topomap',
show=True):
"""Plot a montage.
Parameters
----------
montage : instance of Montage or DigMontage
The montage to visualize.
scale_factor : float
Determines the size of the points.
show_names : bool
Whether to show the channel names.
kind : str
Whether to plot the montage as '3d' or 'topomap' (default).
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure object.
"""
from scipy.spatial.distance import cdist
from ..channels import Montage, DigMontage
from .. import create_info
if isinstance(montage, Montage):
ch_names = montage.ch_names
title = montage.kind
elif isinstance(montage, DigMontage):
ch_names = montage.point_names
title = None
else:
raise TypeError("montage must be an instance of "
"mne.channels.montage.Montage or"
"mne.channels.montage.DigMontage")
if kind not in ['topomap', '3d']:
raise ValueError("kind must be 'topomap' or '3d'")
if isinstance(montage, Montage): # check for duplicate labels
dists = cdist(montage.pos, montage.pos)
# only consider upper triangular part by setting the rest to np.nan
dists[np.tril_indices(dists.shape[0])] = np.nan
dupes = np.argwhere(np.isclose(dists, 0))
if dupes.any():
montage = deepcopy(montage)
n_chans = montage.pos.shape[0]
n_dupes = dupes.shape[0]
idx = np.setdiff1d(montage.selection, dupes[:, 1]).tolist()
logger.info("{} duplicate electrode labels found:".format(n_dupes))
logger.info(", ".join([ch_names[d[0]] + "/" + ch_names[d[1]]
for d in dupes]))
logger.info("Plotting {} unique labels.".format(n_chans - n_dupes))
montage.ch_names = [montage.ch_names[i] for i in idx]
ch_names = montage.ch_names
montage.pos = montage.pos[idx, :]
montage.selection = np.arange(n_chans - n_dupes)
info = create_info(ch_names, sfreq=256, ch_types="eeg", montage=montage)
fig = plot_sensors(info, kind=kind, show_names=show_names, show=show,
title=title)
collection = fig.axes[0].collections[0]
if check_version("matplotlib", "1.4"):
collection.set_sizes([scale_factor])
else:
collection._sizes = [scale_factor]
return fig
| 37.373333
| 79
| 0.613628
|
f5152685f71af5e42aaac532306fec493a571db2
| 1,381
|
py
|
Python
|
zenmoney/tag.py
|
nexter83/python-zenmoney
|
08d97b9db3805b7c631075d4562e7caaea3b3c9f
|
[
"MIT"
] | null | null | null |
zenmoney/tag.py
|
nexter83/python-zenmoney
|
08d97b9db3805b7c631075d4562e7caaea3b3c9f
|
[
"MIT"
] | null | null | null |
zenmoney/tag.py
|
nexter83/python-zenmoney
|
08d97b9db3805b7c631075d4562e7caaea3b3c9f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from . import ZenObject, UUID, timestamp
class Tag(ZenObject):
'''
Zenmoney transaction tag or category, see
https://github.com/zenmoney/ZenPlugins/wiki/ZenMoney-API#tag
'''
def __init__(self,
*,
id: UUID, # UUID, string in original
changed: int = timestamp(),
user: UUID, # User.id
title: str,
parent: str,
# icon: String?
# picture: String?
# * color = (a << 24) + (r << 16) + (g << 8) + (b << 0)
# color: Int?
showIncome: bool,
showOutcome: bool,
budgetIncome: bool,
budgetOutcome: bool,
required: bool = False, # true if null o_O
**kwargs,
):
self.id = id
self.changed = changed
self.user = user
self.title = title
self.showIncome = showIncome
self.showOutcome = showOutcome
self.budgetIncome = budgetIncome
self.budgetOutcome = budgetOutcome
self.required = required
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return self.title
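# A minimal construction sketch (hypothetical values; UUIDs are passed as plain strings):
if __name__ == "__main__":
    tag = Tag(
        id="1b1a6c2e-0000-0000-0000-000000000001",
        user="2c2b7d3f-0000-0000-0000-000000000002",
        title="Groceries",
        parent="",
        showIncome=False,
        showOutcome=True,
        budgetIncome=False,
        budgetOutcome=True,
    )
    print(tag)  # __str__ returns the title: "Groceries"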
| 32.116279
| 72
| 0.451846
|
88ea8f4d1501d425f5323d0fa078ff3eed728512
| 1,114
|
py
|
Python
|
Tests/image_tests/renderpasses/graphs/TAA.py
|
SvenHinze/SpatioTemporalReprojection
|
3abc2964ef3adfeb10a64dfc6d06bc2ab87a5081
|
[
"BSD-3-Clause"
] | 1
|
2020-04-24T17:55:25.000Z
|
2020-04-24T17:55:25.000Z
|
Tests/image_tests/renderpasses/graphs/TAA.py
|
SvenHinze/SpatioTemporalReprojection
|
3abc2964ef3adfeb10a64dfc6d06bc2ab87a5081
|
[
"BSD-3-Clause"
] | 2
|
2021-03-02T10:16:06.000Z
|
2021-08-13T10:10:21.000Z
|
Tests/image_tests/renderpasses/graphs/TAA.py
|
SvenHinze/SpatioTemporalReprojection
|
3abc2964ef3adfeb10a64dfc6d06bc2ab87a5081
|
[
"BSD-3-Clause"
] | null | null | null |
from falcor import *
def render_graph_TAA():
loadRenderPassLibrary("Antialiasing.dll")
loadRenderPassLibrary("DepthPass.dll")
loadRenderPassLibrary("ForwardLightingPass.dll")
testTAA = RenderGraph("TAA")
DepthPass = RenderPass("DepthPass", {'depthFormat': ResourceFormat.D32Float})
testTAA.addPass(DepthPass, "DepthPass")
SkyBox = RenderPass("SkyBox")
testTAA.addPass(SkyBox, "SkyBox")
ForwardLightingPass = RenderPass("ForwardLightingPass", {'sampleCount': 1, 'enableSuperSampling': False})
testTAA.addPass(ForwardLightingPass, "ForwardLightingPass")
TAAPass = RenderPass("TAA")
testTAA.addPass(TAAPass, "TAA")
testTAA.addEdge("DepthPass.depth", "ForwardLightingPass.depth")
testTAA.addEdge("DepthPass.depth", "SkyBox.depth")
testTAA.addEdge("SkyBox.target", "ForwardLightingPass.color")
testTAA.addEdge("ForwardLightingPass.color", "TAA.colorIn")
testTAA.addEdge("ForwardLightingPass.motionVecs", "TAA.motionVecs")
testTAA.markOutput("TAA.colorOut")
return testTAA
TAA = render_graph_TAA()
try: m.addGraph(TAA)
except NameError: None
| 41.259259
| 109
| 0.743268
|
669a467c6a3f1e55b33160246a7583aaac3a951c
| 29,915
|
py
|
Python
|
app/library/protocols/base.py
|
imamsolikhin/Python
|
f2ed5a848a37925bd9172f1f7484fd40f2e0a8a5
|
[
"MIT"
] | null | null | null |
app/library/protocols/base.py
|
imamsolikhin/Python
|
f2ed5a848a37925bd9172f1f7484fd40f2e0a8a5
|
[
"MIT"
] | null | null | null |
app/library/protocols/base.py
|
imamsolikhin/Python
|
f2ed5a848a37925bd9172f1f7484fd40f2e0a8a5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Internal module of protocols with abstract base classes.
Applications should not directly import this module, except to make subclasses.
As a reminder: all internal strings, like identifier, should be
represented in UTF-8. Use pynt.xmlns.UTF8 if you need help converting."""
# builtin modules
import types
import logging
import time
import threading # for AsyncInput
# import traceback
# local modules
import exceptions
class BaseIOInput(object):
"""Base I/O input. Abstract class, forming a third part of the BaseInput class, along with BaseLangInput and BaseCommandInput"""
timeout = 30 # default time-out in seconds
# I/O SPECIFIC METHODS
def __init__(self):
"""Prepares the actual underlying I/O, given the parameters given at initialization.
(e.g. hostname, port, filename, url, File object). If possible, delays the actual
opening of the I/O till connect() is called, so that setLoginCredentials() can be
called in the mean time."""
pass
def getTarget(self):
"""Return a human-readable identifier of the I/O object. For example, the hostname of the filename"""
return "baseIO"
def connect(self):
"""Opens the actual I/O connection to file or device. This is called, just before login() and authorize()"""
pass
def disconnect(self):
"""closes the I/O connection. You shouldn't call it more than once. Sets the actually I/O object to None, if any"""
pass
def setDefaultTimeout(self, timeout):
self.timeout = int(timeout)
def login(self):
"""Login to a terminal, using I/O specific (rather than language-specific) routines.
Uses the username and password of the BaseLangInput"""
pass
def sendcommand(self, string):
"""writes a command as-is to the I/O.
If you call sendcommand(), you must also call readmessage() at some point in time, to avoid
stale results."""
raise NotImplementedError("BaseIOInput.sendcommand() is an abstract method. please override in %s" % type(self).__name__)
self.writetolog(string, input=True)
logger = logging.getLogger("protocols")
logger.debug("Sending command %s" % (repr(string)))
def readmessage(self, timeout):
"""Reads text from the terminal up to the next delimiter. Does return the string as-is,
without checking validity. The result MUST be an UTF-8 encoded string.
Should raise a TimeOut in case more than timeout seconds have passed."""
raise NotImplementedError("BaseIOInput.readmessage() is an abstract method. please override in %s" % type(self).__name__)
resultString = ""
self.writetolog(resultString, output=True)
logger = logging.getLogger("protocols")
logger.debug("Received %d bytes of data" % len(resultString))
return resultString
class BaseLangInput(object):
"""Base Language input. Abstract method, forming a third part of the BaseInput class, along with BaseIOInput and BaseCommandInput"""
username = ""
password = ""
terminator = "\r\n" # string that signifies the end of a response message
prompt = ">" # string that signifies the start of an input message
delimiter = "\r\n>" # the delimiter := terminator + prompt
# The distinction is only relevant when waiting for the first prompt or the last terminator before the EOF.
# For most languages, the prompt may be a regular expression (though this is not a requirement)
logfile = None
# LANGUAGE SPECIFIC METHODS
def authorize(self):
"""Authorize with a command, using language specific (rather than I/O-specific) routines.
May call send_and_receive(), but NOT command(), since that may be threaded."""
pass
def deauthorize(self):
"""Deauthorize, prior to disconnecting.
May call send_and_receive(), but NOT command(), since that may be threaded."""
pass
def setPrompt(self, prompt):
self.prompt = prompt
self.delimiter = self.terminator + self.prompt
logger = logging.getLogger("protocols")
logger.debug("Set delimiter to %s" % repr(self.delimiter))
def statusOK(self, status, command):
"""Checks the status. returns True is the status is a succesful,
or raises a CommandFailed, possible with additional information."""
status = bool(status)
if not status:
raise CommandFailed("Unexpected status '%s' from command '%s'" % (status, command))
def makeCommand(self, command):
"""Takes a command, and turns it into a string read to send to the device.
It may add a line break (I/O specific), or identifier in the command (language-specific).
Returns a tuple (identifier, commandstring). The identifier may be None
if there is no way to match input command and output result."""
return (None, command+"\n")
def parseMessage(self, resultString):
"""Takes a message, and parses it into a tripley (resultlines, identifier, status)
The resultline is typically an array of strings, the identifier some thing to match
the result to a given command, and the status is unspecified and is language-specific.
May raise a ParsingError in case the output can't be parsed, but does not
raise an exception if the status is unsuccessful."""
resultLines = resultString.split('\n');
return (resultLines, None, True)
def isAutonomousType(self, identifier, status):
"""Given the identifier and status, decide if the message is autonomous,
and if so, if it is of a certain type. For regular (non-autonomous), return False."""
return False
def setLoginCredentials(self, username, password):
"""set login credentials. Set password to "" if no password is required.
The username is used both for login (e.g. telnet/SSH) and authorize (e.g. TL1).
This assumes there is no overlap between login and authorize, which in practice is always true."""
self.username = username
if password != None:
self.password = password
def setLogFile(self, logfile):
"""Set log file to the given path"""
assert isinstance(logfile, str)
self.logfile = file(logfile, "a")
def closeLogFile(self):
if self.logfile:
self.logfile.close()
self.logfile = None
def writetolog(self, logstring, input=False, output=False):
"""Write to log file"""
if self.logfile:
self.acquireLoglock()
if input:
self.logfile.write("\n==input==\n")
elif output:
self.logfile.write("\n==output==\n")
else:
self.logfile.write("\n==i/o==\n")
self.logfile.write(logstring)
self.releaseLoglock()
class BaseSyncInput(object):
"""Base Command input, Synchronous version.
Abstract class, forming a third part of the BaseInput class, along with BaseIOInput and BaseLangInput.
The synchronous version does not create new threads, and will only send one command at a time to the
I/O. It will block till a response is returned, and process that one."""
autocallbacks = None # Dictionary with Callback functions for autonomous messages
# COMMAND SPECIFIC METHODS
def getmessage(self, identifier, timeout):
"""Given an identifier, waits till the appropriate message is returned by the device.
This function is blocking, although it may raise a TimeOut if nothing was returned in time.
Returns tuple (resultlines, status)."""
endtime = time.time() + timeout
skipcount = 0
logger = logging.getLogger("protocols")
while True:
result = self.readmessage(timeout) # may raise a TimeOut
(resultlines, residentifier, status) = self.parseMessage(result)
autotype = self.isAutonomousType(residentifier, status)
if (autotype != False):
# Autonomous message
if autotype in self.autocallbacks:
callback = self.autocallbacks[autotype]
logger.info("Sending autonomous message (type %s, identifier %s) to %s" % (autotype,residentifier,callback.__name__))
self.callback(callback, resultlines, status)
elif True in self.autocallbacks: # catch-all callback function
callback = self.autocallbacks[True]
logger.info("Sending autonomous message (type %s, identifier %s) to %s" % (autotype,residentifier,callback.__name__))
self.callback(callback, resultlines, status)
else:
logger.warning("Skipping unhandled autonomous message (type %s, identifier %s)" % (autotype,residentifier))
elif identifier == residentifier:
logger.debug("Got matching result for identifier %s" % identifier)
break
else:
skipcount += 1
logger.error("Skipping regular message with identifier %s" % (residentifier))
if time.time() > endtime:
raise exceptions.TimeOut("No reply with correct identifier %s after %d seconds (skipped %d responses)" % (identifier, timeout, skipcount))
resultlines = []
status = False
break
return (resultlines, status)
def send_and_receive(self, command, timeout):
"""Shortcut for makeCommand(), sendcommand(), readmessage(), parseMessage().
This only works for synchronous I/O. For asynchronous I/O, this function
is only used for authorization and de-authorization. Returns a tuple (resultlines, status)."""
(identifier, string) = self.makeCommand(command)
self.sendcommand(string)
(resultlines, status) = self.getmessage(identifier, timeout=timeout)
self.statusOK(status, command)
return (resultlines, status)
def command(self, command, timeout=None):
"""The main functons of BaseInput. Takes a command, and returns the result as an array of strings.
Makes sure the result is a match of the given command, and no error status was raised.
Language, I/O, and sync/async specific."""
if timeout == None:
timeout = self.timeout
(resultlines, status) = self.send_and_receive(command, timeout)
self.statusOK(status, command)
return resultlines
def isCorrectCallback(self, callback):
"""Verifies that the callback function has the proper format: f(lines) or f(lines, status=None).
Returns a boolean; does not raise an exception on error"""
if isinstance(callback, types.FunctionType):
argnames = callback.func_code.co_varnames
argcount = callback.func_code.co_argcount
return (argcount in [1,2])
elif isinstance(callback, types.MethodType):
argcount = callback.func_code.co_argcount
return (argcount in [2,3])
else:
return False
def hasStatusArgument(self, callback):
"""Verifies that the callback function has the proper format: f(lines) or f(lines, status=None).
Returns a boolean; does not raise an exception on error"""
if isinstance(callback, types.FunctionType):
argcount = callback.func_code.co_argcount
return (argcount == 2)
elif isinstance(callback, types.MethodType):
argcount = callback.func_code.co_argcount
return (argcount == 3)
else:
return False
def callbackCommand(self, command, callback, timeout=None):
"""The main functons of BaseInput. Takes a command, and sends the result to the
callback functions. The function returns immediately, and is mostly asynchronous,
if possible by the underlying I/O."""
assert self.isCorrectCallback(callback)
# ("Callback function %s has not the proper argument list: %s(resultlines) or %s(resultline, status=None)", (callback.func_name,callback.func_name,callback.func_name))
if timeout == None:
timeout = self.timeout
(resultlines, status) = self.send_and_receive(command, timeout)
self.statusOK(status, command)
self.callback(callback, resultlines, status=status)
def callback(self, function, resultlines, status=None):
"""Call function with resultlines as argument. Either in a new thread or simply the current thread."""
if self.hasStatusArgument(function):
function(resultlines, status)
else:
function(resultlines)
def setAutonomousCallback(self, callback, autotype=True):
"""Set the function which is called for autonomous messages. If type is set, the function is
only called when isAutonomousType() in Language parser returns the same string"""
assert self.isCorrectCallback(callback)
# ("Callback function %s has not the proper argument list: %s(resultlines) or %s(resultline, status=None)", (callback.func_name,callback.func_name,callback.func_name))
if not self.autocallbacks:
self.autocallbacks = {}
assert autotype != None
logger = logging.getLogger("protocols")
logger.debug("Assigning callback function %s() to callback type %s" % (callback.__name__, autotype))
self.autocallbacks[autotype] = callback
def start(self):
"""Make sure the actual I/O for the file or device is ready. logs in, authorize.
You shouldn't call it more than once"""
logger = logging.getLogger("protocols")
logger.debug("Fetching information from %s using %s" % (self.getTarget(), type(self).__name__))
if not self.autocallbacks:
self.autocallbacks = {}
self.connect()
self.login()
self.authorize()
def stop(self):
"""Deauthorizes, logs out, and closes the I/O connection. You shouldn't call it more than once"""
self.deauthorize()
self.disconnect()
self.closeLogFile()
def acquireMemLock(self):
return True;
def releaseMemLock(self):
return True;
def acquireLoglock(self):
return True;
def releaseLoglock(self):
return True;
class BaseAsyncInput(BaseSyncInput):
"""Base Command input, Asynchronous version.
Abstract class, forming a third part of the BaseInput class, along with BaseIOInput and BaseLangInput.
The asynchronous version uses two threads: one to send commands, and one to receive them.
If command() is used, it is still blocking, but with callbackCommand() multiple commands can be sent
to a device at the same time. This function is obviously thread-safe. Other Input classes wanting to
remain thread safe, should liberally call acquireIOlock() and acquireMemLock(), and release*Lock() of course"""
messages = None # dict (set in createThreads) of identifier: (status, resultlines)
callbacks = None # dict (set in createThreads) of identifier: (callback, timeout). Unset for synchronous messages.
receivethread = None # threading.Thread() object. continuously fetches information from the device.
dorun = False # signal the receivethread to keep running, or to stop.
threadedcallback = False # If True, callbacks are made in a new thread
callbackthread = None # dict of Threads
# COMMAND SPECIFIC METHODS
def send_and_receive(self, command, timeout):
"""Shortcut for makeCommand(), sendcommand(), readmessage(), parseMessage().
This only works for synchronous I/O. For asynchronous I/O, this function
is only used for authorization and de-authorization. Returns a tuple (resultlines, status).
This function is strictly synchronous and does not directly call getmessage(), since that is asynchronous"""
(cmdidentifier, string) = self.makeCommand(command)
self.sendcommand(string)
result = self.readmessage(timeout) # may raise a TimeOut
(resultlines, residentifier, status) = self.parseMessage(result)
self.statusOK(status, command)
if cmdidentifier != residentifier:
raise CommandFailed("Result identifier %s does not match command identifier %s for command %s." % (residentifier, cmdidentifier, command))
return (resultlines, status)
def command(self, command, timeout=None):
"""The main functons of BaseInput. Takes a command, and returns the result as an array of strings.
Makes sure the result is a match of the given command, and no error status was raised.
Language, I/O, and sync/async specific."""
(identifier, string) = self.makeCommand(command)
# self.addIdentifierCallback(identifier, None, timeout)
try:
self.sendcommand(string)
if timeout == None:
timeout = self.timeout
(resultlines, status) = self.getmessage(identifier, timeout=timeout)
self.statusOK(status, command)
except: # all exceptions, including keyboard-interupts
self.stopThreads(timeout=0)
raise
return resultlines
def callbackCommand(self, command, callback, timeout=None):
"""The main functons of BaseInput. Takes a command, and sends the result to the
callback functions. The function returns immediately, and is mostly asynchronous,
if possible by the underlying I/O."""
assert self.isCorrectCallback(callback)
# ("Callback function %s has not the proper argument list: %s(resultlines) or %s(resultline, status=None)", (callback.func_name,callback.func_name,callback.func_name))
try:
(identifier, string) = self.makeCommand(command)
self.addIdentifierCallback(identifier, callback, timeout)
self.sendcommand(string)
except: # all exceptions, including keyboard-interupts
self.stopThreads(timeout=0)
raise
def addIdentifierCallback(self, identifier, callback, timeout=None):
"""Adds parameters for the callback to the callbacks variable"""
if timeout == None:
timeout = self.timeout
self.acquireMemLock()
if identifier in self.callbacks:
raise NetworkException("A command with identifier %s was already sent. Can't use the same identifier more than once in asynchronous mode." % identifier)
logger = logging.getLogger("protocols")
logger.debug("Remember callback function %s() for identifier %s" % (callback.__name__, identifier))
self.callbacks[identifier] = (callback, time.time()+timeout)
self.releaseMemLock()
def getmessage(self, identifier, timeout):
"""Given an identifier, waits till the appropriate message shows up in the messages{} dictionary.
This function is blocking, although it may raise a TimeOut if nothing was returned in time.
Returns tuple (resultlines, status). This function must only be called for async mode. For sync mode, call send_and_receive"""
if identifier in self.callbacks:
raise AssertionError("getmessages() should not be called with an identifier (%s) present in self.callbacks" % identifier)
endtime = time.time() + timeout
while identifier not in self.messages:
time.sleep(0.04)
if time.time() > endtime:
break
if identifier not in self.messages:
raise exceptions.TimeOut("identifier %s not found in messages within %d seconds. Available identifiers: %s" % (identifier, timeout, str(self.messages.keys())))
self.acquireMemLock()
if identifier in self.messages:
(resultlines, status) = self.messages[identifier]
del self.messages[identifier]
self.releaseMemLock()
return (resultlines, status)
def checkTimeouts(self):
"""Check if the timeouts in callbacks{} have not been passed. If it has, a result was received,
but the result was not used."""
# TODO: decide on the return result; still to be written.
pass
def callback(self, function, resultlines, status=None):
"""Call function with resultlines as argument. Either in a new thread or simply the current thread."""
if self.threadedcallback:
name = function.__name__ + " callback"
if self.hasStatusArgument(function):
arguments = (resultlines, status) # create a tuple
else:
arguments = (resultlines,) # create a tuple
callbackthread = threading.Thread(target=function, name=name, args=arguments)
callbackthread.start()
self.callbackthreads.append(callbackthread)
else:
if self.hasStatusArgument(function):
function(resultlines, status)
else:
function(resultlines)
def processMessage(self, message):
"""Calls parseMessage and checks the type of the message. Calls the callback function for autonomous
messages or regular results with a known callback function. Otherwise, simply add the message to
the messages dictionary, so it can be retrieved by getmessage() in another thread."""
logger = logging.getLogger("protocols")
(resultlines, identifier, status) = self.parseMessage(message)
autotype = self.isAutonomousType(identifier, status)
if (autotype != False):
# Autonomous message
if autotype in self.autocallbacks: # specific callback function
callback = self.autocallbacks[autotype]
logger.info("Sending autonomous message (type %s, identifier %s) to %s()" % (autotype,identifier,callback.__name__))
self.callback(callback, resultlines, status=status)
elif True in self.autocallbacks: # catch-all callback function
callback = self.autocallbacks[True]
logger.info("Sending autonomous message (type %s, identifier %s) to %s()" % (autotype,identifier,callback.__name__))
self.callback(callback, resultlines, status=status)
else:
logger.info("Skipping unhandled autonomous message (type %s, identifier %s)" % (autotype,identifier))
return
callback = None
self.acquireMemLock()
if identifier in self.callbacks:
# regular message, with known callback function
(callback, timeout) = self.callbacks[identifier]
del self.callbacks[identifier]
self.releaseMemLock()
if callback:
logger.info("Sending regular message with identifier %s to %s()" % (identifier,callback.__name__))
self.callback(callback, resultlines, status)
else:
# regular message
self.acquireMemLock()
if identifier in self.messages:
raise CommandFailed("Can't append result with identifier %s: a result with the same identifer already exists." % identifier)
logger.debug("Appending message result with identifier %s to messages queue" % (identifier))
self.messages[identifier] = (resultlines, status)
self.releaseMemLock()
def fetchMessages(self):
"""Function in a separate thread. Repeatedly call readmessage(timeout=infinity), and
processMessage() with any possible result. The thread is stopped if dorun is set to False.
Call CheckTimeouts() every once in a while"""
timeout = max(2,int(self.timeout/3)) # a short timeout (max 2 sec.), so we're quickly back in the loop
logger = logging.getLogger("protocols")
logger.debug("Asynchronously fetching messages with %0.1f second interval" % (timeout))
while (self.dorun == True) or (len(self.callbacks) > 0):
try:
message = self.readmessage(timeout=timeout)
# logger.debug("Got %d bytes of data" % (len(message)))
self.processMessage(message)
except exceptions.TimeOut:
logger.debug("Waiting for data")
pass
self.checkTimeouts()
def createThreads(self):
"""Initializes internal variables, and start listening thread. This function is called
after login() and authorize() are called."""
self.messages = {}
self.callbacks = {}
self.callbackthreads = []
name = "Thread-"+self.getTarget()+"-receiver"
self.receivethread = threading.Thread(target=self.fetchMessages, name=name)
self.dorun = True
self.receivethread.start()
def stopThreads(self, timeout=None):
# Signal thread to stop, and stop it with a timeout
logger = logging.getLogger("protocols")
self.dorun = False
if timeout == None:
timeout = 1.2*self.timeout # Add a little margin; we may have to wait for many connections..
logger.debug("Stopping receiver threads (with %d sec timeout)" % timeout)
self.receivethread.join(timeout=timeout)
logger.debug("Stopping %d parser threads (with %d sec timeout each)" % (len(self.callbackthreads), timeout))
for callbackthread in self.callbackthreads:
callbackthread.join(timeout=timeout)
if len(self.messages) > 0:
logger.error("Unprocessed messages left in queue with id %s, after stopping listener thread" % str(self.messages.keys()))
if self.receivethread.isAlive():
logger.error("Receiver thread is still active, despite an attempt to stop it.")
def acquireMemLock(self):
"""Acquires memory lock. This function can only be called after start() has been called"""
return self.memlock.acquire() # blocking
# WARNING: Function with time-out doesn't work very well, because of the delay
# (thread A never got the lock, since thread B held the lock for a long time, and
# got it back before A -- apparently it was not handed out in request order)
# gotlock = False
# endtime = time.time() + 10 # 10 sec timeout
# logger = logging.getLogger("protocols")
# (callerfilename, linenumber, callername, text) = traceback.extract_stack()[-2]
# logger.debug("Acquire memory lock id %s from %s() in file %s by thread %s" % (id(self.loglock), callername, callerfilename, threading.currentThread().getName()))
# while True:
# gotlock = self.memlock.acquire(False) # non-blocking
# if gotlock:
# break
# if time.time() > endtime:
# raise exceptions.TimeOut("Unable to get a memory lock in 10 seconds.")
# time.sleep(0.05)
# return gotlock;
def releaseMemLock(self):
"""Releases memory lock. You MUST never call releaseMemLock() if you didn't acquire it first."""
# logger = logging.getLogger("protocols")
# logger.debug("Release memory lock id %s" % id(self.memlock))
self.memlock.release()
return True;
def acquireLoglock(self):
"""Acquires I/O lock. This function can only be called after start() has been called"""
gotlock = False
endtime = time.time() + 10 # 10 sec timeout
# logger = logging.getLogger("protocols")
# logger.debug("Acquire log lock by thread %s" % (threading.currentThread().getName()))
while True:
gotlock = self.iolock.acquire(False) # non-blocking
if gotlock:
break
if time.time() > endtime:
raise exceptions.TimeOut("Thread %s is unable to get a log lock in 10 seconds." % (threading.currentThread().getName()))
time.sleep(0.05)
return gotlock;
def releaseLoglock(self):
"""Releases I/O lock. You MUST never call releaseMemLock() if you didn't acquire it first."""
# logger = logging.getLogger("protocols")
# logger.debug("Release log lock by thread %s" % (threading.currentThread().getName()))
self.iolock.release()
return True;
def start(self):
"""Make sure the actual I/O for the file or device is ready. logs in, authorize.
You shouldn't call it more than once"""
logger = logging.getLogger("protocols")
logger.debug("Fetching information asynchronous from %s using %s" % (self.getTarget(), type(self).__name__))
if not self.autocallbacks:
self.autocallbacks = {}
self.iolock = threading.Lock()
self.memlock = threading.Lock()
self.connect()
self.login()
self.authorize() # call authorize while still in sync mode. It uses send_and_receive().
self.createThreads()
def stop(self):
"""Deauthorizes, logs out, and closes the I/O connection. You shouldn't call it more than once"""
self.stopThreads()
self.deauthorize() # deauthorize used send_and_receive, and is thus synchronous
self.disconnect()
self.closeLogFile()
self.iolock = None
self.memlock = None
# Note: methods in first class override methods in later classes
class BaseInput(BaseIOInput, BaseLangInput, BaseAsyncInput):
"""A base input class, consisting of three parts working togther:
the I/O, Language and Command part"""
pass
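# A minimal subclass sketch (hypothetical, for illustration): a concrete Input overrides
# the I/O hooks that BaseIOInput leaves abstract, while the language and command
# plumbing is inherited unchanged.
class EchoInput(BaseInput):
    """Toy input that 'talks' to an in-memory queue instead of a real device."""
    def __init__(self):
        self._queue = []
    def getTarget(self):
        return "echo"
    def connect(self):
        self._queue = []
    def disconnect(self):
        self._queue = None
    def sendcommand(self, string):
        # pretend the device echoes every command straight back
        self.writetolog(string, input=True)
        self._queue.append(string)
    def readmessage(self, timeout):
        if not self._queue:
            raise exceptions.TimeOut("no pending messages")
        resultString = self._queue.pop(0)
        self.writetolog(resultString, output=True)
        return resultString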
| 50.44688
| 175
| 0.647434
|
2b837e6f058a500222fb3002be48a9c49688a239
| 4,974
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/apimanagement/v20191201/get_named_value.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/apimanagement/v20191201/get_named_value.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/apimanagement/v20191201/get_named_value.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetNamedValueResult',
'AwaitableGetNamedValueResult',
'get_named_value',
]
@pulumi.output_type
class GetNamedValueResult:
"""
NamedValue details.
"""
def __init__(__self__, display_name=None, id=None, name=None, secret=None, tags=None, type=None, value=None):
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if secret and not isinstance(secret, bool):
raise TypeError("Expected argument 'secret' to be a bool")
pulumi.set(__self__, "secret", secret)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if value and not isinstance(value, str):
raise TypeError("Expected argument 'value' to be a str")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Unique name of NamedValue. It may contain only letters, digits, period, dash, and underscore characters.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def secret(self) -> Optional[bool]:
"""
Determines whether the value is a secret and should be encrypted or not. Default value is false.
"""
return pulumi.get(self, "secret")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence[str]]:
"""
Optional tags that when provided can be used to filter the NamedValue list.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
Value of the NamedValue. Can contain policy expressions. It may not be empty or consist only of whitespace. This property will not be filled on 'GET' operations! Use '/listSecrets' POST request to get the value.
"""
return pulumi.get(self, "value")
class AwaitableGetNamedValueResult(GetNamedValueResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNamedValueResult(
display_name=self.display_name,
id=self.id,
name=self.name,
secret=self.secret,
tags=self.tags,
type=self.type,
value=self.value)
def get_named_value(named_value_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamedValueResult:
"""
NamedValue details.
:param str named_value_id: Identifier of the NamedValue.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['namedValueId'] = named_value_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/v20191201:getNamedValue', __args__, opts=opts, typ=GetNamedValueResult).value
return AwaitableGetNamedValueResult(
display_name=__ret__.display_name,
id=__ret__.id,
name=__ret__.name,
secret=__ret__.secret,
tags=__ret__.tags,
type=__ret__.type,
value=__ret__.value)
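# A minimal usage sketch (hypothetical resource names), typically run inside a Pulumi
# program where an engine is available:
#
#   result = get_named_value(
#       named_value_id="my-named-value",
#       resource_group_name="my-resource-group",
#       service_name="my-apim-service",
#   )
#   pulumi.export("named_value_display_name", result.display_name)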
| 33.836735
| 219
| 0.631283
|
b033da0fcff65dd7f5dac36152d13b478b903f99
| 1,616
|
py
|
Python
|
app/route/notifications/provider.py
|
LifeLaboratory/rosbank_backend
|
b19aed99b7084c14f0827933b7f28d6e51de92bd
|
[
"MIT"
] | 2
|
2019-12-06T23:22:33.000Z
|
2019-12-08T07:18:31.000Z
|
app/route/notifications/provider.py
|
LifeLaboratory/rosbank_backend
|
b19aed99b7084c14f0827933b7f28d6e51de92bd
|
[
"MIT"
] | 56
|
2019-12-06T17:54:07.000Z
|
2019-12-08T04:55:24.000Z
|
app/route/notifications/provider.py
|
LifeLaboratory/rosbank_backend
|
b19aed99b7084c14f0827933b7f28d6e51de92bd
|
[
"MIT"
] | 1
|
2019-12-08T05:04:46.000Z
|
2019-12-08T05:04:46.000Z
|
from app.api.base.base_sql import Sql
class Provider:
"""
Class for working with notifications in the database
"""
@staticmethod
def insert_notification(args):
"""
Add a notification
:param args:
:return:
"""
query = """
insert into notifications ("name", "url", "id_stories")
VALUES ('{name}', '{url}', {id_stories})
returning "id_notification"
"""
return Sql.exec(query=query, args=args)
@staticmethod
def insert_notifications_users(args):
"""
Send a notification to a user
:param args:
:return:
"""
query = """
insert into notifications_users ("id_notification", "id_user", "status", "time", "active")
VALUES ('{id_notification}', '{id_user}', '{status}', NOW(), True)
"""
return Sql.exec(query=query, args=args)
@staticmethod
def get_notifications(args):
"""
Get notifications for a user
:param args:
:return:
"""
query = """
select notifications.* from notifications_users
join notifications on notifications.id_notification = notifications_users.id_notification
where id_user = {id_user} {active} and NOW() > time
"""
return Sql.exec(query=query, args=args)
@staticmethod
def update_profile(args):
"""
Update a user's profile
:param args:
:return:
"""
query = """
update profile
set "description" = '{description}'
where "id_profile" = {id_profile}
"""
return Sql.exec(query=query, args=args)
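# A minimal usage sketch (hypothetical values): each helper receives a dict whose keys
# match the placeholders in its SQL template and delegates execution to Sql.exec.
#
#   Provider.insert_notification({
#       "name": "New story published",
#       "url": "https://example.com/story/1",
#       "id_stories": 1,
#   })
#   Provider.insert_notifications_users({
#       "id_notification": 1, "id_user": 42, "status": "unread",
#   })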
| 26.064516
| 93
| 0.592822
|
2fc88b34bebd049753ba5018fc0d387389be541a
| 11,184
|
py
|
Python
|
library/bigip_device_dns.py
|
Larsende/f5_ansible
|
93b0747ba663128e2c8dfc456dad4653cdde4f38
|
[
"Apache-2.0"
] | 12
|
2016-12-29T16:09:21.000Z
|
2019-06-29T14:12:17.000Z
|
library/bigip_device_dns.py
|
Larsende/f5_ansible
|
93b0747ba663128e2c8dfc456dad4653cdde4f38
|
[
"Apache-2.0"
] | 24
|
2017-05-24T07:56:56.000Z
|
2017-11-30T09:31:56.000Z
|
library/bigip_device_dns.py
|
Larsende/f5_ansible
|
93b0747ba663128e2c8dfc456dad4653cdde4f38
|
[
"Apache-2.0"
] | 26
|
2017-05-31T17:15:32.000Z
|
2021-03-29T03:45:06.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_device_dns
short_description: Manage BIG-IP device DNS settings
description:
- Manage BIG-IP device DNS settings
version_added: "2.2"
options:
cache:
description:
- Specifies whether the system caches DNS lookups or performs the
operation each time a lookup is needed. Please note that this applies
only to Access Policy Manager features, such as ACLs, web application
rewrites, and authentication.
    default: disabled
choices:
- enabled
- disabled
name_servers:
description:
- A list of name servers that the system uses to validate DNS lookups
forwarders:
deprecated: Deprecated in 2.4. Use the GUI or edit named.conf.
description:
- A list of BIND servers that the system can use to perform DNS lookups
search:
description:
- A list of domains that the system searches for local domain lookups,
to resolve local host names.
ip_version:
description:
- Specifies whether the DNS specifies IP addresses using IPv4 or IPv6.
choices:
- 4
- 6
state:
description:
- The state of the variable on the system. When C(present), guarantees
that an existing variable is set to C(value).
default: present
choices:
- absent
- present
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Set the DNS settings on the BIG-IP
bigip_device_dns:
name_servers:
- 208.67.222.222
- 208.67.220.220
search:
- localdomain
- lab.local
password: secret
server: lb.mydomain.com
user: admin
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
cache:
description: The new value of the DNS caching
returned: changed
type: string
sample: enabled
name_servers:
description: List of name servers that were set
returned: changed
type: list
sample: ['192.0.2.10', '172.17.12.10']
search:
description: List of search domains that were set
returned: changed
type: list
sample: ['192.0.2.10', '172.17.12.10']
ip_version:
description: IP version that was set that DNS will specify IP addresses in
returned: changed
type: int
sample: 4
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
'''
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
try:
    from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
    # BigIpTxContext is used below for transactional updates; assuming the
    # f5-sdk layout, it is the TransactionContextManager from f5.bigip.contexts.
    from f5.bigip.contexts import TransactionContextManager as BigIpTxContext
except ImportError:
    HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'dhclient.mgmt': 'dhcp',
'dns.cache': 'cache',
'nameServers': 'name_servers',
'include': 'ip_version'
}
api_attributes = [
'nameServers', 'search', 'include'
]
updatables = [
'cache', 'name_servers', 'search', 'ip_version'
]
returnables = [
'cache', 'name_servers', 'search', 'ip_version'
]
absentables = [
'name_servers', 'search'
]
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def search(self):
result = []
if self._values['search'] is None:
return None
for server in self._values['search']:
result.append(str(server))
return result
@property
def name_servers(self):
result = []
if self._values['name_servers'] is None:
return None
for server in self._values['name_servers']:
result.append(str(server))
return result
@property
def cache(self):
if str(self._values['cache']) in ['enabled', 'enable']:
return 'enable'
else:
return 'disable'
@property
def dhcp(self):
valid = ['enable', 'enabled']
return True if self._values['dhcp'] in valid else False
@property
def forwarders(self):
if self._values['forwarders'] is None:
return None
else:
raise F5ModuleError(
"The modifying of forwarders is not supported."
)
@property
def ip_version(self):
if self._values['ip_version'] in [6, '6', 'options inet6']:
return "options inet6"
elif self._values['ip_version'] in [4, '4', '']:
return ""
else:
return None
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.update()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def read_current_from_device(self):
want_keys = ['dns.cache']
result = dict()
dbs = self.client.api.tm.sys.dbs.get_collection()
for db in dbs:
if db.name in want_keys:
result[db.name] = db.value
dns = self.client.api.tm.sys.dns.load()
attrs = dns.attrs
if 'include' not in attrs:
attrs['include'] = 4
result.update(attrs)
return Parameters(result)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
tx = self.client.api.tm.transactions.transaction
with BigIpTxContext(tx) as api:
cache = api.tm.sys.dbs.db.load(name='dns.cache')
dns = api.tm.sys.dns.load()
# Empty values can be supplied, but you cannot supply the
# None value, so we check for that specifically
if self.want.cache is not None:
cache.update(value=self.want.cache)
if params:
dns.update(**params)
def _absent_changed_options(self):
changed = {}
for key in Parameters.absentables:
if getattr(self.want, key) is not None:
set_want = set(getattr(self.want, key))
set_have = set(getattr(self.have, key))
set_new = set_have - set_want
if set_new != set_have:
changed[key] = list(set_new)
if changed:
self.changes = Parameters(changed)
return True
return False
def should_absent(self):
result = self._absent_changed_options()
if result:
return True
return False
def absent(self):
self.have = self.read_current_from_device()
if not self.should_absent():
return False
if self.client.check_mode:
return True
self.absent_on_device()
return True
def absent_on_device(self):
params = self.changes.api_params()
tx = self.client.api.tm.transactions.transaction
with BigIpTxContext(tx) as api:
dns = api.tm.sys.dns.load()
dns.update(**params)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
cache=dict(
required=False,
choices=['disabled', 'enabled', 'disable', 'enable'],
default=None
),
name_servers=dict(
required=False,
default=None,
type='list'
),
forwarders=dict(
required=False,
default=None,
type='list'
),
search=dict(
required=False,
default=None,
type='list'
),
ip_version=dict(
required=False,
default=None,
choices=[4, 6],
type='int'
),
state=dict(
required=False,
default='present',
choices=['absent', 'present']
)
)
self.required_one_of = [
['name_servers', 'search', 'forwarders', 'ip_version', 'cache']
]
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name,
required_one_of=spec.required_one_of
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| 28.313924
| 91
| 0.587804
|
4edee4c3e80587cd1b32084e655c43d47612a75e
| 2,460
|
py
|
Python
|
main.py
|
martinetmayank/telegram-chats
|
ad79f3357d657415f57c83f219fc3ad7d57081eb
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
martinetmayank/telegram-chats
|
ad79f3357d657415f57c83f219fc3ad7d57081eb
|
[
"Apache-2.0"
] | 1
|
2021-04-30T21:26:01.000Z
|
2021-04-30T21:26:01.000Z
|
main.py
|
martinetmayank/telegram-chats
|
ad79f3357d657415f57c83f219fc3ad7d57081eb
|
[
"Apache-2.0"
] | null | null | null |
from open_config_files import open_config
from write_message import write_file
from telethon import TelegramClient
from telethon.errors import SessionPasswordNeededError
from telethon.tl.functions.messages import GetHistoryRequest
from telethon.tl.types import PeerChannel
async def main(phone):
await client.start()
print("Client Created")
    if not await client.is_user_authorized():
        await client.send_code_request(phone)
        try:
            # Sign in with the login code that Telegram sends to the account.
            await client.sign_in(phone, input('Enter the code: '))
        except SessionPasswordNeededError:
            # Two-step verification is enabled, so the account password is needed.
            await client.sign_in(password=input('Password: '))
user_input_channel = 'https://t.me/IntegratedThoughtsQuotes'
# user_input_channel = input('Channel Entity (URL or ID): ')
if user_input_channel.isdigit():
entity = PeerChannel(int(user_input_channel))
else:
entity = user_input_channel
# Converting the entity into valid Telegram User | Channel | Group | Chat.
channel = await client.get_entity(entity)
# Setting parameters.
offset_id = 0
limit = 100
all_messages = list()
total_messages = 0
total_count_limit = 0
while True:
print("Current Offset ID is:", offset_id,
"; Total Messages:", total_messages)
history = await client(GetHistoryRequest(
peer=channel,
offset_id=offset_id,
offset_date=None,
add_offset=0,
limit=limit,
max_id=0,
min_id=0,
hash=0
))
if not history.messages:
break
messages = history.messages
for message in messages:
dict_msg = message.to_dict()
# print(dict_msg)
for key in dict_msg:
if str(key) == 'message':
all_messages.append(dict_msg[key])
offset_id = messages[len(messages) - 1].id
total_messages = len(all_messages)
if total_count_limit != 0 and total_messages >= total_count_limit:
break
for line in all_messages:
write_file(str(all_messages.index(line) + 1), line)
if __name__ == '__main__':
api_id, api_hash, username, phone = open_config()
client = TelegramClient(username, api_id, api_hash)
with client:
client.loop.run_until_complete(main(phone))
| 27.333333
| 79
| 0.610976
|
038a3e7537210deacb193d030f094de7d0b2efbd
| 6,264
|
py
|
Python
|
koufopanos_1991.py
|
megcrow/kinetic-schemes
|
d1f5c5cc95481554e2d68a69b3b8663f9501df3d
|
[
"MIT"
] | 7
|
2018-10-25T15:38:39.000Z
|
2022-03-12T02:38:30.000Z
|
koufopanos_1991.py
|
megcrow/kinetic-schemes
|
d1f5c5cc95481554e2d68a69b3b8663f9501df3d
|
[
"MIT"
] | 1
|
2020-03-02T03:05:04.000Z
|
2020-03-02T18:27:31.000Z
|
koufopanos_1991.py
|
megcrow/kinetic-schemes
|
d1f5c5cc95481554e2d68a69b3b8663f9501df3d
|
[
"MIT"
] | 5
|
2018-12-23T22:10:03.000Z
|
2020-08-08T09:01:39.000Z
|
"""
Plot yields from primary and secondary reactions as determined by the
Koufopanos 1991 kinetic scheme for biomass pyrolysis. Note that this scheme
focuses on wood conversion and char yield. Product of volatiles and gas is
lumped together as (V+G) so individual tar and gas component is not provided.
Reference:
Koufopanos, 1991. The Canadian Journal of Chemical Engineering, 69, pp 907–915.
"""
import numpy as np
import matplotlib.pyplot as py
# Parameters
# ------------------------------------------------------------------------------
T = 773 # temperature for rate constants, K
dt = 0.01 # time step, delta t
tmax = 25 # max time, s
t = np.linspace(0, tmax, num=int(tmax/dt))    # time vector (num must be an integer)
nt = len(t) # total number of time steps
# Function for Koufopanos 1991 Kinetic Scheme
# ------------------------------------------------------------------------------
def koufopanos(B, VG1, C1, VG2, C2, T, dt, s=1):
"""
Primary and secondary kinetic reactions from Koufopanos 1991 paper. Notice
that volatiles and gases are grouped together as (Volatiles + Gases) which
are labeled here as VG.
Parameters
----------
B = biomass concentration
VG1 = (volatiles + gas)1 concentration
C1 = char1 concentration
VG2 = (volatiles + gas)2 concentration
C2 = char2 concentration
dt = time step, s
s = 1 primary reactions only, 2 primary and secondary reactions
Returns
-------
nB = new biomass concentration
nVG1 = new (volatiles + char)1 concentration
nC1 = new char1 concentration
nVG2 = new (volatiles + gas)2 concentration
nC2 = new char2 concentration
"""
# A as pre-factor (1/s) and E as activation energy (kJ/mol)
A1 = 9.973e-5; G1 = 17254.4; L1 = -9061227 # biomass -> (volatiles + gases)1
A2 = 1.068e-3; G2 = 10224.4; L2 = -6123081 # biomass -> char1
A3 = 5.7e5; E3 = 81 # (vol+gases)1 -> (vol+gases)2 + char2
R = 0.008314 # universal gas constant, kJ/mol*K
# reaction rate constant for each reaction, 1/s
K1 = A1 * np.exp((G1 / T) + (L1 / T**2)) # biomass -> (volatiles + gases)1
K2 = A2 * np.exp((G2 / T) + (L2 / T**2)) # biomass -> char1
K3 = A3 * np.exp(-E3 / (R * T)) # (vol+gases)1 -> (vol+gases)2 + char2
if s == 1:
# primary reactions only
rB = -(K1+K2)*B # biomass rate
rVG1 = K1*B # (volatiles + gases)1 rate
rC1 = K2*B # char1 rate
rVG2 = 0 # (volatiles + gases)2 rate
rC2 = 0 # char2 rate
nB = B + rB*dt # update biomass concentration
nVG1 = VG1 + rVG1*dt # update (volatiles + gases)1 concentration
nC1 = C1 + rC1*dt # update char1 concentration
nVG2 = VG2 + rVG2*dt # update (volatiles + gases)2 concentration
nC2 = C2 + rC2*dt # update char2 concentration
elif s == 2:
# primary and secondary reactions
rB = -(K1+K2)*B # biomass rate
        rVG1 = K1*B         # (volatiles + gases)1 rate
rC1 = K2*B - K3*C1 # char1 rate
rVG2 = K3*C1 # (volatiles + gases)2 rate
rC2 = K3*C1 # char2 rate
nB = B + rB*dt # update biomass concentration
nVG1 = VG1 + rVG1*dt # update (volatiles + gases)1 concentration
nC1 = C1 + rC1*dt # update char1 concentration
nVG2 = VG2 + rVG2*dt # update (volatiles + gases)2 concentration
nC2 = C2 + rC2*dt # update char2 concentration
return nB, nVG1, nC1, nVG2, nC2
# Product from Kinetic Scheme
# ------------------------------------------------------------------------------
# Assume initial concentration of B(0) = 1 and everything else initially at zero
# such as VG(0) = C(0) = 0 where VG is (Volatiles + Gases) and C is Char.
# concentrations reported on a mass basis in kg/m^3
# B = biomass, VG = (volatiles + gases), C = char concentration; pvg and pc
# below are the lumped (volatiles + gases) and char totals
# store concentrations from primary reactions at each time step
B = np.ones(nt) # biomass concentration vector
VG1 = np.zeros(nt) # (volatiles + gases)1 concentration vector
C1 = np.zeros(nt) # char1 concentration vector
VG2 = np.zeros(nt) # (volatiles + gases)2 concentration vector
C2 = np.zeros(nt) # char2 concentration vector
# store concentrations from primary and secondary reactions at each time step
B_2 = np.ones(nt) # biomass concentration vector
VG1_2 = np.zeros(nt) # (volatiles + gases)1 concentration vector
C1_2 = np.zeros(nt) # char1 concentration vector
VG2_2 = np.zeros(nt) # (volatiles + gases)2 concentration vector
C2_2 = np.zeros(nt) # char2 concentration vector
# products from primary reactions only
for i in range(1, nt):
B[i], VG1[i], C1[i], VG2[i], C2[i] = koufopanos(B[i-1], VG1[i-1], C1[i-1], VG2[i-1], C2[i-1], T, dt)
# products from primary and secondary reactions
for i in range(1, nt):
B_2[i], VG1_2[i], C1_2[i], VG2_2[i], C2_2[i] = koufopanos(B_2[i-1], VG1_2[i-1], C1_2[i-1], VG2_2[i-1], C2_2[i-1], T, dt, s=2)
# totals from primary reactions only
pvg = VG1 + VG2
pc = C1 + C2
# totals from primary and secondary reactions, assume VG1 -> (VG + C)2 where
# components in the group (VG + C)2 = 1/2*VG2 + 1/2*C2
pvg_2 = VG1_2 + 0.5*VG2_2
pc_2 = C1_2 + 0.5*C2_2
# mass balance to check results
mt = B + pvg + pc
mt2 = B_2 + pvg_2 + pc_2
# Plot Results
# ------------------------------------------------------------------------------
py.ion()
py.close('all')
py.figure(1)
py.plot(t, B, lw=2, label='B')
py.plot(t, pvg, lw=2, label='(V+G)$_1$')
py.plot(t, pc, lw=2, label='Char$_1$')
py.title('Koufopanos 1991 primary reactions at T = {} K'.format(T))
py.xlabel('Time (s)')
py.ylabel('Concentration (normalized mass basis)')
py.legend(loc='best', numpoints=1)
py.grid()
py.figure(2)
py.plot(t, B_2, lw=2, label='B')
py.plot(t, pvg_2, lw=2, label='(V+G)')
py.plot(t, pc_2, lw=2, label='Char')
py.title('Koufopanos 1991 primary and secondary reactions at T = {} K'.format(T))
py.xlabel('Time (s)')
py.ylabel('Concentration (normalized mass basis)')
py.legend(loc='best', numpoints=1)
py.grid()
| 39.396226
| 129
| 0.589879
|
9fa56c2d7c55b5a48c4503f094831a63d4bff1e5
| 2,438
|
py
|
Python
|
ingestion/examples/airflow/dags/airflow_metadata_to_es.py
|
mosiac1/OpenMetadata
|
21c14d257c6ae7ed2aad2a9ccff2c3a3f1594681
|
[
"Apache-2.0"
] | 1
|
2022-03-25T19:17:49.000Z
|
2022-03-25T19:17:49.000Z
|
ingestion/examples/airflow/dags/airflow_metadata_to_es.py
|
mosiac1/OpenMetadata
|
21c14d257c6ae7ed2aad2a9ccff2c3a3f1594681
|
[
"Apache-2.0"
] | null | null | null |
ingestion/examples/airflow/dags/airflow_metadata_to_es.py
|
mosiac1/OpenMetadata
|
21c14d257c6ae7ed2aad2a9ccff2c3a3f1594681
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from datetime import timedelta
from airflow import DAG
try:
from airflow.operators.python import PythonOperator
except ModuleNotFoundError:
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
from metadata.ingestion.api.workflow import Workflow
default_args = {
"owner": "user_name",
"retries": 3,
"retry_delay": timedelta(minutes=2),
"execution_timeout": timedelta(minutes=60),
"schedule_interval": "0 */1 * * *",
}
config = """
{
"source": {
"type": "metadata_elasticsearch",
"serviceName": "openMetadata",
"serviceConnection": {
"config":{
"type":"MetadataES",
"includeTables": "true",
"includeUsers": "true",
"includeTopics": "true",
"includeDashboards": "true",
"limitRecords": 10
}
},
"sourceConfig":{"config":{}}
},
"sink": {
"type": "elasticsearch",
"config": {
"index_tables": "true",
"index_topics": "true",
"index_dashboards": "true",
"es_host": "localhost",
"es_port": 9200
}
},
"workflowConfig": {
"openMetadataServerConfig": {
"hostPort": "http://localhost:8585/api",
"authProvider": "no-auth"
}
}
}
"""
def metadata_ingestion_workflow():
workflow_config = json.loads(config)
workflow = Workflow.create(workflow_config)
workflow.execute()
workflow.raise_from_status()
workflow.print_status()
workflow.stop()
with DAG(
"index_metadata",
default_args=default_args,
description="An example DAG which runs a OpenMetadata ingestion workflow",
start_date=days_ago(1),
is_paused_upon_creation=True,
catchup=False,
) as dag:
ingest_task = PythonOperator(
task_id="ingest_using_recipe",
python_callable=metadata_ingestion_workflow,
)
| 26.5
| 78
| 0.673503
|
e3e298ca688a26db589b69abd74fbca8a2f67532
| 419
|
py
|
Python
|
demo/with_wsgi/ops/tornado.py
|
marco-souza/falsy
|
0dd6f792061ee833f308372f0204dea0ad9bf2d1
|
[
"MIT"
] | 127
|
2017-01-05T14:55:50.000Z
|
2022-02-01T06:02:49.000Z
|
demo/with_wsgi/ops/tornado.py
|
marco-souza/falsy
|
0dd6f792061ee833f308372f0204dea0ad9bf2d1
|
[
"MIT"
] | 9
|
2017-03-08T21:38:16.000Z
|
2022-02-01T06:44:09.000Z
|
demo/with_wsgi/ops/tornado.py
|
marco-souza/falsy
|
0dd6f792061ee833f308372f0204dea0ad9bf2d1
|
[
"MIT"
] | 26
|
2017-01-12T08:56:15.000Z
|
2022-02-01T06:02:52.000Z
|
import tornado.web
import tornado.wsgi
class TornadoHandler(tornado.web.RequestHandler):
def get(self):
self.write("<h1 style='color:blue'>Hello Tornado!</h1>")
PRE_TORNADO = 'tornado'
def pre_tornado(route):
return '/' + PRE_TORNADO + '/' + route.lstrip('/')
application = tornado.web.Application([
(pre_tornado('test'), TornadoHandler),
])
tornado_app = tornado.wsgi.WSGIAdapter(application)
| 23.277778
| 64
| 0.704057
|
d68187f30d94c0349e2126f6a43fbdddae9a4195
| 1,069
|
py
|
Python
|
tests/config_test.py
|
xbraininc/viraal
|
4fe07e231a434eea4dbc1b6c3d60ed631e03dc6b
|
[
"Apache-2.0"
] | 8
|
2020-06-02T08:46:02.000Z
|
2021-10-13T08:00:12.000Z
|
tests/config_test.py
|
xbraininc/viraal
|
4fe07e231a434eea4dbc1b6c3d60ed631e03dc6b
|
[
"Apache-2.0"
] | null | null | null |
tests/config_test.py
|
xbraininc/viraal
|
4fe07e231a434eea4dbc1b6c3d60ed631e03dc6b
|
[
"Apache-2.0"
] | 2
|
2020-06-19T01:50:58.000Z
|
2021-03-08T18:33:11.000Z
|
from viraal.config import pass_conf, call_if
from omegaconf import OmegaConf
import logging
logger = logging.getLogger(__name__)
def f(*args, **kwargs):
return args, kwargs
class TestConfigUtils:
def test_pass_conf(self):
conf = OmegaConf.create({'config':{'f':{'c': 2, 'd': 10}}})
args, kwargs = pass_conf(f, conf, 'config.f')(999,42)
assert args[0] == 999 and args[1] == 42 and kwargs['c'] == 2 and kwargs['d'] == 10
args, kwargs = pass_conf(f, conf, 'config.f')(999,42, c=-10)
assert args[0] == 999 and args[1] == 42 and kwargs['c'] == -10 and kwargs['d'] == 10
def test_call_if(self):
l = [0,1]
@call_if(True)
def test1(a,b):
l[0] = a
l[1] = b
return a,b
r = test1(1,2)
assert test1(1,2) == (1,2) and l[0] == 1 and l[1] == 2
l = [0,1]
@call_if(False)
def test2(a,b):
l[0] = a
l[1] = b
return a,b
assert test2(1,2) is None and l[0] == 0 and l[1] == 1
| 28.891892
| 92
| 0.507951
|
180bd99884ce9f901d6431310e4271ba3d5d7219
| 303
|
py
|
Python
|
models/Scalar_U_v3_UFO/CT_couplings.py
|
jlrainbolt/MG5_v2_6_1
|
241f0c6f309342d6e8b813284467b2edd393c7d6
|
[
"NCSA"
] | null | null | null |
models/Scalar_U_v3_UFO/CT_couplings.py
|
jlrainbolt/MG5_v2_6_1
|
241f0c6f309342d6e8b813284467b2edd393c7d6
|
[
"NCSA"
] | null | null | null |
models/Scalar_U_v3_UFO/CT_couplings.py
|
jlrainbolt/MG5_v2_6_1
|
241f0c6f309342d6e8b813284467b2edd393c7d6
|
[
"NCSA"
] | null | null | null |
# This file was automatically created by FeynRules 2.3.29
# Mathematica version: 10.3.0 for Mac OS X x86 (64-bit) (October 9, 2015)
# Date: Fri 16 Feb 2018 10:57:27
from object_library import all_couplings, Coupling
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
| 25.25
| 80
| 0.749175
|
2b973d315f46c9c5e8845b81fcb207cbd34a87b7
| 464
|
py
|
Python
|
Chapter04/namedtuple_sales.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | 13
|
2018-06-21T01:44:49.000Z
|
2021-12-01T10:49:53.000Z
|
Chapter04/namedtuple_sales.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | null | null | null |
Chapter04/namedtuple_sales.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | 6
|
2018-10-05T08:29:24.000Z
|
2022-01-11T14:49:50.000Z
|
In [12]: salesReceipt = collections.namedtuple("salesReceipt", ["storeID", "saleDate", "saleAmount", "totalGuests"])
In [13]: store22 = salesReceipt(22, "12-14-2017", 45.32, 3)
In [14]: store15 = salesReceipt(15, "12-14-2017", 22.50, 1)
In [15]: print("Store ID = ", store22.storeID)
Store ID = 22
In [16]: print("Sales amount = ", store15.saleAmount)
Sales amount = 22.5
In [17]: for i in store22:
...: print(i)
...:
22
12-14-2017
45.32
3
| 23.2
| 116
| 0.622845
|
965863c8d420e2073fd710c8b9a3410adc778bcf
| 3,195
|
py
|
Python
|
tests/fire_groups/test_cone_of_fire.py
|
spascou/ps2-analysis
|
00f99b009d15d4c401a3338ddd0408ac7eedcc0b
|
[
"MIT"
] | 2
|
2020-06-25T17:19:05.000Z
|
2020-10-13T06:08:39.000Z
|
tests/fire_groups/test_cone_of_fire.py
|
spascou/ps2-analysis
|
00f99b009d15d4c401a3338ddd0408ac7eedcc0b
|
[
"MIT"
] | null | null | null |
tests/fire_groups/test_cone_of_fire.py
|
spascou/ps2-analysis
|
00f99b009d15d4c401a3338ddd0408ac7eedcc0b
|
[
"MIT"
] | null | null | null |
from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire
def test_min_cof_angle():
cof: ConeOfFire = ConeOfFire(
max_angle=2.0,
min_angle=1.0,
bloom=0.1,
recovery_rate=10.0,
recovery_delay=100,
multiplier=2.0,
moving_multiplier=2.0,
pellet_spread=0.0,
)
assert cof.min_cof_angle(moving=False) == 2.0
assert cof.min_cof_angle(moving=True) == 4.0
def test_max_cof_angle():
cof: ConeOfFire = ConeOfFire(
max_angle=2.0,
min_angle=1.0,
bloom=0.1,
recovery_rate=10.0,
recovery_delay=100,
multiplier=2.0,
moving_multiplier=2.0,
pellet_spread=0.0,
)
assert cof.max_cof_angle(moving=False) == 4.0
assert cof.max_cof_angle(moving=True) == 8.0
def test_apply_bloom():
cof: ConeOfFire = ConeOfFire(
max_angle=2.0,
min_angle=1.0,
bloom=0.1,
recovery_rate=10.0,
recovery_delay=100,
multiplier=2.0,
moving_multiplier=2.0,
pellet_spread=0.0,
)
assert cof.apply_bloom(current=1.0, moving=False) == 1.1
assert cof.apply_bloom(current=2.0, moving=False) == 2.1
assert cof.apply_bloom(current=3.9, moving=False) == 4.0
assert cof.apply_bloom(current=4.0, moving=False) == 4.0
assert cof.apply_bloom(current=4.1, moving=False) == 4.0
def test_recover():
cof: ConeOfFire = ConeOfFire(
max_angle=2.0,
min_angle=1.0,
bloom=0.1,
recovery_rate=10.0,
recovery_delay=100,
multiplier=1.0,
moving_multiplier=1.0,
pellet_spread=0.0,
)
assert cof.recover(current=2.0, time=10) == 1.9
assert cof.recover(current=2.0, time=50) == 1.5
assert cof.recover(current=2.0, time=100) == 1.0
assert cof.recover(current=2.0, time=200) == 1.0
assert cof.recover(current=2.0, time=300) == 1.0
cof: ConeOfFire = ConeOfFire(
max_angle=2.0,
min_angle=1.0,
bloom=0.1,
recovery_rate=0.0,
recovery_delay=100,
multiplier=1.0,
moving_multiplier=1.0,
pellet_spread=0.0,
)
assert cof.recover(current=2.0, time=1000) == 2.0
def test_recover_time():
cof: ConeOfFire = ConeOfFire(
max_angle=2.0,
min_angle=1.0,
bloom=0.1,
recovery_rate=10.0,
recovery_delay=100,
multiplier=1.0,
moving_multiplier=2.0,
pellet_spread=0.0,
)
assert cof.recover_time(current=2.0) == 200
cof: ConeOfFire = ConeOfFire(
max_angle=2.0,
min_angle=1.0,
bloom=0.1,
recovery_rate=0.0,
recovery_delay=100,
multiplier=1.0,
moving_multiplier=2.0,
pellet_spread=0.0,
)
assert cof.recover_time(current=2.0) == -1
def test_max_recover_time():
cof: ConeOfFire = ConeOfFire(
max_angle=2.0,
min_angle=1.0,
bloom=0.1,
recovery_rate=10.0,
recovery_delay=100,
multiplier=2.0,
moving_multiplier=2.0,
pellet_spread=0.0,
)
assert cof.max_recover_time(moving=False) == 400
assert cof.max_recover_time(moving=True) == 800
| 24.767442
| 60
| 0.598748
|
de2f3fc9a6823533047ccbc8f88278a10065156f
| 4,478
|
py
|
Python
|
configs/fcos/ddb_v3_no_improvement_r50_weighted_loss_bbox_consistency_warmup_caffe_fpn_gn_1x_4gpu_nvidia.py
|
Lanselott/mmdetection
|
03ce0a87f4d52f4adf4f78fd39ad30b2da394376
|
[
"Apache-2.0"
] | null | null | null |
configs/fcos/ddb_v3_no_improvement_r50_weighted_loss_bbox_consistency_warmup_caffe_fpn_gn_1x_4gpu_nvidia.py
|
Lanselott/mmdetection
|
03ce0a87f4d52f4adf4f78fd39ad30b2da394376
|
[
"Apache-2.0"
] | null | null | null |
configs/fcos/ddb_v3_no_improvement_r50_weighted_loss_bbox_consistency_warmup_caffe_fpn_gn_1x_4gpu_nvidia.py
|
Lanselott/mmdetection
|
03ce0a87f4d52f4adf4f78fd39ad30b2da394376
|
[
"Apache-2.0"
] | null | null | null |
# model settings
model = dict(
type='FCOS',
pretrained='open-mmlab://resnet50_caffe',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
extra_convs_on_inputs=False, # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='DDBV3NPHead',
num_classes=81,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
apply_conditional_consistency_on_regression=False,
mask_origin_bbox_loss=False,
origin_bbox_loss_downgrade=False,
iou_delta = 0,
apply_iou_cache=False,
consistency_weight=False,
consistency_warmup=600,
hook_debug=True,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_sorted_bbox=dict(type='IoULoss', loss_weight=1.0),
bd_threshold=0.0,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
loss_dist_scores=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/coco/data/2017/'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'images/train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'images/val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'images/val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=0.0001,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/fcos_r50_caffe_fpn_gn_1x_4gpu'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 30.256757
| 75
| 0.623493
|
a9fce999914867ceb037d354f9be935bd891430e
| 917
|
py
|
Python
|
monasca_persister/repositories/abstract_repository.py
|
openstack/monasca-persister
|
49e654a3c914122d352c276aea888ce160786e31
|
[
"Apache-2.0"
] | 25
|
2015-10-18T02:54:55.000Z
|
2020-01-05T00:03:33.000Z
|
monasca_persister/repositories/abstract_repository.py
|
openstack/monasca-persister
|
49e654a3c914122d352c276aea888ce160786e31
|
[
"Apache-2.0"
] | 4
|
2016-06-13T22:05:06.000Z
|
2021-06-04T06:06:06.000Z
|
monasca_persister/repositories/abstract_repository.py
|
openstack/monasca-persister
|
49e654a3c914122d352c276aea888ce160786e31
|
[
"Apache-2.0"
] | 24
|
2015-10-20T13:31:19.000Z
|
2021-11-19T09:30:26.000Z
|
# (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
class AbstractRepository(object, metaclass=abc.ABCMeta):
def __init__(self):
super(AbstractRepository, self).__init__()
@abc.abstractmethod
def process_message(self, message):
pass
@abc.abstractmethod
def write_batch(self, data_points):
pass
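# A minimal sketch of a concrete repository, illustrative only: real
# implementations in this project persist to a specific backing store.
class InMemoryRepository(AbstractRepository):
    def __init__(self):
        super(InMemoryRepository, self).__init__()
        self.batches = []
    def process_message(self, message):
        # Turn a raw message into a data point; here it is passed through as-is.
        return message
    def write_batch(self, data_points):
        # Persist a batch of data points; here they are only kept in memory.
        self.batches.append(list(data_points))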
| 30.566667
| 70
| 0.742639
|
56cf379510c331643250586b8f8564e6f8477813
| 19,375
|
py
|
Python
|
tensorflow/models/embedding/word2vec.py
|
deepakmuralidharan/tensorflow
|
f40e41f9c71ef2865f96f3db3cea2909797fe2a3
|
[
"Apache-2.0"
] | 23
|
2016-02-04T21:08:43.000Z
|
2022-01-14T13:22:33.000Z
|
tensorflow/models/embedding/word2vec.py
|
deepakmuralidharan/tensorflow
|
f40e41f9c71ef2865f96f3db3cea2909797fe2a3
|
[
"Apache-2.0"
] | 2
|
2016-05-31T16:38:55.000Z
|
2018-12-30T20:17:05.000Z
|
tensorflow/models/embedding/word2vec.py
|
deepakmuralidharan/tensorflow
|
f40e41f9c71ef2865f96f3db3cea2909797fe2a3
|
[
"Apache-2.0"
] | 20
|
2016-02-15T17:31:02.000Z
|
2020-01-12T08:18:48.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "File consisting of analogies of four tokens."
"embedding 2 - embedding 1 + embedding 3 should be close "
"to embedding 4."
"E.g. https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy('france', 'paris', 'russia') and "
"model.nearby(['proton', 'elephant', 'maxwell']")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval.")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval.")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
self._read_analogies()
def _read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [emb_dim].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size, 1]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled, 1]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
    # We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
true_logits, tf.ones_like(true_logits))
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
sampled_logits, tf.zeros_like(sampled_logits))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
return nce_loss_tensor
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vectors on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.scalar_summary("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
f.write("%s %d\n" % (tf.compat.as_text(opts.vocab_words[i]),
opts.vocab_counts[i]))
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(opts.save_path,
graph_def=self._session.graph_def)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
      time.sleep(opts.statistics_interval)  # Reports our progress once in a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
opts.save_path + "model",
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
total = self._analogy_questions.shape[0]
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
return c
return "unknown"
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
model = Word2Vec(opts, session)
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy('france', 'paris', 'russia')
# [1]: model.nearby(['proton', 'elephant', 'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
| 36.625709
| 80
| 0.648103
|
4f3bfc71c8cbbdb514ffc9e35b587f8d9cfca816
| 823
|
py
|
Python
|
Examples/AdvancedUsage/Verify/VerifyDigital.py
|
groupdocs-signature-cloud/groupdocs-signature-cloud-python-samples
|
ff2de5acfc00421b2ccf3ae5ddb199261f2f5449
|
[
"MIT"
] | null | null | null |
Examples/AdvancedUsage/Verify/VerifyDigital.py
|
groupdocs-signature-cloud/groupdocs-signature-cloud-python-samples
|
ff2de5acfc00421b2ccf3ae5ddb199261f2f5449
|
[
"MIT"
] | null | null | null |
Examples/AdvancedUsage/Verify/VerifyDigital.py
|
groupdocs-signature-cloud/groupdocs-signature-cloud-python-samples
|
ff2de5acfc00421b2ccf3ae5ddb199261f2f5449
|
[
"MIT"
] | 2
|
2018-12-20T11:12:08.000Z
|
2019-07-08T13:01:22.000Z
|
# Import modules
from groupdocs_signature_cloud import *
import groupdocs_signature_cloud
from Common import Common
class VerifyDigital:
@classmethod
def Run(cls):
# Create instance of the API
api = groupdocs_signature_cloud.SignApi.from_config(Common.GetConfig())
fileInfo = FileInfo()
fileInfo.file_path = "signaturedocs\\signedDigitalOne_page.docx"
opts = VerifyDigitalOptions()
opts.signature_type = 'Digital'
opts.page = 1
opts.all_pages = True
settings = VerifySettings()
settings.options = [opts]
settings.file_info = fileInfo
request = VerifySignaturesRequest(settings)
response = api.verify_signatures(request)
print("IsSuccess: " + str(response.is_success))
| 28.37931
| 79
| 0.658566
|
20cc3c70e256c45b112ee64c3a9c6b23cf3370f9
| 11,677
|
py
|
Python
|
ajax_datatable/columns.py
|
martimarkov/django-ajax-datatable
|
d132504a199cb2afe2cfd74a2e6d5d5f2969c4a4
|
[
"MIT"
] | null | null | null |
ajax_datatable/columns.py
|
martimarkov/django-ajax-datatable
|
d132504a199cb2afe2cfd74a2e6d5d5f2969c4a4
|
[
"MIT"
] | null | null | null |
ajax_datatable/columns.py
|
martimarkov/django-ajax-datatable
|
d132504a199cb2afe2cfd74a2e6d5d5f2969c4a4
|
[
"MIT"
] | null | null | null |
import datetime
from django.utils.translation import gettext_lazy as _
from django.db import models
from .exceptions import ColumnOrderError
from .utils import format_datetime
from django.utils.html import strip_tags
from .app_settings import STRIP_HTML_TAGS
class Column(object):
def __init__(self, model_field, sort_field=None, allow_choices_lookup=True):
try:
self.name = model_field.name
self.sort_column_name = sort_field or model_field.name
self.model_field = model_field
choices = model_field.choices
if allow_choices_lookup and choices:
self._choices_lookup = self.parse_choices(choices)
# self._search_choices_lookup =\
# {v: k for k, v in six.iteritems(self._choices_lookup)}
self._allow_choices_lookup = True
else:
self._allow_choices_lookup = False
except AttributeError:
self.name = model_field
self.sort_column_name = sort_field or model_field
self.model_field = None
self._allow_choices_lookup = False
# @staticmethod
# def collect_model_columns(model, column_specs):
# """
# Build a list of either Columns or ForeignColumns as required
# """
# columns = [c['name'] for c in column_specs]
# foreign_fields = dict([(c['name'], c['foreign_field']) for c in column_specs if c['foreign_field']])
# fields = {f.name: f for f in model._meta.get_fields()}
# model_columns = {}
# for col_name in columns:
# if col_name in foreign_fields:
# new_column = ForeignColumn(
# col_name,
# model,
# foreign_fields[col_name]
# )
# elif col_name in fields:
# new_column = Column(fields[col_name])
# else:
# new_column = Column(col_name)
# model_columns[col_name] = new_column
# return model_columns
@staticmethod
def column_factory(model, column_spec):
"""
Build either a Column or a ForeignColumn as required
"""
fields = {f.name: f for f in model._meta.get_fields()}
col_name = column_spec['name']
sort_field = column_spec['sort_field']
foreign_field = column_spec.get('foreign_field', None)
m2m_foreign_field = column_spec.get('m2m_foreign_field', None)
if foreign_field:
new_column = ForeignColumn(col_name, model, foreign_field)
elif m2m_foreign_field:
new_column = ManyToManyColumn(col_name, model, m2m_foreign_field)
elif col_name in fields:
new_column = Column(fields[col_name], sort_field=sort_field)
else:
new_column = Column(col_name, sort_field=sort_field)
return new_column
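    # A minimal usage sketch (the model and field names below are placeholders):
    #   spec = {'name': 'author', 'sort_field': 'author', 'foreign_field': 'author__last_name'}
    #   column = Column.column_factory(Book, spec)
    # With 'm2m_foreign_field' instead of 'foreign_field' a ManyToManyColumn is
    # built; with neither, a plain Column is returned.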
@property
def has_choices_available(self):
return self._allow_choices_lookup
def get_field_search_path(self):
return self.sort_column_name
def parse_choices(self, choices):
choices_dict = {}
for choice in choices:
try:
choices_dict[choice[0]] = choice[1]
except IndexError:
choices_dict[choice[0]] = choice[0]
except UnicodeDecodeError:
choices_dict[choice[0]] = choice[1].decode('utf-8')
return choices_dict
def string_tags_in_case(self, value):
if STRIP_HTML_TAGS and value is not None:
return strip_tags(value)
return value
def render_column_value(self, obj, value):
if self._allow_choices_lookup:
#return self._choices_lookup[value]
return self.string_tags_in_case(self._choices_lookup.get(value, ''))
if isinstance(value, datetime.datetime):
value = format_datetime(value, True)
elif isinstance(value, datetime.date):
value = format_datetime(value, False)
elif isinstance(value, bool):
value = _('Yes') if value else _('No')
return self.string_tags_in_case(value)
def render_column(self, obj):
try:
value = getattr(obj, self.name)
except AttributeError:
value = '???'
return self.render_column_value(obj, value)
def search_in_choices(self, pattern_list):
if not self._allow_choices_lookup:
return []
# return [matching_value for key, matching_value in
# six.iteritems(self._search_choices_lookup) if key.startswith(value)]
values = []
if type(pattern_list) != list:
pattern_list = [pattern_list]
for pattern in pattern_list:
pattern = pattern.lower()
# values = [key for (key, text) in self._choices_lookup.items() if text.lower().startswith(pattern)]
values += [key for (key, text) in self._choices_lookup.items() if pattern in text.lower()]
return values
class ForeignColumn(Column):
def __init__(self, name, model, path_to_column, allow_choices_lookup=True):
self._field_search_path = path_to_column
self._field_path = path_to_column.split('__')
foreign_field = self.get_foreign_field(model)
        super(ForeignColumn, self).__init__(foreign_field, allow_choices_lookup=allow_choices_lookup)
def get_field_search_path(self):
return self._field_search_path
def get_foreign_field(self, model):
path_items = self._field_path
path_item_count = len(path_items)
current_model = model
for idx, cur_field_name in enumerate(path_items):
fields = {f.name: f for f in current_model._meta.get_fields()}
if idx < path_item_count-1:
try:
current_field = fields[cur_field_name]
except KeyError:
# six.reraise(
# KeyError,
# "Field %s doesn't exists (model %s, path: %s)"
# % (cur_field_name, current_model.__name__,
# '__'.join(path_items[0:idx])))
raise KeyError(
"Field %s doesn't exists (model %s, path: %s)" % (
cur_field_name,
current_model.__name__,
'__'.join(path_items[0:idx])
)
)
try:
current_model = current_field.related_model
except AttributeError:
# six.reraise(
# AttributeError,
# "Field %s is not a foreign key (model %s, path %s)" %
# (cur_field_name, current_model.__name__,
# '__'.join(path_items[0:idx])))
raise AttributeError(
"Field %s is not a foreign key (model %s, path %s)" % (
cur_field_name,
current_model.__name__,
'__'.join(path_items[0:idx])
)
)
else:
foreign_field = fields[cur_field_name]
return foreign_field
def get_foreign_value(self, obj):
current_value = obj
for current_path_item in self._field_path:
try:
current_value = getattr(current_value, current_path_item)
except AttributeError:
try:
current_value = [
getattr(current_value, current_path_item)
for current_value in current_value.get_queryset()
]
except AttributeError:
try:
current_value = [getattr(f, current_path_item) for f in current_value]
except AttributeError:
current_value = None
if current_value is None:
return None
# use __str__() if no attribute has been specified by 'foreign_field'
# TODO: what happens with search and choices/autofilter ?
if isinstance(current_value, models.Model):
current_value = str(current_value)
return current_value
def render_column(self, obj):
value = self.get_foreign_value(obj)
return self.render_column_value(obj, value)
class ManyToManyColumn(ForeignColumn):
    def get_foreign_value(self, obj):
        m2m_name, m2m_field = self._field_path
        # ``<m2m_name>_list`` should have been populated by optimize_queryset;
        # if it is missing, fall back to a regular ``.all()`` on the m2m manager.
        if hasattr(obj, f'{m2m_name}_list'):
            related = getattr(obj, f'{m2m_name}_list')
        else:
            related = getattr(obj, m2m_name).all()
        return [getattr(x, m2m_field) for x in related]
def render_column_value(self, obj, value_list):
if self._allow_choices_lookup:
return ', '.join([str(self._choices_lookup.get(value, '')) for value in value_list])
return ', '.join([str(value) for value in value_list])
class ColumnLink(object):
def __init__(self, name, model_column=None, searchable='true', orderable='true', search_value='',
placeholder=False):
self.name = name
self._model_column = model_column
self.searchable = True if searchable == "true" else False
self.orderable = True if orderable == "true" else False
self.search_value = search_value
self.placeholder = placeholder or (name == '')
def __repr__(self):
return '%s (searchable: %s, orderable: %s, search_value: "%s")' %\
(self.name or '<placeholder>', self.searchable, self.orderable, self.search_value)
def get_field_search_path(self):
return self._model_column.get_field_search_path()
def get_value(self, object_instance):
return self._model_column.render_column(object_instance)
def to_dict(self):
"""
Get a dictionary representation of :class:`InstanceResource`
"""
self_dict = {}
# for key, value in six.iteritems(self.__dict__):
for key, value in self.__dict__.items():
if not key.startswith('_'):
self_dict[key] = value
return self_dict
class PlaceholderColumnLink(ColumnLink):
def __init__(self):
        super(PlaceholderColumnLink, self).__init__(
            None, None, searchable=False, orderable=False, placeholder=True)
def get_value(self, object_instance):
return None
class Order(object):
def __init__(self, column_index, direction, column_links_list):
try:
self.ascending = True if direction == 'asc' else False
self.column_link = column_links_list[int(column_index)]
if self.column_link.placeholder:
raise ColumnOrderError(
'Requested to order a placeholder column (index %d)' %
column_index)
        except (IndexError, KeyError):
raise ColumnOrderError(
'Requested to order a non-existing column (index %d)' %
column_index)
def __repr__(self):
return '%s: %s' % (
self.column_link.name, 'ASC' if self.ascending else 'DESC')
def get_order_mode(self):
if not self.ascending:
return '-' + self.column_link.get_field_search_path()
return self.column_link.get_field_search_path()
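# Hedged usage sketch (not part of the original module): demonstrates how a
# ColumnLink wraps a Column and how Order turns the DataTables sort request
# into a queryset ordering expression; model/column names are placeholders.
def _example_apply_ordering(queryset, model_column, direction='desc'):
    """Illustrative only: order a queryset by a single linked column."""
    link = ColumnLink('title', model_column=model_column,
                      searchable='true', orderable='true')
    order = Order(0, direction, [link])
    return queryset.order_by(order.get_order_mode())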
################################################################################
| 37.187898
| 112
| 0.581999
|
7da4ae9a691ce71e891d58d91d9987103615db8b
| 342
|
py
|
Python
|
instana/__version__.py
|
darnat/Instana
|
e085e7bd2dc53fea7d5d4a0e6943322bf7d27b82
|
[
"MIT"
] | null | null | null |
instana/__version__.py
|
darnat/Instana
|
e085e7bd2dc53fea7d5d4a0e6943322bf7d27b82
|
[
"MIT"
] | null | null | null |
instana/__version__.py
|
darnat/Instana
|
e085e7bd2dc53fea7d5d4a0e6943322bf7d27b82
|
[
"MIT"
] | 1
|
2020-02-22T23:32:15.000Z
|
2020-02-22T23:32:15.000Z
|
# -*- coding: utf-8 -*-
__title__ = 'instana'
__description__ = 'Instagram Private API'
__url__ = 'https://github.com/darnat/Instana'
__version__ = '0.1.8.26'
__build__ = 0x000001
__author__ = 'Alexis Darnat'
__author_email__ = 'alexis.darnat@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 Alexis Darnat'
__cake__ = u'✨ 🍰 ✨'
| 26.307692
| 46
| 0.719298
|
018381387a1f3bfb5342707fc170b07b1a807d7f
| 15,987
|
py
|
Python
|
wsynphot/base.py
|
jaladh-singhal/wsynphot
|
052c357a5b6c9f350210db439d831bb4f64ef571
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2019-06-25T17:39:08.000Z
|
2022-02-11T08:41:06.000Z
|
wsynphot/base.py
|
starkit/wsynphot
|
052c357a5b6c9f350210db439d831bb4f64ef571
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 23
|
2019-02-26T22:31:56.000Z
|
2022-01-04T21:27:28.000Z
|
wsynphot/base.py
|
starkit/wsynphot
|
052c357a5b6c9f350210db439d831bb4f64ef571
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 9
|
2018-10-18T19:02:40.000Z
|
2021-01-28T08:42:58.000Z
|
# defining the base filter curve classes
import os
from scipy import interpolate
from wsynphot.spectrum1d import SKSpectrum1D as Spectrum1D
import pandas as pd
from wsynphot.io.cache_filters import load_filter_index, load_transmission_data
from astropy import units as u, constants as const
from astropy import utils
import numpy as np
from wsynphot.calibration import get_vega_calibration_spectrum
def calculate_filter_flux_density(spectrum, filter):
"""
Calculate the average flux through the filter by evaluating the integral
    .. math::
        f_\\lambda = \\int f_\\lambda(\\lambda)\\, S(\\lambda)\\, \\lambda \\,d\\lambda
Parameters
----------
spectrum: ~specutils.Spectrum1D
spectrum object
filter: ~wsynphot.FilterCurve
:return:
"""
filtered_spectrum = filter * spectrum
filter_flux_density = np.trapz(filtered_spectrum.flux * filtered_spectrum.wavelength,
filtered_spectrum.wavelength)
return filter_flux_density
def calculate_vega_magnitude(spectrum, filter):
filter_flux_density = calculate_filter_flux_density(spectrum, filter)
wavelength_delta = filter.calculate_wavelength_delta()
filtered_f_lambda = (filter_flux_density / wavelength_delta)
zp_vega_f_lambda = filter.zp_vega_f_lambda
return -2.5 * np.log10(filtered_f_lambda / zp_vega_f_lambda)
def calculate_ab_magnitude(spectrum, filter):
filtered_f_lambda = (calculate_filter_flux_density(spectrum, filter) /
filter.calculate_wavelength_delta())
return -2.5 * np.log10(filtered_f_lambda / filter.zp_ab_f_lambda)
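# Hedged sketch (not part of the original module): shows that the two magnitude
# helpers above differ only in the zero point they divide by; ``spectrum`` is
# assumed to be a Spectrum1D-like object and ``filter`` a loaded filter curve.
def _example_compare_magnitudes(spectrum, filter):
    """Illustrative only: AB vs. Vega magnitude for the same filtered flux."""
    f_lambda = (calculate_filter_flux_density(spectrum, filter) /
                filter.calculate_wavelength_delta())
    mag_ab = -2.5 * np.log10(f_lambda / filter.zp_ab_f_lambda)
    mag_vega = -2.5 * np.log10(f_lambda / filter.zp_vega_f_lambda)
    return mag_ab, mag_vega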
def list_filters():
"""
List available filter sets along with their properties
"""
return load_filter_index()
class BaseFilterCurve(object):
"""
Basic filter curve class
Parameters
----------
wavelength: ~astropy.units.Quantity
wavelength for filter curve
transmission_lambda: numpy.ndarray
transmission_lambda for filter curve
interpolation_kind: str
allowed interpolation kinds given in scipy.interpolate.interp1d
"""
@classmethod
def load_filter(cls, filter_id=None, interpolation_kind='linear'):
"""
Parameters
----------
filter_id: str or None
if None is provided will return a DataFrame of all filters
interpolation_kind: str
            see scipy.interpolate.interp1d
"""
if filter_id is None:
return list_filters()
else:
filter = load_transmission_data(filter_id)
wavelength_unit = 'angstrom'
wavelength = filter['Wavelength'].values * u.Unit(wavelength_unit)
return cls(wavelength, filter['Transmission'].values,
interpolation_kind=interpolation_kind,
filter_id=filter_id)
def __init__(self, wavelength, transmission_lambda,
interpolation_kind='linear', filter_id=None):
if not hasattr(wavelength, 'unit'):
            raise ValueError('the wavelength needs to be an astropy quantity')
self.wavelength = wavelength
self.transmission_lambda = transmission_lambda
self.interpolation_object = interpolate.interp1d(self.wavelength,
self.transmission_lambda,
kind=interpolation_kind,
bounds_error=False,
fill_value=0.0)
self.filter_id = filter_id
def __mul__(self, other):
if not hasattr(other, 'flux') or not hasattr(other, 'wavelength'):
            raise ValueError('requiring a specutils.Spectrum1D-like object that '
                             'has attributes "flux" and "wavelength"')
#new_wavelength = np.union1d(other.wavelength.to(self.wavelength.unit).value,
# self.wavelength.value) * self.wavelength.unit
transmission = self.interpolate(other.wavelength)
return Spectrum1D.from_array(other.wavelength, transmission * other.flux)
def __rmul__(self, other):
return self.__mul__(other)
@utils.lazyproperty
def lambda_pivot(self):
"""
Calculate the pivotal wavelength as defined in Bessell & Murphy 2012
.. math::
\\lambda_\\textrm{pivot} = \\sqrt{
            \\frac{\\int S(\\lambda)\\lambda d\\lambda}{\\int \\frac{S(\\lambda)}{\\lambda} d\\lambda}}\\\\
<f_\\nu> = <f_\\lambda>\\frac{\\lambda_\\textrm{pivot}^2}{c}
"""
return np.sqrt((np.trapz(self.transmission_lambda * self.wavelength, self.wavelength)/
(np.trapz(self.transmission_lambda / self.wavelength, self.wavelength))))
@utils.lazyproperty
def wavelength_start(self):
return self.get_wavelength_start()
@utils.lazyproperty
def wavelength_end(self):
return self.get_wavelength_end()
@utils.lazyproperty
def zp_ab_f_lambda(self):
return (self.zp_ab_f_nu * const.c / self.lambda_pivot**2).to(
'erg/s/cm^2/Angstrom', u.spectral())
@utils.lazyproperty
def zp_ab_f_nu(self):
return (3631 * u.Jy).to('erg/s/cm^2/Hz')
@utils.lazyproperty
def zp_vega_f_lambda(self):
return (calculate_filter_flux_density(get_vega_calibration_spectrum(), self) /
self.calculate_wavelength_delta())
def interpolate(self, wavelength):
"""
Interpolate the filter onto new wavelength grid
Parameters
----------
wavelength: ~astropy.units.Quantity
wavelength grid to interpolate on
"""
converted_wavelength = wavelength.to(self.wavelength.unit)
return self.interpolation_object(converted_wavelength)
    def _calculate_flux_density(self, wavelength, flux):
        return calculate_filter_flux_density(flux, self)
def calculate_flux_density(self, spectrum):
return calculate_filter_flux_density(spectrum, self)
def calculate_f_lambda(self, spectrum):
return (self.calculate_flux_density(spectrum) /
self.calculate_wavelength_delta())
def calculate_wavelength_delta(self):
"""
        Calculate the integral :math:`\\int S(\\lambda) \\lambda \\,d\\lambda`
:return:
"""
return np.trapz(self.transmission_lambda * self.wavelength,
self.wavelength)
def calculate_weighted_average_wavelength(self):
"""
Calculate integral :math:`\\frac{\\int S(\\lambda) \\lambda d\\lambda}{\\int S(\\lambda) d\\lambda}`
Returns
: ~astropy.units.Quantity
"""
return (np.trapz(self.transmission_lambda * self.wavelength,
self.wavelength) / self.calculate_wavelength_delta())
def calculate_vega_magnitude(self, spectrum):
__doc__ = calculate_vega_magnitude.__doc__
return calculate_vega_magnitude(spectrum, self)
def calculate_ab_magnitude(self, spectrum):
__doc__ = calculate_ab_magnitude.__doc__
return calculate_ab_magnitude(spectrum, self)
def convert_ab_magnitude_to_f_lambda(self, mag):
return 10**(-0.4*mag) * self.zp_ab_f_lambda
def convert_vega_magnitude_to_f_lambda(self, mag):
return 10**(-0.4*mag) * self.zp_vega_f_lambda
def plot(self, ax, scale_max=None, make_label=True, plot_kwargs={},
format_filter_id=None):
if scale_max is not None:
if hasattr(scale_max, 'unit'):
scale_max = scale_max.value
transmission = (self.transmission_lambda * scale_max
/ self.transmission_lambda.max())
else:
transmission = self.transmission_lambda
ax.plot(self.wavelength, transmission, **plot_kwargs)
ax.set_xlabel('Wavelength [{0}]'.format(
self.wavelength.unit.to_string(format='latex')))
ax.set_ylabel('Transmission [1]')
if make_label==True and self.filter_id is not None:
if format_filter_id is not None:
filter_id = format_filter_id(self.filter_id)
else:
filter_id = self.filter_id
text_x = (self.lambda_pivot).value
text_y = transmission.max()/2
ax.text(text_x, text_y, filter_id,
horizontalalignment='center', verticalalignment='center',
bbox=dict(facecolor='white', alpha=0.5))
def get_wavelength_start(self, threshold=0.01):
norm_cum_sum = (np.cumsum(self.transmission_lambda)
/ np.sum(self.transmission_lambda))
return self.wavelength[norm_cum_sum.searchsorted(threshold)]
def get_wavelength_end(self, threshold=0.01):
norm_cum_sum = (np.cumsum(self.transmission_lambda)
/ np.sum(self.transmission_lambda))
return self.wavelength[norm_cum_sum.searchsorted(1 - threshold)]
class FilterCurve(BaseFilterCurve):
def __repr__(self):
if self.filter_id is None:
filter_id = "{0:x}".format(self.__hash__())
else:
filter_id = self.filter_id
return "FilterCurve <{0}>".format(filter_id)
class FilterSet(object):
"""
A set of filters
Parameters
----------
filter_set: ~list
a list of strings or a list of filters
interpolation_kind: ~str
        scipy interpolation kinds
"""
def __init__(self, filter_set, interpolation_kind='linear'):
if hasattr(filter_set[0], 'wavelength'):
self.filter_set = filter_set
else:
self.filter_set = [FilterCurve.load_filter(filter_id,
interpolation_kind=
interpolation_kind)
for filter_id in filter_set]
def __iter__(self):
self.current_filter_idx = 0
return self
def __next__(self):
try:
item = self.filter_set[self.current_filter_idx]
except IndexError:
raise StopIteration
self.current_filter_idx += 1
return item
next = __next__
def __getitem__(self, item):
return self.filter_set.__getitem__(item)
def __repr__(self):
return "<{0} \n{1}>".format(self.__class__.__name__,
'\n'.join(
[item.filter_id
for item in self.filter_set]))
@property
def lambda_pivot(self):
return u.Quantity([item.lambda_pivot for item in self])
def calculate_f_lambda(self, spectrum):
return u.Quantity(
[item.calculate_f_lambda(spectrum) for item in self.filter_set])
def calculate_ab_magnitudes(self, spectrum):
mags = [item.calculate_ab_magnitude(spectrum)
for item in self.filter_set]
return mags
def calculate_vega_magnitudes(self, spectrum):
mags = [item.calculate_vega_magnitude(spectrum)
for item in self.filter_set]
return mags
def convert_ab_magnitudes_to_f_lambda(self, magnitudes):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambdas = [filter.convert_ab_magnitude_to_f_lambda(mag)
for filter, mag in zip(self.filter_set, magnitudes)]
return u.Quantity(f_lambdas)
def convert_ab_magnitude_uncertainties_to_f_lambda_uncertainties(
self, magnitudes, magnitude_uncertainties):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambda_positive_uncertainties = u.Quantity(
[filter.convert_ab_magnitude_to_f_lambda(mag + mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties, )])
f_lambda_negative_uncertainties = u.Quantity(
[filter.convert_ab_magnitude_to_f_lambda(mag - mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties)])
return np.abs(u.Quantity((f_lambda_positive_uncertainties,
f_lambda_negative_uncertainties))
- self.convert_ab_magnitudes_to_f_lambda(magnitudes))
def convert_vega_magnitude_uncertainties_to_f_lambda_uncertainties(
self, magnitudes, magnitude_uncertainties):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambda_positive_uncertainties = u.Quantity(
[filter.convert_vega_magnitude_to_f_lambda(mag + mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties, )])
f_lambda_negative_uncertainties = u.Quantity(
[filter.convert_vega_magnitude_to_f_lambda(mag - mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties)])
return np.abs(u.Quantity((f_lambda_positive_uncertainties,
f_lambda_negative_uncertainties))
- self.convert_vega_magnitudes_to_f_lambda(magnitudes))
def convert_vega_magnitudes_to_f_lambda(self, magnitudes):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambdas = [filter.convert_vega_magnitude_to_f_lambda(mag)
for filter, mag in zip(self.filter_set, magnitudes)]
return u.Quantity(f_lambdas)
def plot_spectrum(self, spectrum, ax, make_labels=True,
spectrum_plot_kwargs={}, filter_plot_kwargs={},
filter_color_list=None, format_filter_id=None):
"""
        plot a spectrum together with the filters in this set
        spectrum: spectrum-like object with ``wavelength`` and ``flux``
        ax: matplotlib axes to plot onto
        make_labels: if True, label each filter curve with its filter id
        :return:
"""
ax.plot(spectrum.wavelength, spectrum.flux, **spectrum_plot_kwargs)
for i, filter in enumerate(self.filter_set):
filter_scale = filter.calculate_f_lambda(spectrum)
if filter_color_list is not None:
filter_plot_kwargs['color'] = filter_color_list[i]
filter.plot(ax, scale_max=filter_scale, make_label=make_labels,
plot_kwargs=filter_plot_kwargs,
format_filter_id=format_filter_id)
class MagnitudeSet(FilterSet):
def __init__(self, filter_set, magnitudes, magnitude_uncertainties=None,
interpolation_kind='linear'):
super(MagnitudeSet, self).__init__(filter_set,
interpolation_kind=
interpolation_kind)
self.magnitudes = np.array(magnitudes)
        # keep None as-is so the ``is None`` check in __repr__ still works
        self.magnitude_uncertainties = (
            None if magnitude_uncertainties is None
            else np.array(magnitude_uncertainties))
def __repr__(self):
mag_str = '{0} {1:.4f} +/- {2:.4f}'
mag_data = []
for i, filter in enumerate(self.filter_set):
unc = (np.nan if self.magnitude_uncertainties is None
else self.magnitude_uncertainties[i])
mag_data.append(mag_str.format(filter.filter_id,
self.magnitudes[i], unc))
return "<{0} \n{1}>".format(self.__class__.__name__,
'\n'.join(mag_data))
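# Hedged usage sketch (not part of the original module): the filter ids and the
# uncertainty values are placeholders used purely for illustration.
def _example_magnitude_set(spectrum, filter_ids):
    """Illustrative only: compute AB magnitudes for a set of filters and wrap
    them together with assumed 0.05 mag uncertainties."""
    filter_set = FilterSet(filter_ids)
    mags = filter_set.calculate_ab_magnitudes(spectrum)
    return MagnitudeSet(filter_ids, mags,
                        magnitude_uncertainties=[0.05] * len(filter_ids))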
| 34.380645
| 108
| 0.620066
|
b0c9ffb7c75d28333c4f244da3999db3be25c74f
| 1,461
|
py
|
Python
|
aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/ListUsersRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | 1
|
2019-12-23T12:36:43.000Z
|
2019-12-23T12:36:43.000Z
|
aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/ListUsersRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/ListUsersRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ListUsersRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'ListUsers','ehs')
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
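# Hedged usage sketch (not part of the generated SDK file): assumes valid
# credentials and an existing cluster id; AcsClient and do_action_with_exception
# are the standard aliyunsdkcore entry points for sending an RpcRequest.
def _example_list_users(access_key_id, access_key_secret, cluster_id,
                        region_id='cn-hangzhou'):
    """Illustrative only: request the first page of users of an E-HPC cluster."""
    from aliyunsdkcore.client import AcsClient
    client = AcsClient(access_key_id, access_key_secret, region_id)
    request = ListUsersRequest()
    request.set_ClusterId(cluster_id)
    request.set_PageNumber(1)
    request.set_PageSize(10)
    return client.do_action_with_exception(request)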
| 34.785714
| 69
| 0.760438
|
cf48885a77f03ad6f4d37585177431421ba5230d
| 650
|
py
|
Python
|
examples/callback.py
|
mklemm2/grigori
|
2a2631e01b078fca734231fe3b7979f2b724a2f4
|
[
"MIT"
] | 3
|
2020-04-09T11:09:41.000Z
|
2022-01-08T23:19:54.000Z
|
examples/callback.py
|
mklemm2/grigori
|
2a2631e01b078fca734231fe3b7979f2b724a2f4
|
[
"MIT"
] | null | null | null |
examples/callback.py
|
mklemm2/grigori
|
2a2631e01b078fca734231fe3b7979f2b724a2f4
|
[
"MIT"
] | 1
|
2020-11-25T09:32:33.000Z
|
2020-11-25T09:32:33.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from grigori import Change
from grigori import Watcher
def on_added(change: dict) -> None:
print("file '%s' was added" % change["file"])
def on_modified(change: dict) -> None:
print("file '%s' was modified" % change["file"])
def on_deleted(change: dict) -> None:
print("file '%s' was deleted" % change["file"])
directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test")
w = Watcher(directory)
# Bind the callbacks.
w.on(Change.ADDED, on_added)
w.on(Change.MODIFIED, on_modified)
w.on(Change.DELETED, on_deleted)
w.wait() # Start watching the files.
| 19.69697
| 76
| 0.678462
|
c7f0c8b96159fa402654b56db05575b8f8a08403
| 54,315
|
bzl
|
Python
|
bazel/repository_locations.bzl
|
cmboling/envoy
|
1b49ba4fc7969ad3d5bc246521124f3a686c2961
|
[
"Apache-2.0"
] | null | null | null |
bazel/repository_locations.bzl
|
cmboling/envoy
|
1b49ba4fc7969ad3d5bc246521124f3a686c2961
|
[
"Apache-2.0"
] | null | null | null |
bazel/repository_locations.bzl
|
cmboling/envoy
|
1b49ba4fc7969ad3d5bc246521124f3a686c2961
|
[
"Apache-2.0"
] | null | null | null |
# This should match the schema defined in external_deps.bzl.
REPOSITORY_LOCATIONS_SPEC = dict(
bazel_compdb = dict(
project_name = "bazel-compilation-database",
project_desc = "Clang JSON compilation database support for Bazel",
project_url = "https://github.com/grailbio/bazel-compilation-database",
version = "0.4.5",
sha256 = "bcecfd622c4ef272fd4ba42726a52e140b961c4eac23025f18b346c968a8cfb4",
strip_prefix = "bazel-compilation-database-{version}",
urls = ["https://github.com/grailbio/bazel-compilation-database/archive/{version}.tar.gz"],
release_date = "2020-08-01",
use_category = ["build"],
),
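    # Hedged note (not part of the original file): the "urls" and "strip_prefix"
    # fields use Python str.format()-style placeholders which the repository
    # loading code is expected to substitute before fetching, e.g. {version} ->
    # "0.4.5" for the entry above (some entries also appear to use variants such
    # as {underscore_version}, the version string with dots replaced by underscores).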
bazel_gazelle = dict(
project_name = "Gazelle",
project_desc = "Bazel BUILD file generator for Go projects",
project_url = "https://github.com/bazelbuild/bazel-gazelle",
version = "0.22.2",
sha256 = "b85f48fa105c4403326e9525ad2b2cc437babaa6e15a3fc0b1dbab0ab064bc7c",
urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v{version}/bazel-gazelle-v{version}.tar.gz"],
release_date = "2020-10-02",
use_category = ["build"],
),
bazel_toolchains = dict(
project_name = "bazel-toolchains",
project_desc = "Bazel toolchain configs for RBE",
project_url = "https://github.com/bazelbuild/bazel-toolchains",
version = "4.1.0",
sha256 = "179ec02f809e86abf56356d8898c8bd74069f1bd7c56044050c2cd3d79d0e024",
strip_prefix = "bazel-toolchains-{version}",
urls = [
"https://github.com/bazelbuild/bazel-toolchains/releases/download/{version}/bazel-toolchains-{version}.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/{version}.tar.gz",
],
release_date = "2021-05-21",
use_category = ["build"],
),
build_bazel_rules_apple = dict(
project_name = "Apple Rules for Bazel",
project_desc = "Bazel rules for Apple platforms",
project_url = "https://github.com/bazelbuild/rules_apple",
version = "0.31.2",
sha256 = "c84962b64d9ae4472adfb01ec2cf1aa73cb2ee8308242add55fa7cc38602d882",
urls = ["https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz"],
release_date = "2021-05-04",
use_category = ["build"],
),
rules_fuzzing = dict(
project_name = "Fuzzing Rules for Bazel",
project_desc = "Bazel rules for fuzz tests",
project_url = "https://github.com/bazelbuild/rules_fuzzing",
version = "0.1.3",
sha256 = "ce99c277c4e9e21f77222757936bf7ffb8823911497db84bdd57a796588fcf01",
strip_prefix = "rules_fuzzing-{version}",
urls = ["https://github.com/bazelbuild/rules_fuzzing/archive/v{version}.tar.gz"],
release_date = "2021-04-01",
use_category = ["test_only"],
implied_untracked_deps = [
# This is a repository rule generated to define an OSS-Fuzz fuzzing
# engine target from the CFLAGS/CXXFLAGS environment.
"rules_fuzzing_oss_fuzz",
],
),
envoy_build_tools = dict(
project_name = "envoy-build-tools",
project_desc = "Common build tools shared by the Envoy/UDPA ecosystem",
project_url = "https://github.com/envoyproxy/envoy-build-tools",
version = "a955a00bed5f35777a83899ee680f8530eee4718",
sha256 = "b0830dc6fc1e3a095c5d817ca768c89c407bdd71894e1641daf500d28cb269da",
strip_prefix = "envoy-build-tools-{version}",
urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz"],
release_date = "2021-05-25",
use_category = ["build"],
),
boringssl = dict(
project_name = "BoringSSL",
project_desc = "Minimal OpenSSL fork",
project_url = "https://github.com/google/boringssl",
# To update BoringSSL, which tracks Chromium releases:
# 1. Open https://omahaproxy.appspot.com/ and note <current_version> of linux/dev release.
# 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags/<current_version>/DEPS and note <boringssl_revision>.
# 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges <boringssl_revision>.
#
# chromium-92.0.4491.6 (linux/dev)
version = "c5ad6dcb65e532589e8acb9e9adbde62463af13d",
sha256 = "6b4674999af85c4a19f2b51132db3507520070923cd967bb1cd157d43b3f68d9",
strip_prefix = "boringssl-{version}",
urls = ["https://github.com/google/boringssl/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2021-04-22",
cpe = "cpe:2.3:a:google:boringssl:*",
),
boringssl_fips = dict(
project_name = "BoringSSL (FIPS)",
project_desc = "FIPS compliant BoringSSL",
project_url = "https://boringssl.googlesource.com/boringssl/+/master/crypto/fipsmodule/FIPS.md",
version = "fips-20190808",
sha256 = "3b5fdf23274d4179c2077b5e8fa625d9debd7a390aac1d165b7e47234f648bb8",
urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-ae223d6138807a13006342edfeef32e813246b39.tar.xz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2019-08-08",
cpe = "cpe:2.3:a:google:boringssl:*",
),
com_google_absl = dict(
project_name = "Abseil",
project_desc = "Open source collection of C++ libraries drawn from the most fundamental pieces of Google’s internal codebase",
project_url = "https://abseil.io/",
version = "5d8fc9192245f0ea67094af57399d7931d6bd53f",
sha256 = "e3812f256dd7347a33bf9d93a950cf356c61c0596842ff07d8154cd415145d83",
strip_prefix = "abseil-cpp-{version}",
urls = ["https://github.com/abseil/abseil-cpp/archive/{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-11-24",
cpe = "N/A",
),
com_github_c_ares_c_ares = dict(
project_name = "c-ares",
project_desc = "C library for asynchronous DNS requests",
project_url = "https://c-ares.haxx.se/",
version = "1.17.1",
sha256 = "d73dd0f6de824afd407ce10750ea081af47eba52b8a6cb307d220131ad93fc40",
strip_prefix = "c-ares-{version}",
urls = ["https://github.com/c-ares/c-ares/releases/download/cares-{underscore_version}/c-ares-{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-11-19",
cpe = "cpe:2.3:a:c-ares_project:c-ares:*",
),
com_github_circonus_labs_libcircllhist = dict(
project_name = "libcircllhist",
project_desc = "An implementation of Circonus log-linear histograms",
project_url = "https://github.com/circonus-labs/libcircllhist",
version = "63a16dd6f2fc7bc841bb17ff92be8318df60e2e1",
sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c",
strip_prefix = "libcircllhist-{version}",
urls = ["https://github.com/circonus-labs/libcircllhist/archive/{version}.tar.gz"],
use_category = ["controlplane", "observability_core", "dataplane_core"],
release_date = "2019-02-11",
cpe = "N/A",
),
com_github_cyan4973_xxhash = dict(
project_name = "xxHash",
project_desc = "Extremely fast hash algorithm",
project_url = "https://github.com/Cyan4973/xxHash",
version = "0.7.3",
sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7",
strip_prefix = "xxHash-{version}",
urls = ["https://github.com/Cyan4973/xxHash/archive/v{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-03-05",
cpe = "N/A",
),
com_github_envoyproxy_sqlparser = dict(
project_name = "C++ SQL Parser Library",
project_desc = "Forked from Hyrise SQL Parser",
project_url = "https://github.com/envoyproxy/sql-parser",
version = "3b40ba2d106587bdf053a292f7e3bb17e818a57f",
sha256 = "96c10c8e950a141a32034f19b19cdeb1da48fe859cf96ae5e19f894f36c62c71",
strip_prefix = "sql-parser-{version}",
urls = ["https://github.com/envoyproxy/sql-parser/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.filters.network.mysql_proxy",
"envoy.filters.network.postgres_proxy",
],
release_date = "2020-06-10",
cpe = "N/A",
),
com_github_mirror_tclap = dict(
project_name = "tclap",
project_desc = "Small, flexible library that provides a simple interface for defining and accessing command line arguments",
project_url = "http://tclap.sourceforge.net",
version = "1-2-1",
sha256 = "f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f",
strip_prefix = "tclap-tclap-{version}-release-final",
urls = ["https://github.com/mirror/tclap/archive/tclap-{version}-release-final.tar.gz"],
release_date = "2011-04-16",
use_category = ["other"],
),
com_github_fmtlib_fmt = dict(
project_name = "fmt",
project_desc = "{fmt} is an open-source formatting library providing a fast and safe alternative to C stdio and C++ iostreams",
project_url = "https://fmt.dev",
version = "7.0.3",
sha256 = "decfdf9ad274070fa85f26407b816f5a4d82205ae86bac1990be658d0795ea4d",
strip_prefix = "fmt-{version}",
urls = ["https://github.com/fmtlib/fmt/releases/download/{version}/fmt-{version}.zip"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-08-06",
cpe = "cpe:2.3:a:fmt:fmt:*",
),
com_github_gabime_spdlog = dict(
project_name = "spdlog",
project_desc = "Very fast, header-only/compiled, C++ logging library",
project_url = "https://github.com/gabime/spdlog",
version = "1.7.0",
sha256 = "f0114a4d3c88be9e696762f37a7c379619443ce9d668546c61b21d41affe5b62",
strip_prefix = "spdlog-{version}",
urls = ["https://github.com/gabime/spdlog/archive/v{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-07-09",
cpe = "N/A",
),
com_github_google_libprotobuf_mutator = dict(
project_name = "libprotobuf-mutator",
project_desc = "Library to randomly mutate protobuffers",
project_url = "https://github.com/google/libprotobuf-mutator",
version = "1.0",
sha256 = "792f250fb546bde8590e72d64311ea00a70c175fd77df6bb5e02328fa15fe28e",
strip_prefix = "libprotobuf-mutator-{version}",
urls = ["https://github.com/google/libprotobuf-mutator/archive/v{version}.tar.gz"],
release_date = "2020-11-06",
use_category = ["test_only"],
),
com_github_google_tcmalloc = dict(
project_name = "tcmalloc",
project_desc = "Fast, multi-threaded malloc implementation",
project_url = "https://github.com/google/tcmalloc",
version = "9f385356c34d4fc11f76a000b609e2b446c20667",
sha256 = "652e48e0b9ef645db04bff8a3d4841c60ce07275f5d98e18e698dc92bd111291",
strip_prefix = "tcmalloc-{version}",
urls = ["https://github.com/google/tcmalloc/archive/{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-11-04",
cpe = "N/A",
),
com_github_gperftools_gperftools = dict(
project_name = "gperftools",
project_desc = "tcmalloc and profiling libraries",
project_url = "https://github.com/gperftools/gperftools",
version = "2.8",
sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e",
strip_prefix = "gperftools-{version}",
urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-{version}/gperftools-{version}.tar.gz"],
release_date = "2020-07-06",
use_category = ["dataplane_core", "controlplane"],
cpe = "cpe:2.3:a:gperftools_project:gperftools:*",
),
com_github_grpc_grpc = dict(
project_name = "gRPC",
project_desc = "gRPC C core library",
project_url = "https://grpc.io",
version = "1.34.0",
sha256 = "7372a881122cd85a7224435a1d58bc5e11c88d4fb98a64b83f36f3d1c2f16d39",
strip_prefix = "grpc-{version}",
urls = ["https://github.com/grpc/grpc/archive/v{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-12-01",
cpe = "cpe:2.3:a:grpc:grpc:*",
),
com_github_luajit_luajit = dict(
project_name = "LuaJIT",
project_desc = "Just-In-Time compiler for Lua",
project_url = "https://luajit.org",
# The last release version, 2.1.0-beta3 has a number of CVEs filed
# against it. These may not impact correct non-malicious Lua code, but for prudence we bump.
version = "1d8b747c161db457e032a023ebbff511f5de5ec2",
sha256 = "20a159c38a98ecdb6368e8d655343b6036622a29a1621da9dc303f7ed9bf37f3",
strip_prefix = "LuaJIT-{version}",
urls = ["https://github.com/LuaJIT/LuaJIT/archive/{version}.tar.gz"],
release_date = "2020-10-12",
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.http.lua"],
cpe = "cpe:2.3:a:luajit:luajit:*",
),
com_github_moonjit_moonjit = dict(
project_name = "Moonjit",
project_desc = "LuaJIT fork with wider platform support",
project_url = "https://github.com/moonjit/moonjit",
version = "2.2.0",
sha256 = "83deb2c880488dfe7dd8ebf09e3b1e7613ef4b8420de53de6f712f01aabca2b6",
strip_prefix = "moonjit-{version}",
urls = ["https://github.com/moonjit/moonjit/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.http.lua"],
release_date = "2020-01-14",
cpe = "cpe:2.3:a:moonjit_project:moonjit:*",
),
com_github_nghttp2_nghttp2 = dict(
project_name = "Nghttp2",
project_desc = "Implementation of HTTP/2 and its header compression algorithm HPACK in Cimplementation of HTTP/2 and its header compression algorithm HPACK in C",
project_url = "https://nghttp2.org",
version = "1.42.0",
sha256 = "884d18a0158908125d58b1b61d475c0325e5a004e3d61a56b5fcc55d5f4b7af5",
strip_prefix = "nghttp2-{version}",
urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2020-11-23",
cpe = "cpe:2.3:a:nghttp2:nghttp2:*",
),
io_opentracing_cpp = dict(
project_name = "OpenTracing",
project_desc = "Vendor-neutral APIs and instrumentation for distributed tracing",
project_url = "https://opentracing.io",
version = "1.5.1",
sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301",
strip_prefix = "opentracing-cpp-{version}",
urls = ["https://github.com/opentracing/opentracing-cpp/archive/v{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = [
"envoy.tracers.datadog",
"envoy.tracers.dynamic_ot",
"envoy.tracers.lightstep",
],
release_date = "2019-01-16",
cpe = "N/A",
),
com_lightstep_tracer_cpp = dict(
project_name = "lightstep-tracer-cpp",
project_desc = "LightStep distributed tracing library for C++",
project_url = "https://github.com/lightstep/lightstep-tracer-cpp",
version = "1942b3f142e218ebc143a043f32e3278dafec9aa",
sha256 = "3238921a8f578beb26c2215cd277e8f6752f3d29b020b881d60d96a240a38aed",
strip_prefix = "lightstep-tracer-cpp-{version}",
urls = ["https://github.com/lightstep/lightstep-tracer-cpp/archive/{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = ["envoy.tracers.lightstep"],
release_date = "2020-08-25",
cpe = "N/A",
),
skywalking_data_collect_protocol = dict(
project_name = "skywalking-data-collect-protocol",
project_desc = "Data Collect Protocols of Apache SkyWalking",
project_url = "https://github.com/apache/skywalking-data-collect-protocol",
name = "skywalking_data_collect_protocol",
sha256 = "edfa970394511213eacc8055b4c13e4e9773e9196122a49e0db68f6162f67dff",
urls = ["https://github.com/apache/skywalking-data-collect-protocol/archive/v{version}.tar.gz"],
strip_prefix = "skywalking-data-collect-protocol-{version}",
version = "8.4.0",
use_category = ["observability_ext"],
extensions = ["envoy.tracers.skywalking"],
release_date = "2021-01-20",
cpe = "N/A",
),
com_github_skyapm_cpp2sky = dict(
project_name = "cpp2sky",
project_desc = "C++ SDK for Apache SkyWalking",
project_url = "https://github.com/SkyAPM/cpp2sky",
sha256 = "76117a63cf29355c28a75bc83bd1d7e5bc004039445e7c854ee752dfe66094e6",
version = "0.2.1",
strip_prefix = "cpp2sky-{version}",
urls = ["https://github.com/SkyAPM/cpp2sky/archive/v{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = ["envoy.tracers.skywalking"],
release_date = "2021-03-17",
cpe = "N/A",
),
com_github_datadog_dd_opentracing_cpp = dict(
project_name = "Datadog OpenTracing C++ Client",
project_desc = "Datadog OpenTracing C++ Client",
project_url = "https://github.com/DataDog/dd-opentracing-cpp",
version = "1.2.1",
sha256 = "ae44699e4aa2d21b70ed897a6c0cf3ed7dfb411e1aae4e686e39af75cec7c9bf",
strip_prefix = "dd-opentracing-cpp-{version}",
urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = ["envoy.tracers.datadog"],
release_date = "2021-01-26",
cpe = "N/A",
),
com_github_google_benchmark = dict(
project_name = "Benchmark",
project_desc = "Library to benchmark code snippets",
project_url = "https://github.com/google/benchmark",
version = "1.5.1",
sha256 = "23082937d1663a53b90cb5b61df4bcc312f6dee7018da78ba00dd6bd669dfef2",
strip_prefix = "benchmark-{version}",
urls = ["https://github.com/google/benchmark/archive/v{version}.tar.gz"],
use_category = ["test_only"],
release_date = "2020-06-09",
),
com_github_libevent_libevent = dict(
project_name = "libevent",
project_desc = "Event notification library",
project_url = "https://libevent.org",
# This SHA includes the new "prepare" and "check" watchers, used for event loop performance
# stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition
# in the watchers (see https://github.com/libevent/libevent/pull/802).
# This also includes the fixes for https://github.com/libevent/libevent/issues/806
# and https://github.com/lyft/envoy-mobile/issues/215.
# This also includes the fixes for Phantom events with EV_ET (see
# https://github.com/libevent/libevent/issues/984).
# This also includes the wepoll backend for Windows (see
# https://github.com/libevent/libevent/pull/1006)
# TODO(adip): Update to v2.2 when it is released.
version = "62c152d9a7cd264b993dad730c4163c6ede2e0a3",
sha256 = "4c80e5fe044ce5f8055b20a2f141ee32ec2614000f3e95d2aa81611a4c8f5213",
strip_prefix = "libevent-{version}",
urls = ["https://github.com/libevent/libevent/archive/{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-07-28",
cpe = "cpe:2.3:a:libevent_project:libevent:*",
),
# This should be removed, see https://github.com/envoyproxy/envoy/issues/13261.
net_zlib = dict(
project_name = "zlib",
project_desc = "zlib compression library",
project_url = "https://zlib.net",
version = "79baebe50e4d6b73ae1f8b603f0ef41300110aa3",
# Use the dev branch of zlib to resolve fuzz bugs and out of bound
# errors resulting in crashes in zlib 1.2.11.
# TODO(asraa): Remove when zlib > 1.2.11 is released.
sha256 = "155a8f8c1a753fb05b16a1b0cc0a0a9f61a78e245f9e0da483d13043b3bcbf2e",
strip_prefix = "zlib-{version}",
urls = ["https://github.com/madler/zlib/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2019-04-14",
cpe = "cpe:2.3:a:gnu:zlib:*",
),
org_brotli = dict(
project_name = "brotli",
project_desc = "brotli compression library",
project_url = "https://brotli.org",
# Use the dev branch of brotli to resolve compilation issues.
# TODO(rojkov): Remove when brotli > 1.0.9 is released.
version = "0cd2e3926e95e7e2930f57ae3f4885508d462a25",
sha256 = "93810780e60304b51f2c9645fe313a6e4640711063ed0b860cfa60999dd256c5",
strip_prefix = "brotli-{version}",
urls = ["https://github.com/google/brotli/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.compression.brotli.compressor",
"envoy.compression.brotli.decompressor",
],
release_date = "2020-09-08",
cpe = "cpe:2.3:a:google:brotli:*",
),
com_github_zlib_ng_zlib_ng = dict(
project_name = "zlib-ng",
project_desc = "zlib fork (higher performance)",
project_url = "https://github.com/zlib-ng/zlib-ng",
version = "b802a303ce8b6c86fbe3f93d59e0a82333768c0c",
sha256 = "e051eade607ecbbfa2c7ed3087fe53e5d3a58325375e1e28209594138e4aa93d",
strip_prefix = "zlib-ng-{version}",
urls = ["https://github.com/zlib-ng/zlib-ng/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2020-10-18",
cpe = "N/A",
),
com_github_jbeder_yaml_cpp = dict(
project_name = "yaml-cpp",
project_desc = "YAML parser and emitter in C++ matching the YAML 1.2 spec",
project_url = "https://github.com/jbeder/yaml-cpp",
version = "98acc5a8874faab28b82c28936f4b400b389f5d6",
sha256 = "79ab7069ef1c7c3632e7ffe095f7185d4c77b64d8035db3c085c239d4fe96d5f",
strip_prefix = "yaml-cpp-{version}",
urls = ["https://github.com/jbeder/yaml-cpp/archive/{version}.tar.gz"],
# YAML is also used for runtime as well as controlplane. It shouldn't appear on the
# dataplane but we can't verify this automatically due to code structure today.
use_category = ["controlplane", "dataplane_core"],
release_date = "2020-07-27",
cpe = "cpe:2.3:a:yaml-cpp_project:yaml-cpp:*",
),
com_github_msgpack_msgpack_c = dict(
project_name = "msgpack for C/C++",
project_desc = "MessagePack is an efficient binary serialization format",
project_url = "https://github.com/msgpack/msgpack-c",
version = "3.3.0",
sha256 = "6e114d12a5ddb8cb11f669f83f32246e484a8addd0ce93f274996f1941c1f07b",
strip_prefix = "msgpack-{version}",
urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-{version}/msgpack-{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = ["envoy.tracers.datadog"],
release_date = "2020-06-05",
cpe = "N/A",
),
com_github_google_jwt_verify = dict(
project_name = "jwt_verify_lib",
project_desc = "JWT verification library for C++",
project_url = "https://github.com/google/jwt_verify_lib",
version = "28efec2e4df1072db0ed03597591360ec9f80aac",
sha256 = "7a5c35b7cbf633398503ae12cad8c2833e92b3a796eed68b6256d22d51ace5e1",
strip_prefix = "jwt_verify_lib-{version}",
urls = ["https://github.com/google/jwt_verify_lib/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.http.jwt_authn"],
release_date = "2020-11-05",
cpe = "N/A",
),
com_github_nodejs_http_parser = dict(
project_name = "HTTP Parser",
project_desc = "Parser for HTTP messages written in C",
project_url = "https://github.com/nodejs/http-parser",
# This SHA includes fix for https://github.com/nodejs/http-parser/issues/517 which allows (opt-in) to serve
# requests with both Content-Legth and Transfer-Encoding: chunked headers set.
version = "4f15b7d510dc7c6361a26a7c6d2f7c3a17f8d878",
sha256 = "6a12896313ce1ca630cf516a0ee43a79b5f13f5a5d8143f56560ac0b21c98fac",
strip_prefix = "http-parser-{version}",
urls = ["https://github.com/nodejs/http-parser/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2020-07-10",
cpe = "cpe:2.3:a:nodejs:node.js:*",
),
com_github_alibaba_hessian2_codec = dict(
project_name = "hessian2-codec",
project_desc = "hessian2-codec is a C++ library for hessian2 codec",
project_url = "https://github.com/alibaba/hessian2-codec.git",
version = "dd8e05487a27b367b90ce81f4e6e6f62d693a212",
sha256 = "93260c54406e11b7be078a7ea120f7ab0df475c733e68d010fde400c5c8c8162",
strip_prefix = "hessian2-codec-{version}",
urls = ["https://github.com/alibaba/hessian2-codec/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.network.dubbo_proxy"],
release_date = "2021-04-05",
cpe = "N/A",
),
com_github_tencent_rapidjson = dict(
project_name = "RapidJSON",
project_desc = "Fast JSON parser/generator for C++",
project_url = "https://rapidjson.org",
version = "dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1",
sha256 = "a2faafbc402394df0fa94602df4b5e4befd734aad6bb55dfef46f62fcaf1090b",
strip_prefix = "rapidjson-{version}",
urls = ["https://github.com/Tencent/rapidjson/archive/{version}.tar.gz"],
# We're mostly using com_google_protobuf for JSON, but there are some extensions and hard to
# disentangle uses on the dataplane, e.g. header_formatter, Squash filter.
use_category = ["controlplane", "dataplane_core"],
release_date = "2019-12-03",
cpe = "cpe:2.3:a:tencent:rapidjson:*",
),
com_github_nlohmann_json = dict(
project_name = "nlohmann JSON",
project_desc = "Fast JSON parser/generator for C++",
project_url = "https://nlohmann.github.io/json",
version = "3.9.1",
sha256 = "4cf0df69731494668bdd6460ed8cb269b68de9c19ad8c27abc24cd72605b2d5b",
strip_prefix = "json-{version}",
urls = ["https://github.com/nlohmann/json/archive/v{version}.tar.gz"],
# This will be a replacement for rapidJSON used in extensions and may also be a fast
# replacement for protobuf JSON.
use_category = ["controlplane", "dataplane_core"],
release_date = "2020-08-06",
cpe = "cpe:2.3:a:json_project:json:*",
),
# This is an external dependency needed while running the
# envoy docker image. A bazel target has been created since
# there is no binary package available for the utility on Ubuntu
# which is the base image used to build an envoy container.
# This is not needed to build an envoy binary or run tests.
com_github_ncopa_suexec = dict(
project_name = "su-exec",
project_desc = "Utility to switch user and group id, setgroups and exec",
project_url = "https://github.com/ncopa/su-exec",
version = "212b75144bbc06722fbd7661f651390dc47a43d1",
sha256 = "939782774079ec156788ea3e04dd5e340e993544f4296be76a9c595334ca1779",
strip_prefix = "su-exec-{version}",
urls = ["https://github.com/ncopa/su-exec/archive/{version}.tar.gz"],
use_category = ["other"],
release_date = "2019-09-18",
cpe = "N/A",
),
com_github_twitter_common_lang = dict(
project_name = "twitter.common.lang (Thrift)",
project_desc = "twitter.common Python language and compatibility facilities",
project_url = "https://pypi.org/project/twitter.common.lang",
version = "0.3.9",
sha256 = "56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1",
strip_prefix = "twitter.common.lang-{version}/src",
urls = ["https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-{version}.tar.gz"],
release_date = "2016-10-17",
use_category = ["test_only"],
),
com_github_twitter_common_rpc = dict(
project_name = "twitter.common.rpc (Thrift)",
project_desc = "twitter.common Thrift helpers including Finagle and SSL transports",
project_url = "https://pypi.org/project/twitter.common.rpc",
version = "0.3.9",
sha256 = "0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514",
strip_prefix = "twitter.common.rpc-{version}/src",
urls = ["https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-{version}.tar.gz"],
release_date = "2016-10-17",
use_category = ["test_only"],
),
com_github_twitter_common_finagle_thrift = dict(
project_name = "twitter.common.finagle-thrift",
project_desc = "twitter.common Thrift stubs for Zipkin RPC tracing support in Finagle",
project_url = "https://pypi.org/project/twitter.common.finagle-thrift",
version = "0.3.9",
sha256 = "1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a",
strip_prefix = "twitter.common.finagle-thrift-{version}/src",
urls = ["https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-{version}.tar.gz"],
release_date = "2016-10-17",
use_category = ["test_only"],
),
com_google_googletest = dict(
project_name = "Google Test",
project_desc = "Google's C++ test framework",
project_url = "https://github.com/google/googletest",
# Pick up fix for MOCK_METHOD compilation with clang-cl for Windows (resolved after 1.10.0)
# see https://github.com/google/googletest/issues/2490
version = "a4ab0abb93620ce26efad9de9296b73b16e88588",
sha256 = "7897bfaa5ad39a479177cfb5c3ce010184dbaee22a7c3727b212282871918751",
strip_prefix = "googletest-{version}",
urls = ["https://github.com/google/googletest/archive/{version}.tar.gz"],
release_date = "2020-09-10",
use_category = ["test_only"],
),
com_google_protobuf = dict(
project_name = "Protocol Buffers",
project_desc = "Language-neutral, platform-neutral extensible mechanism for serializing structured data",
project_url = "https://developers.google.com/protocol-buffers",
version = "3.16.0",
sha256 = "d7371dc2d46fddac1af8cb27c0394554b068768fc79ecaf5be1a1863e8ff3392",
strip_prefix = "protobuf-{version}",
urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-all-{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2021-05-06",
cpe = "cpe:2.3:a:google:protobuf:*",
),
grpc_httpjson_transcoding = dict(
project_name = "grpc-httpjson-transcoding",
project_desc = "Library that supports transcoding so that HTTP/JSON can be converted to gRPC",
project_url = "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding",
version = "f1591a41318104b7e27a26be12f502b106a16256",
sha256 = "440baf465096ce1a7152c6d1090a70e871e5ca93b23c6cf9f8cd79f028bf5bb8",
strip_prefix = "grpc-httpjson-transcoding-{version}",
urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.http.grpc_json_transcoder"],
release_date = "2021-05-08",
cpe = "N/A",
),
io_bazel_rules_go = dict(
project_name = "Go rules for Bazel",
project_desc = "Bazel rules for the Go language",
project_url = "https://github.com/bazelbuild/rules_go",
version = "0.27.0",
sha256 = "69de5c704a05ff37862f7e0f5534d4f479418afc21806c887db544a316f3cb6b",
urls = ["https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz"],
use_category = ["build", "api"],
release_date = "2021-03-17",
implied_untracked_deps = [
"com_github_golang_protobuf",
"io_bazel_rules_nogo",
"org_golang_google_protobuf",
"org_golang_x_tools",
],
),
rules_cc = dict(
project_name = "C++ rules for Bazel",
project_desc = "Bazel rules for the C++ language",
project_url = "https://github.com/bazelbuild/rules_cc",
# TODO(lizan): pin to a point releases when there's a released version.
version = "b1c40e1de81913a3c40e5948f78719c28152486d",
sha256 = "71d037168733f26d2a9648ad066ee8da4a34a13f51d24843a42efa6b65c2420f",
strip_prefix = "rules_cc-{version}",
urls = ["https://github.com/bazelbuild/rules_cc/archive/{version}.tar.gz"],
release_date = "2020-11-11",
use_category = ["build"],
),
rules_foreign_cc = dict(
project_name = "Rules for using foreign build systems in Bazel",
project_desc = "Rules for using foreign build systems in Bazel",
project_url = "https://github.com/bazelbuild/rules_foreign_cc",
version = "d54c78ab86b40770ee19f0949db9d74a831ab9f0",
sha256 = "e7446144277c9578141821fc91c55a61df7ae01bda890902f7286f5fd2f6ae46",
strip_prefix = "rules_foreign_cc-{version}",
urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/{version}.tar.gz"],
release_date = "2020-10-26",
use_category = ["build"],
),
rules_python = dict(
project_name = "Python rules for Bazel",
project_desc = "Bazel rules for the Python language",
project_url = "https://github.com/bazelbuild/rules_python",
version = "0.1.0",
sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0",
release_date = "2020-10-15",
urls = ["https://github.com/bazelbuild/rules_python/releases/download/{version}/rules_python-{version}.tar.gz"],
use_category = ["build"],
),
six = dict(
project_name = "Six",
project_desc = "Python 2 and 3 compatibility library",
project_url = "https://pypi.org/project/six",
version = "1.12.0",
sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73",
urls = ["https://files.pythonhosted.org/packages/dd/bf/4138e7bfb757de47d1f4b6994648ec67a51efe58fa907c1e11e350cddfca/six-{version}.tar.gz"],
release_date = "2018-12-09",
use_category = ["other"],
),
org_llvm_llvm = dict(
project_name = "LLVM",
project_desc = "LLVM Compiler Infrastructure",
project_url = "https://llvm.org",
version = "10.0.0",
sha256 = "df83a44b3a9a71029049ec101fb0077ecbbdf5fe41e395215025779099a98fdf",
strip_prefix = "llvm-{version}.src",
urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/llvm-{version}.src.tar.xz"],
release_date = "2020-03-23",
use_category = ["dataplane_ext"],
extensions = [
"envoy.wasm.runtime.wamr",
"envoy.wasm.runtime.wavm",
],
cpe = "cpe:2.3:a:llvm:*:*",
),
com_github_wamr = dict(
project_name = "Webassembly Micro Runtime",
project_desc = "A standalone runtime with a small footprint for WebAssembly",
project_url = "https://github.com/bytecodealliance/wasm-micro-runtime",
version = "a14a4487bb8b493bf6c68d83b03f12028d16f58a",
sha256 = "d68668e129f16a9ddd7a1a0da22b17905a25001ae2de398726d37880b61fee9e",
strip_prefix = "wasm-micro-runtime-{version}",
urls = ["https://github.com/bytecodealliance/wasm-micro-runtime/archive/{version}.tar.gz"],
release_date = "2021-05-14",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wamr"],
cpe = "N/A",
),
com_github_wavm_wavm = dict(
project_name = "WAVM",
project_desc = "WebAssembly Virtual Machine",
project_url = "https://wavm.github.io",
version = "79c3aa29366615d9b1593cd527e5b4b94cc6072a",
sha256 = "ce899269516313b400005a8cc9bc3bcd8329663f43f7b4baae211ea0cd456a39",
strip_prefix = "WAVM-{version}",
urls = ["https://github.com/WAVM/WAVM/archive/{version}.tar.gz"],
release_date = "2021-03-31",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wavm"],
cpe = "cpe:2.3:a:webassembly_virtual_machine_project:webassembly_virtual_machine:*",
),
com_github_wasmtime = dict(
project_name = "wasmtime",
project_desc = "A standalone runtime for WebAssembly",
project_url = "https://github.com/bytecodealliance/wasmtime",
version = "0.26.0",
sha256 = "e95d274822ac72bf06355bdfbeddcacae60d7e98fec8ee4b2e21740636fb5c2c",
strip_prefix = "wasmtime-{version}",
urls = ["https://github.com/bytecodealliance/wasmtime/archive/v{version}.tar.gz"],
release_date = "2021-04-05",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wasmtime"],
cpe = "N/A",
),
com_github_wasm_c_api = dict(
project_name = "wasm-c-api",
project_desc = "WebAssembly C and C++ API",
project_url = "https://github.com/WebAssembly/wasm-c-api",
# this is the submodule's specific commit used by wasmtime
# https://github.com/bytecodealliance/wasmtime/tree/v0.25.0/crates/c-api
version = "c9d31284651b975f05ac27cee0bab1377560b87e",
sha256 = "c774044f51431429e878bd1b9e2a4e38932f861f9211df72f75e9427eb6b8d32",
strip_prefix = "wasm-c-api-{version}",
urls = ["https://github.com/WebAssembly/wasm-c-api/archive/{version}.tar.gz"],
release_date = "2021-01-11",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wasmtime"],
cpe = "N/A",
),
io_opencensus_cpp = dict(
project_name = "OpenCensus C++",
project_desc = "OpenCensus tracing library",
project_url = "https://github.com/census-instrumentation/opencensus-cpp",
version = "ba631066779a534267fdb1321b19850eb2b0c000",
sha256 = "f239a40803f6e2e42b57c9e68771b0990c4ca8b2d76b440073cdf14f4211ad26",
strip_prefix = "opencensus-cpp-{version}",
urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = ["envoy.tracers.opencensus"],
release_date = "2020-10-08",
cpe = "N/A",
),
# This should be removed, see https://github.com/envoyproxy/envoy/issues/11816.
com_github_curl = dict(
project_name = "curl",
project_desc = "Library for transferring data with URLs",
project_url = "https://curl.haxx.se",
version = "7.77.0",
sha256 = "b0a3428acb60fa59044c4d0baae4e4fc09ae9af1d8a3aa84b2e3fbcd99841f77",
strip_prefix = "curl-{version}",
urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"],
use_category = ["dataplane_ext", "observability_ext"],
extensions = [
"envoy.filters.http.aws_lambda",
"envoy.filters.http.aws_request_signing",
"envoy.grpc_credentials.aws_iam",
"envoy.tracers.opencensus",
],
release_date = "2021-05-26",
cpe = "cpe:2.3:a:haxx:libcurl:*",
),
com_googlesource_chromium_v8 = dict(
project_name = "V8",
project_desc = "Google’s open source high-performance JavaScript and WebAssembly engine, written in C++",
project_url = "https://v8.dev",
version = "9.2.230.2",
# This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh
# and contains a complete checkout of V8 with all dependencies necessary to build wee8.
sha256 = "237b9816ee56ad9b86e12e082132d88c543be494385f9bf9797af2a415c05f56",
urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.v8"],
release_date = "2021-05-20",
cpe = "cpe:2.3:a:google:v8:*",
),
com_googlesource_quiche = dict(
project_name = "QUICHE",
project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols",
project_url = "https://quiche.googlesource.com/quiche",
version = "6460972177446abe179ea430bf85b217c5ce240b",
sha256 = "5397ae241fc505e887203dc2c2f439549e42b1287687b155bcecac34536db434",
# Static snapshot of https://quiche.googlesource.com/quiche/+archive/{version}.tar.gz
urls = ["https://storage.googleapis.com/quiche-envoy-integration/{version}.tar.gz"],
use_category = ["dataplane_core"],
release_date = "2021-04-21",
cpe = "N/A",
),
com_googlesource_googleurl = dict(
project_name = "Chrome URL parsing library",
project_desc = "Chrome URL parsing library",
project_url = "https://quiche.googlesource.com/googleurl",
# Static snapshot of https://quiche.googlesource.com/quiche/+archive/ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz.
version = "ef0d23689e240e6c8de4c3a5296b209128c87373",
sha256 = "d769283fed1319bca68bae8bdd47fbc3a7933999329eee850eff1f1ea61ce176",
urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
extensions = [],
release_date = "2020-07-30",
cpe = "N/A",
),
com_google_cel_cpp = dict(
project_name = "Common Expression Language (CEL) C++ library",
project_desc = "Common Expression Language (CEL) C++ library",
project_url = "https://opensource.google/projects/cel",
version = "9841e3ee251f3cc4cd5b6dd9deee6818bc9f2854",
sha256 = "7e42cbad7d1068d6e7891ad101e2863e727692136d6b3a817c487b3cc7bcfdcc",
strip_prefix = "cel-cpp-{version}",
urls = ["https://github.com/google/cel-cpp/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.rate_limit_descriptors.expr",
"envoy.filters.http.rbac",
"envoy.filters.http.wasm",
"envoy.filters.network.rbac",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
],
release_date = "2020-12-17",
cpe = "N/A",
),
com_github_google_flatbuffers = dict(
project_name = "FlatBuffers",
project_desc = "Cross platform serialization library architected for maximum memory efficiency",
project_url = "https://github.com/google/flatbuffers",
version = "a83caf5910644ba1c421c002ef68e42f21c15f9f",
sha256 = "b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a",
strip_prefix = "flatbuffers-{version}",
urls = ["https://github.com/google/flatbuffers/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.rate_limit_descriptors.expr",
"envoy.filters.http.rbac",
"envoy.filters.http.wasm",
"envoy.filters.network.rbac",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
],
release_date = "2020-04-02",
cpe = "N/A",
),
com_googlesource_code_re2 = dict(
project_name = "RE2",
project_desc = "RE2, a regular expression library",
project_url = "https://github.com/google/re2",
version = "2020-07-06",
sha256 = "2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f",
strip_prefix = "re2-{version}",
urls = ["https://github.com/google/re2/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2020-07-06",
cpe = "N/A",
),
# Included to access FuzzedDataProvider.h. This is compiler agnostic but
# provided as part of the compiler-rt source distribution. We can't use the
# Clang variant as we are not a Clang-LLVM only shop today.
org_llvm_releases_compiler_rt = dict(
project_name = "compiler-rt",
project_desc = "LLVM compiler runtime library",
project_url = "https://compiler-rt.llvm.org",
version = "11.0.1",
sha256 = "087be3f1116e861cd969c9b0b0903c27028b52eaf45157276f50a9c2500687fc",
# Only allow peeking at fuzzer related files for now.
strip_prefix = "compiler-rt-{version}.src",
urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/compiler-rt-{version}.src.tar.xz"],
release_date = "2020-12-18",
use_category = ["test_only"],
),
upb = dict(
project_name = "upb",
project_desc = "A small protobuf implementation in C (gRPC dependency)",
project_url = "https://github.com/protocolbuffers/upb",
version = "de76b31f9c56b28120580d53a6f8d7941fdb79eb",
sha256 = "487d84ce85065ff89ccde1c1ac2ea1515d2be411306e4adf1be6861dc4a4a86b",
release_date = "2020-12-29",
strip_prefix = "upb-{version}",
urls = ["https://github.com/protocolbuffers/upb/archive/{version}.tar.gz"],
use_category = ["controlplane"],
cpe = "N/A",
),
kafka_source = dict(
project_name = "Kafka (source)",
project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
version = "2.4.1",
sha256 = "740236f44d66e33ea83382383b4fb7eabdab7093a644b525dd5ec90207f933bd",
strip_prefix = "kafka-{version}/clients/src/main/resources/common/message",
urls = ["https://github.com/apache/kafka/archive/{version}.zip"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.network.kafka_broker"],
release_date = "2020-03-03",
cpe = "cpe:2.3:a:apache:kafka:*",
),
kafka_server_binary = dict(
project_name = "Kafka (server binary)",
project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
version = "2.4.1",
sha256 = "2177cbd14118999e1d76fec628ca78ace7e6f841219dbc6035027c796bbe1a2a",
strip_prefix = "kafka_2.12-{version}",
urls = ["https://archive.apache.org/dist/kafka/{version}/kafka_2.12-{version}.tgz"],
release_date = "2020-03-12",
use_category = ["test_only"],
),
kafka_python_client = dict(
project_name = "Kafka (Python client)",
project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
version = "2.0.1",
sha256 = "05f7c6eecb402f11fcb7e524c903f1ba1c38d3bdc9bf42bc8ec3cf7567b9f979",
strip_prefix = "kafka-python-{version}",
urls = ["https://github.com/dpkp/kafka-python/archive/{version}.tar.gz"],
release_date = "2020-02-20",
use_category = ["test_only"],
),
proxy_wasm_cpp_sdk = dict(
project_name = "WebAssembly for Proxies (C++ SDK)",
project_desc = "WebAssembly for Proxies (C++ SDK)",
project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-sdk",
version = "d9baeb21d46ab07d4eb9295a5d53a1803b7b80af",
sha256 = "b517ac487e0ac4b5d4f951ec805f2e54d5aecece34159b053c5fb781fac5e0f5",
strip_prefix = "proxy-wasm-cpp-sdk-{version}",
urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.filters.http.wasm",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
"envoy.wasm.runtime.null",
"envoy.wasm.runtime.v8",
"envoy.wasm.runtime.wamr",
"envoy.wasm.runtime.wavm",
"envoy.wasm.runtime.wasmtime",
],
release_date = "2021-05-15",
cpe = "N/A",
),
proxy_wasm_cpp_host = dict(
project_name = "WebAssembly for Proxies (C++ host implementation)",
project_desc = "WebAssembly for Proxies (C++ host implementation)",
project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host",
version = "e641ffa8893477cdb4720f572f50f003cd51a083",
sha256 = "20abaa0ff37b3765111fb81774bf4fa4630e23dc9c468b42016c4ebf4f27a38a",
strip_prefix = "proxy-wasm-cpp-host-{version}",
urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.filters.http.wasm",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
"envoy.wasm.runtime.null",
"envoy.wasm.runtime.v8",
"envoy.wasm.runtime.wamr",
"envoy.wasm.runtime.wavm",
"envoy.wasm.runtime.wasmtime",
],
release_date = "2021-05-07",
cpe = "N/A",
),
proxy_wasm_rust_sdk = dict(
project_name = "WebAssembly for Proxies (Rust SDK)",
project_desc = "WebAssembly for Proxies (Rust SDK)",
project_url = "https://github.com/proxy-wasm/proxy-wasm-rust-sdk",
version = "28a94df25659b2107b67a11df0112f8f6833558b",
sha256 = "d3da0042fc119282223b7955962e8b3eed261242c8493f9dc8d07a08ca7e2e3e",
strip_prefix = "proxy-wasm-rust-sdk-{version}",
urls = ["https://github.com/proxy-wasm/proxy-wasm-rust-sdk/archive/{version}.tar.gz"],
use_category = ["test_only"],
release_date = "2021-02-09",
cpe = "N/A",
),
emscripten_toolchain = dict(
project_name = "Emscripten SDK",
project_desc = "Emscripten SDK (use by Wasm)",
project_url = "https://github.com/emscripten-core/emsdk",
version = "2.0.7",
sha256 = "ce7a5c76e8b425aca874cea329fd9ac44b203b777053453b6a37b4496c5ce34f",
strip_prefix = "emsdk-{version}",
urls = ["https://github.com/emscripten-core/emsdk/archive/{version}.tar.gz"],
use_category = ["build"],
release_date = "2020-10-13",
),
rules_rust = dict(
project_name = "Bazel rust rules",
project_desc = "Bazel rust rules (used by Wasm)",
project_url = "https://github.com/bazelbuild/rules_rust",
version = "1b648302edb64d3ddcc159655bf065bff40e6571",
sha256 = "242deacf4c9e4274d90964689dfae6c245bfb1bfa5e3336b2ad3b44f2541b70c",
strip_prefix = "rules_rust-{version}",
urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wasmtime"],
release_date = "2021-04-02",
cpe = "N/A",
),
rules_antlr = dict(
project_name = "ANTLR Rules for Bazel",
project_desc = "Bazel rules for ANTLR",
project_url = "https://github.com/marcohu/rules_antlr",
version = "3cc2f9502a54ceb7b79b37383316b23c4da66f9a",
sha256 = "7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429",
strip_prefix = "rules_antlr-{version}",
urls = ["https://github.com/marcohu/rules_antlr/archive/{version}.tar.gz"],
# ANTLR has a runtime component, so it is not purely a build dependency.
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.rate_limit_descriptors.expr",
"envoy.filters.http.wasm",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
],
release_date = "2019-06-21",
cpe = "N/A",
),
antlr4_runtimes = dict(
project_name = "ANTLR v4",
project_desc = "ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files",
project_url = "https://github.com/antlr/antlr4",
version = "4.7.2",
sha256 = "46f5e1af5f4bd28ade55cb632f9a069656b31fc8c2408f9aa045f9b5f5caad64",
strip_prefix = "antlr4-{version}",
urls = ["https://github.com/antlr/antlr4/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.rate_limit_descriptors.expr",
"envoy.filters.http.wasm",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
],
release_date = "2018-12-18",
cpe = "N/A",
),
)
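# --- Editor's illustrative sketch (not part of the upstream dependency file) ---
# The entries above template their "urls" and "strip_prefix" fields with
# "{version}"; the curl entry additionally uses "{underscore_version}" because
# curl release tags replace dots with underscores (e.g. curl-7_77_0). The
# helper below is a hypothetical, minimal Starlark example of how such
# templates could be expanded into concrete download URLs; it is not the
# project's actual resolution code.
def _resolve_urls(location):
    version = location["version"]
    # Derive the underscore form used by a few upstream release URL schemes.
    underscore_version = version.replace(".", "_")
    return [
        url.format(version = version, underscore_version = underscore_version)
        for url in location["urls"]
    ]

# Example (hypothetical): calling _resolve_urls on the curl entry above would
# yield "https://github.com/curl/curl/releases/download/curl-7_77_0/curl-7.77.0.tar.gz".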
avg_line_length: 50.952158
max_line_length: 185
alphanum_fraction: 0.651975