__all__ = ["data_commands", "print"]
|
import os.path
import numpy as np
from itertools import chain
from argparse import ArgumentParser
from sklearn.preprocessing import StandardScaler
from typing import Any, Dict, Iterable, List
from utils.constants import TEST, INPUTS, OUTPUT, SAMPLE_ID, TIMESTAMP, TRAIN
from utils.file_utils import iterate_files, read_by_file_suffix
from utils.data_writer import DataWriter
def data_generator(data_folder: str) -> Iterable[Dict[str, Any]]:
for data_file in iterate_files(data_folder, pattern=r'.*jsonl.gz'):
for sample in read_by_file_suffix(data_file):
# indices = list(range(len(sample[INPUTS])))
# sampled_indices = np.sort(np.random.choice(indices, size=seq_length, replace=False))
# sample[INPUTS] =
yield sample
def fit_input_scaler(data_folder: str) -> StandardScaler:
inputs = [sample[INPUTS] for sample in data_generator(data_folder)]
num_features = len(inputs[0][0])
input_array = np.array(inputs).reshape(-1, num_features) # Reshape to a 2D array
scaler = StandardScaler().fit(input_array)
return scaler
def to_fixed_point(val: float, precision: int) -> int:
return int(val * (1 << precision))
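# Illustration (not part of the original script): to_fixed_point stores a float
# as an integer scaled by 2**precision, so a hypothetical inverse simply divides
# that factor back out, e.g. to_fixed_point(0.5, 10) == 512.
def to_float(fixed: int, precision: int) -> float:
    return fixed / (1 << precision)  # 512 / 1024 == 0.5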
def create_testing_set(data_folder: str, precision: int, chunk_size: int):
output_folder = os.path.join(data_folder, '{0}_{1}'.format(TEST, precision))
input_scaler = fit_input_scaler(os.path.join(data_folder, TRAIN))
inputs: List[List[float]] = []
outputs: List[int] = []
for sample in data_generator(os.path.join(data_folder, TEST)):
# Scale inputs and then convert to fixed point
scaled_inputs = input_scaler.transform(sample[INPUTS])
fixed_point_inputs = [list(map(lambda x: to_fixed_point(x, precision), features)) for features in scaled_inputs]
inputs.append(fixed_point_inputs)
outputs.append(sample[OUTPUT])
# with DataWriter(output_folder, file_prefix='data', file_suffix='jsonl.gz', chunk_size=chunk_size) as writer:
# test_folder = os.path.join(data_folder, TEST)
# for index, sample in enumerate(data_generator(test_folder, seq_length)):
# writer.add(sample)
# inputs.append(sample[INPUTS])
# outputs.append(sample[OUTPUT])
# if (index + 1) % chunk_size == 0:
# print('Completed {0} samples.'.format(index + 1), end='\r')
# print()
# print('Completed. Writing to text files.')
# Write to a text file to make it easier for the C implementation
txt_input_file = os.path.join(data_folder, '{0}_{1}_inputs.txt'.format(TEST, precision))
with open(txt_input_file, 'w') as txt_file:
for seq in inputs:
flattened = chain(*seq)
txt_file.write(' '.join(map(str, flattened)))
txt_file.write('\n')
txt_output_file = os.path.join(data_folder, '{0}_{1}_outputs.txt'.format(TEST, precision))
with open(txt_output_file, 'w') as txt_file:
for label in outputs:
txt_file.write(str(label))
txt_file.write('\n')
if __name__ == '__main__':
parser = ArgumentParser(description='Samples sequences from the testing set to create a concrete set of test samples')
parser.add_argument('--data-folder', type=str, required=True)
parser.add_argument('--precision', type=int, required=True)
parser.add_argument('--chunk-size', type=int, default=5000)
args = parser.parse_args()
create_testing_set(args.data_folder, args.precision, args.chunk_size)
|
"""
skfuzzy.control subpackage, providing a high-level API for fuzzy system design.
"""
__all__ = ['Antecedent',
'Consequent',
'CrispValueCalculatorError',
'DefuzzifyError',
'EmptyMembershipError',
'NoTermMembershipsError',
'ControlSystem',
'ControlSystemSimulation',
'Rule',
'accumulation_max',
'accumulation_mult',
]
from .antecedent_consequent import (Antecedent, Consequent,
accumulation_max, accumulation_mult)
from .controlsystem import ControlSystem, ControlSystemSimulation
from .exceptions import (CrispValueCalculatorError, DefuzzifyError,
EmptyMembershipError, NoTermMembershipsError)
from .rule import Rule
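# A minimal usage sketch of this subpackage (illustrative only; the variable
# names, universes, and crisp input below are assumptions, not part of skfuzzy):
#
#   import numpy as np
#   from skfuzzy import control as ctrl
#
#   quality = ctrl.Antecedent(np.arange(0, 11, 1), 'quality')
#   tip = ctrl.Consequent(np.arange(0, 26, 1), 'tip')
#   quality.automf(3)                     # generates 'poor', 'average', 'good'
#   tip.automf(3)
#   rule = ctrl.Rule(quality['good'], tip['good'])
#   sim = ctrl.ControlSystemSimulation(ctrl.ControlSystem([rule]))
#   sim.input['quality'] = 6.5
#   sim.compute()
#   print(sim.output['tip'])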
|
"""ADB-related exceptions.
"""
from . import constants
class AdbCommandFailureException(Exception):
"""A ``b'FAIL'`` packet was received.
"""
class DeviceAuthError(Exception):
"""Device authentication failed.
"""
def __init__(self, message, *args):
message %= args
super(DeviceAuthError, self).__init__(message, *args)
class InterleavedDataError(Exception):
"""We only support command sent serially.
"""
class InvalidChecksumError(Exception):
"""Checksum of data didn't match expected checksum.
"""
class InvalidCommandError(Exception):
"""Got an invalid command.
"""
def __init__(self, message, response_header, response_data):
if response_header == constants.FAIL:
message = 'Command failed, device said so. (%s)' % message
super(InvalidCommandError, self).__init__(message, response_header, response_data)
class InvalidResponseError(Exception):
"""Got an invalid response to our command.
"""
class TcpTimeoutException(Exception):
"""TCP connection timed read/write operation exceeded the allowed time.
"""
|
valores = []
while True:
valor = (int(input('Digite um valor: ')))
if valor in valores:
print(f'\033[31mO valor {valor} já existe na lista! Não vou adicionar...\033[m')
else:
valores.append(valor)
print('Valor adicionado com sucesso...')
perg = ' '
while perg not in 'SN':
perg = str(input('Quer continuar? [S / N] ')).upper().strip()[0]
if perg == 'N':
break
print('='*30)
valores.sort()
print(f'Os valores digitados foram {valores}')
|
# -*- coding: utf-8 -*-
"""
DEFAULT PARAMETERS OF SPAFHY FOR A SINGLE CATCHMENT AND POINT-SCALE SIMULATIONS
Created on Mon Jun 25 18:34:12 2018
@author: slauniai
Last edit: 11.5.2020 / SL: canopygrid can now have multiple vegetation types.
Phenology is common to all, LAI-cycle common to all deciduous
"""
def parameters():
pgen = {'catchment_id': '1',
'gis_folder': r'C:\SpaFHy_v1_Pallas\data\C16',
'forcing_file': r'C:\SpaFHy_v1_Pallas\data\Kenttarova_forcing.csv',
'runoff_file': r'C:\SpaFHy_v1_Pallas\data\obs\Runoffs1d_SVEcatchments_mmd.csv', #
'ncf_file': r'C3.nc',
'results_folder': r'C:\SpaFHy_v1_Pallas\Results',
'start_date': '2016-01-01',
'end_date': '2019-10-01',
#'end_date': '2016-07-01',
'spinup_end': '2016-05-01',
'dt': 86400.0,
'spatial_cpy': True,
'spatial_soil': True
}
# canopygrid
pcpy = {'loc': {'lat': 67.995, 'lon': 24.224},
'flow' : { # flow field
'zmeas': 2.0,
'zground': 0.5,
'zo_ground': 0.01
},
'interc': { # interception
'wmax': 1.5, # storage capacity for rain (mm/LAI)
'wmaxsnow': 4.5, # storage capacity for snow (mm/LAI),
},
'snow': {
# degree-day snow model / energy balance
'kmelt': 2.8934e-05, # melt coefficient in open (mm/s)
'kfreeze': 5.79e-6, # freezing coefficient (mm/s)
'r': 0.05, # maximum fraction of liquid in snow (-)
'albpow': 1.0, # parameter lowering albedo for aging snow
'albground': 0.21, # albedo for snowfree ground
'cAtten': 1.0, # attenuation coefficient for canopy longwave radiation (Monteith and Unsworth 2013, fig. 8.2)
'RDthres': 0.001, # amount of precipitation [m] above which a day is considered cloudy (affects long wave inputs)
'Tmin': 1.0, # threshold below which all precipitation is snow
'Tmax': 0.0, # threshold above which all precipitation is liquid water (between Tmin and Tmax precipitation is split between rain and snow)
},
# canopy conductance
'physpara': {
'kp': 0.6, # canopy light attenuation parameter (-)
'rw': 0.20, # critical value for REW (-),
'rwmin': 0.02, # minimum relative conductance (-)
# soil evaporation
'gsoil': 1e-2 # soil surface conductance if soil is fully wet (m/s)
},
'spec_para': {
'conif': {'amax': 10.0, # maximum photosynthetic rate (umolm-2(leaf)s-1)
'g1': 2.1, # stomatal parameter
'q50': 50.0, # light response parameter (Wm-2)
'lai_cycle': False,
},
'decid': {'amax': 10.0, # maximum photosynthetic rate (umolm-2(leaf)s-1)
'g1': 3.5, # stomatal parameter
'q50': 50.0, # light response parameter (Wm-2)
'lai_cycle': True,
},
'shrub': {'amax': 10.0, # maximum photosynthetic rate (umolm-2(leaf)s-1)
'g1': 3.0, # stomatal parameter
'q50': 50.0, # light response parameter (Wm-2)
'lai_cycle': False,
},
'grass': {'amax': 10.0, # maximum photosynthetic rate (umolm-2(leaf)s-1)
'g1': 5.0, # stomatal parameter
'q50': 50.0, # light response parameter (Wm-2)
'lai_cycle': True,
},
},
'phenopara': {
# phenology
'smax': 18.5, # degC
'tau': 13.0, # days
'xo': -4.0, # degC
'fmin': 0.05, # minimum photosynthetic capacity in winter (-)
# annual cycle of leaf-area in deciduous trees
'lai_decid_min': 0.1, # minimum relative LAI (-)
'ddo': 45.0, # degree-days for bud-burst (5degC threshold)
'ddur': 23.0, # duration of leaf development (days)
'sdl': 9.0, # daylength for senescence start (h)
'sdur': 30.0, # duration of leaf senescence (days),
},
'state': {# LAI is the annual maximum LAI; for gridded simulations it is read from GisData!
# keys must be 'LAI_' + key in spec_para
'LAI_conif': 1.0,
'LAI_decid': 1.0,
'LAI_shrub': 0.1,
'LAI_grass': 0.2,
'hc': 16.0, # canopy height (m)
'cf': 0.6, # canopy closure fraction (-)
#initial state of canopy storage [mm] and snow water equivalent [mm]
'w': 0.0, # canopy storage mm
'swe': 0.0, # snow water equivalent mm
'Wice': 0.0, # ice in snowpack
'Wliq': 0.0, # liquid water in snowpack
'd_nosnow': 1.0, # days since snowfall
'd_snow': 0.0, # days with snow on the ground
'Tsnow': -4.0, # snow temperature
'alb': 0, # initial albedo
'emAir': 0 # initial air emissivity
}
}
# BUCKET
pbu = {'depth': 0.4, # root zone depth (m)
# following soil properties are used if spatial_soil = False
'poros': 0.43, # porosity (-)
'fc': 0.33, # field capacity (-)
'wp': 0.13, # wilting point (-)
'ksat': 2.0e-6, # saturated hydraulic conductivity
'beta': 4.7,
#organic (moss) layer
'org_depth': 0.05, # depth of organic top layer (m)
'org_poros': 0.9, # porosity (-)
'org_fc': 0.30, # field capacity (-)
'org_rw': 0.15, # critical vol. moisture content (-) for decreasing phase in Ef
'maxpond': 0.0, # max ponding allowed (m)
#initial states: rootzone and toplayer soil saturation ratio [-] and pond storage [m]
'rootzone_sat': 0.6, # root zone saturation ratio (-)
'org_sat': 1.0, # organic top layer saturation ratio (-)
'pond_sto': 0.0 # pond storage
}
# TOPMODEL
ptop = {'dt': 86400.0, # timestep (s)
'm': 0.025, # scaling depth (m)
'ko': 0.001, # transmissivity parameter (ms-1)
'twi_cutoff': 99.5, # cutoff of cumulative twi distribution (%)
'so': 0.05 # initial saturation deficit (m)
}
return pgen, pcpy, pbu, ptop
def soil_properties():
"""
Defines 5 soil types: Fine, Medium and Coarse textured + organic Peat
and Humus.
Currently SpaFHy needs the following parameters: soil_id, poros, fc, wp, wr,
n, alpha, ksat, beta
"""
psoil = {
'CoarseTextured':
{'airentry': 20.8,
'alpha': 0.024,
'beta': 3.1,
'fc': 0.21,
'ksat': 1E-04,
'n': 1.2,
'poros': 0.41,
'soil_id': 1.0,
'wp': 0.10,
'wr': 0.05,
},
'MediumTextured':
{'airentry': 20.8,
'alpha': 0.024,
'beta': 4.7,
'fc': 0.33,
'ksat': 1E-05,
'n': 1.2,
'poros': 0.43,
'soil_id': 2.0,
'wp': 0.13,
'wr': 0.05,
},
'FineTextured':
{'airentry': 34.2,
'alpha': 0.018, # van genuchten parameter
'beta': 7.9,
'fc': 0.34,
'ksat': 1E-06, # saturated hydraulic conductivity
'n': 1.16, # van genuchten parameter
'poros': 0.5, # porosity (-)
'soil_id': 3.0,
'wp': 0.25, # wilting point (-)
'wr': 0.07,
},
'Peat':
{'airentry': 29.2,
'alpha': 0.08, # Menbery et al. 2021
'beta': 6.0,
'fc': 0.53, # Menbery et al. 2021
'ksat': 6e-05, # Menbery et al. 2021
'n': 1.75, # Menbery et al. 2021
'poros': 0.93, # Menbery et al. 2021
'soil_id': 4.0,
'wp': 0.36, # Menbery et al. 2021
'wr': 0.0,
},
'Humus':
{'airentry': 29.2,
'alpha': 0.123,
'beta': 6.0,
'fc': 0.35,
'ksat': 8e-06,
'n': 1.28,
'poros': 0.85,
'soil_id': 5.0,
'wp': 0.15,
'wr': 0.01,
},
}
return psoil
def topsoil():
"""
Properties of typical topsoils
Following main site type (1-4) classification
"""
topsoil = {
'mineral':{
'topsoil_id': 1,
'org_depth': 0.05,
'org_poros': 0.9,
'org_fc': 0.33,
'org_rw': 0.15
},
'fen':{
'topsoil_id': 2,
'org_depth': 0.05,
'org_poros': 0.9,
'org_fc': 0.514,
'org_rw': 0.15
},
'peatland':{
'topsoil_id': 3,
'org_depth': 0.05,
'org_poros': 0.9,
'org_fc': 0.514,
'org_rw': 0.15
},
'openmire':{
'topsoil_id': 4,
'org_depth': 0.05,
'org_poros': 0.9,
'org_fc': 0.514,
'org_rw': 0.15
}
}
return topsoil
def parameters_FIHy():
# parameter file for running SpaFHy_point at FIHy-site
pgen = {'catchment_id': 'FIHy',
'gis_folder': None,
'forcing_file': r'c:/Repositories/SpaFHy_v1_Pallas/Data/HydeDaily2000-2010.txt',
'runoff_file': None,
'output_file': r'c:/Repositories/SpaFHy_v1_Pallas/Results/FIHy_test',
'start_date': '2013-01-01',
'end_date': '2016-12-31',
'spinup_end': '2013-12-31',
'dt': 86400.0,
'spatial_cpy': False,
'spatial_soil': False
}
# canopygrid
pcpy = {'loc': {'lat': 61.4, 'lon': 23.7},
'flow' : { # flow field
'zmeas': 2.0,
'zground': 0.5,
'zo_ground': 0.01
},
'interc': { # interception
'wmax': 1.5, # storage capacity for rain (mm/LAI)
'wmaxsnow': 4.5, # storage capacity for snow (mm/LAI),
},
'snow': {
# degree-day snow model
'kmelt': 2.8934e-05, # melt coefficient in open (mm/s)
'kfreeze': 5.79e-6, # freezing coefficient (mm/s)
'r': 0.05 # maximum fraction of liquid in snow (-)
},
# canopy conductance
'physpara': {
'kp': 0.6, # canopy light attenuation parameter (-)
'rw': 0.20, # critical value for REW (-),
'rwmin': 0.02, # minimum relative conductance (-)
# soil evaporation
'gsoil': 1e-2 # soil surface conductance if soil is fully wet (m/s)
},
'spec_para': {
'conif': {'amax': 10.0, # maximum photosynthetic rate (umolm-2(leaf)s-1)
'g1': 2.1, # stomatal parameter
'q50': 50.0, # light response parameter (Wm-2)
'lai_cycle': False,
},
'decid': {'amax': 10.0, # maximum photosynthetic rate (umolm-2(leaf)s-1)
'g1': 3.5, # stomatal parameter
'q50': 50.0, # light response parameter (Wm-2)
'lai_cycle': True,
},
},
'phenopara': {
# phenology
'smax': 18.5, # degC
'tau': 13.0, # days
'xo': -4.0, # degC
'fmin': 0.05, # minimum photosynthetic capacity in winter (-)
# annual cycle of leaf-area in deciduous trees
'lai_decid_min': 0.1, # minimum relative LAI (-)
'ddo': 45.0, # degree-days for bud-burst (5degC threshold)
'ddur': 23.0, # duration of leaf development (days)
'sdl': 9.0, # daylength for senescence start (h)
'sdur': 30.0, # duration of leaf senescence (days),
},
'state': {# LAI is the annual maximum LAI; for gridded simulations it is read from GisData!
# keys must be 'LAI_' + key in spec_para
'LAI_conif': 3.5,
'LAI_decid': 0.5,
'hc': 16.0, # canopy height (m)
'cf': 0.6, # canopy closure fraction (-)
#initial state of canopy storage [mm] and snow water equivalent [mm]
'w': 0.0, # canopy storage mm
'swe': 0.0, # snow water equivalent mm
}
}
# BUCKET
pbu = {'depth': 0.4, # root zone depth (m)
# following soil properties are used if spatial_soil = False
'poros': 0.43, # porosity (-)
'fc': 0.33, # field capacity (-)
'wp': 0.13, # wilting point (-)
'ksat': 2.0e-6,
'beta': 4.7,
#organic (moss) layer
'org_depth': 0.04, # depth of organic top layer (m)
'org_poros': 0.9, # porosity (-)
'org_fc': 0.3, # field capacity (-)
'org_rw': 0.24, # critical vol. moisture content (-) for decreasing phase in Ef
'maxpond': 0.0, # max ponding allowed (m)
#initial states: rootzone and toplayer soil saturation ratio [-] and pond storage [m]
'rootzone_sat': 0.6, # root zone saturation ratio (-)
'org_sat': 1.0, # organic top layer saturation ratio (-)
'pond_sto': 0.0, # pond storage
'soilcode': -1 # site-specific values
}
return pgen, pcpy, pbu
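# Example use (illustrative only; the SpaFHy model classes that consume these
# dictionaries live elsewhere in the repository):
#
#   pgen, pcpy, pbu, ptop = parameters()           # catchment-scale setup
#   psoil = soil_properties()                      # soil-type library
#   pgen_hy, pcpy_hy, pbu_hy = parameters_FIHy()   # point-scale FIHy setup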
|
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union
from rotkehlchen.accounting.structures import Balance
from rotkehlchen.assets.asset import Asset, EthereumToken
from rotkehlchen.chain.ethereum.makerdao.common import RAY
from rotkehlchen.chain.ethereum.structures import AaveEvent
from rotkehlchen.chain.ethereum.zerion import DefiProtocolBalances
from rotkehlchen.constants.ethereum import (
AAVE_ETH_RESERVE_ADDRESS,
AAVE_LENDING_POOL,
ATOKEN_ABI,
ZERO_ADDRESS,
)
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.fval import FVal
from rotkehlchen.history.price import query_usd_price_zero_if_error
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.premium.premium import Premium
from rotkehlchen.serialization.deserialize import (
deserialize_blocknumber,
deserialize_int_from_hex_or_int,
)
from rotkehlchen.typing import ChecksumEthAddress
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.interfaces import EthereumModule
from rotkehlchen.utils.misc import hex_or_bytes_to_address, hex_or_bytes_to_int
log = logging.getLogger(__name__)
if TYPE_CHECKING:
from rotkehlchen.chain.ethereum.manager import EthereumManager
ATOKEN_TO_DEPLOYED_BLOCK = {
'aETH': 9241088,
'aDAI': 9241063,
'aUSDC': 9241071,
'aSUSD': 9241077,
'aTUSD': 9241068,
'aUSDT': 9241076,
'aBUSD': 9747321,
'aBAT': 9241085,
'aKNC': 9241097,
'aLEND': 9241081,
'aLINK': 9241091,
'aMANA': 9241110,
'aMKR': 9241106,
'aREP': 9241100,
'aSNX': 9241118,
'aWBTC': 9241225,
'aZRX': 9241114,
}
ATOKENS_LIST = [EthereumToken(x) for x in ATOKEN_TO_DEPLOYED_BLOCK]
A_LEND = EthereumToken('LEND')
class AaveLendingBalance(NamedTuple):
"""A balance for Aave lending.
Asset not included here since it's the key in the map that leads to this structure
"""
balance: Balance
apy: FVal
def serialize(self) -> Dict[str, Union[str, Dict[str, str]]]:
return {
'balance': self.balance.serialize(),
'apy': self.apy.to_percentage(precision=2),
}
class AaveBorrowingBalance(NamedTuple):
"""A balance for Aave borrowing.
Asset not included here since it's the key in the map that leads to this structure
"""
balance: Balance
variable_apr: FVal
stable_apr: FVal
def serialize(self) -> Dict[str, Union[str, Dict[str, str]]]:
return {
'balance': self.balance.serialize(),
'variable_apr': self.variable_apr.to_percentage(precision=2),
'stable_apr': self.stable_apr.to_percentage(precision=2),
}
class AaveBalances(NamedTuple):
"""The Aave balances per account. Using str for symbol since ETH is not a token"""
lending: Dict[str, AaveLendingBalance]
borrowing: Dict[str, AaveBorrowingBalance]
class AaveHistory(NamedTuple):
"""All events and total interest accrued for all Atoken of an address
"""
events: List[AaveEvent]
total_earned: Dict[EthereumToken, Balance]
def _get_reserve_address_decimals(symbol: str) -> Tuple[ChecksumEthAddress, int]:
"""Get the reserve address and the number of decimals for symbol"""
if symbol == 'ETH':
reserve_address = AAVE_ETH_RESERVE_ADDRESS
decimals = 18
else:
token = EthereumToken(symbol)
reserve_address = token.ethereum_address
decimals = token.decimals
return reserve_address, decimals
class Aave(EthereumModule):
"""Aave integration module
https://docs.aave.com/developers/developing-on-aave/the-protocol/
"""
def __init__(
self,
ethereum_manager: 'EthereumManager',
database: DBHandler,
premium: Optional[Premium],
msg_aggregator: MessagesAggregator,
) -> None:
self.ethereum = ethereum_manager
self.database = database
self.msg_aggregator = msg_aggregator
self.premium = premium
def get_balances(
self,
given_defi_balances: Union[
Dict[ChecksumEthAddress, List[DefiProtocolBalances]],
Callable[[], Dict[ChecksumEthAddress, List[DefiProtocolBalances]]],
],
) -> Dict[ChecksumEthAddress, AaveBalances]:
"""Retrieves the aave balances
Receives the defi balances from zerion as an argument. They can either be directly given
as the defi balances mapping or as a callable that will retrieve the
balances mapping when executed.
"""
aave_balances = {}
reserve_cache: Dict[str, Tuple[Any, ...]] = {}
if isinstance(given_defi_balances, dict):
defi_balances = given_defi_balances
else:
defi_balances = given_defi_balances()
for account, balance_entries in defi_balances.items():
lending_map = {}
borrowing_map = {}
for balance_entry in balance_entries:
if balance_entry.protocol.name != 'Aave':
continue
# Depending on whether it's asset or debt we find what the reserve asset is
if balance_entry.balance_type == 'Asset':
asset = balance_entry.underlying_balances[0]
else:
asset = balance_entry.base_balance
reserve_address, _ = _get_reserve_address_decimals(asset.token_symbol)
reserve_data = reserve_cache.get(reserve_address, None)
if reserve_data is None:
reserve_data = self.ethereum.call_contract(
contract_address=AAVE_LENDING_POOL.address,
abi=AAVE_LENDING_POOL.abi,
method_name='getReserveData',
arguments=[reserve_address],
)
reserve_cache[reserve_address] = reserve_data
if balance_entry.balance_type == 'Asset':
lending_map[asset.token_symbol] = AaveLendingBalance(
balance=asset.balance,
apy=FVal(reserve_data[4] / RAY),
)
else: # 'Debt'
borrowing_map[asset.token_symbol] = AaveBorrowingBalance(
balance=asset.balance,
variable_apr=FVal(reserve_data[5] / RAY),
stable_apr=FVal(reserve_data[6] / RAY),
)
if lending_map == {} and borrowing_map == {}:
# no aave balances for the account
continue
aave_balances[account] = AaveBalances(lending=lending_map, borrowing=borrowing_map)
return aave_balances
def get_history(
self,
addresses: List[ChecksumEthAddress],
reset_db_data: bool,
) -> Dict[ChecksumEthAddress, AaveHistory]:
result = {}
latest_block = self.ethereum.get_latest_block_number()
if reset_db_data is True:
self.database.delete_aave_data()
for address in addresses:
last_query = self.database.get_used_query_range(f'aave_events_{address}')
history_results = self.get_history_for_address(
user_address=address,
to_block=latest_block,
given_from_block=last_query[1] + 1 if last_query is not None else None,
)
if len(history_results.events) == 0:
continue
result[address] = history_results
return result
def get_history_for_address(
self,
user_address: ChecksumEthAddress,
to_block: int,
atokens_list: Optional[List[EthereumToken]] = None,
given_from_block: Optional[int] = None,
) -> AaveHistory:
# Get all deposit events for the address
from_block = AAVE_LENDING_POOL.deployed_block if given_from_block is None else given_from_block # noqa: E501
argument_filters = {
'_user': user_address,
}
query_events = True
if given_from_block is not None and to_block - given_from_block < 250: # noqa: E501
query_events = False # Save time by not querying events if last query is recent
deposit_events = []
withdraw_events = []
if query_events:
deposit_events.extend(self.ethereum.get_logs(
contract_address=AAVE_LENDING_POOL.address,
abi=AAVE_LENDING_POOL.abi,
event_name='Deposit',
argument_filters=argument_filters,
from_block=from_block,
to_block=to_block,
))
withdraw_events.extend(self.ethereum.get_logs(
contract_address=AAVE_LENDING_POOL.address,
abi=AAVE_LENDING_POOL.abi,
event_name='RedeemUnderlying',
argument_filters=argument_filters,
from_block=from_block,
to_block=to_block,
))
# now for each atoken get all mint events and pass them to profit calculation
tokens = atokens_list if atokens_list is not None else ATOKENS_LIST
total_address_events = []
total_earned_map = {}
for token in tokens:
log.debug(
f'Querying aave events for {user_address} and token '
f'{token.identifier} with query_events={query_events}',
)
events = []
if given_from_block:
events.extend(self.database.get_aave_events(user_address, token))
new_events = []
if query_events:
new_events = self.get_events_for_atoken_and_address(
user_address=user_address,
atoken=token,
deposit_events=deposit_events,
withdraw_events=withdraw_events,
from_block=from_block,
to_block=to_block,
)
events.extend(new_events)
total_balance = Balance()
for x in events:
if x.event_type == 'interest':
total_balance += x.value
# If the user still has balance in Aave we also need to see how much
# accrued interest has not yet been paid out
# TODO: ARCHIVE if to_block is not latest here we should get the balance
# from the old block. Means using archive node
balance = self.ethereum.call_contract(
contract_address=token.ethereum_address,
abi=ATOKEN_ABI,
method_name='balanceOf',
arguments=[user_address],
)
principal_balance = self.ethereum.call_contract(
contract_address=token.ethereum_address,
abi=ATOKEN_ABI,
method_name='principalBalanceOf',
arguments=[user_address],
)
if len(events) == 0 and balance == 0 and principal_balance == 0:
# Nothing for this aToken for this address
continue
unpaid_interest = (balance - principal_balance) / (FVal(10) ** FVal(token.decimals))
usd_price = Inquirer().find_usd_price(token)
total_balance += Balance(
amount=unpaid_interest,
usd_value=unpaid_interest * usd_price,
)
total_earned_map[token] = total_balance
total_address_events.extend(events)
# now update the DB with the recently queried events
self.database.add_aave_events(user_address, new_events)
# After all events have been queried then also update the query range.
# Even if no events are found for an address we need to remember the range
self.database.update_used_block_query_range(
name=f'aave_events_{user_address}',
from_block=AAVE_LENDING_POOL.deployed_block,
to_block=to_block,
)
total_address_events.sort(key=lambda event: event.timestamp)
return AaveHistory(events=total_address_events, total_earned=total_earned_map)
def get_events_for_atoken_and_address(
self,
user_address: ChecksumEthAddress,
atoken: EthereumToken,
deposit_events: List[Dict[str, Any]],
withdraw_events: List[Dict[str, Any]],
from_block: int,
to_block: int,
) -> List[AaveEvent]:
argument_filters = {
'from': ZERO_ADDRESS,
'to': user_address,
}
mint_events = self.ethereum.get_logs(
contract_address=atoken.ethereum_address,
abi=ATOKEN_ABI,
event_name='Transfer',
argument_filters=argument_filters,
from_block=from_block,
to_block=to_block,
)
mint_data = set()
mint_data_to_log_index = {}
for event in mint_events:
amount = hex_or_bytes_to_int(event['data'])
if amount == 0:
continue # first mint can be for 0. Ignore
entry = (
deserialize_blocknumber(event['blockNumber']),
amount,
self.ethereum.get_event_timestamp(event),
event['transactionHash'],
)
mint_data.add(entry)
mint_data_to_log_index[entry] = deserialize_int_from_hex_or_int(
event['logIndex'], 'aave log index',
)
reserve_asset = Asset(atoken.identifier[1:])
reserve_address, decimals = _get_reserve_address_decimals(reserve_asset.identifier)
aave_events = []
for event in deposit_events:
if hex_or_bytes_to_address(event['topics'][1]) == reserve_address:
# first 32 bytes of the data are the amount
deposit = hex_or_bytes_to_int(event['data'][:66])
block_number = deserialize_blocknumber(event['blockNumber'])
timestamp = self.ethereum.get_event_timestamp(event)
tx_hash = event['transactionHash']
log_index = deserialize_int_from_hex_or_int(event['logIndex'], 'aave log index')
# If there is a corresponding deposit event remove the minting event data
entry = (block_number, deposit, timestamp, tx_hash)
if entry in mint_data:
mint_data.remove(entry)
del mint_data_to_log_index[entry]
usd_price = query_usd_price_zero_if_error(
asset=reserve_asset,
time=timestamp,
location='aave deposit',
msg_aggregator=self.msg_aggregator,
)
deposit_amount = deposit / (FVal(10) ** FVal(decimals))
aave_events.append(AaveEvent(
event_type='deposit',
asset=reserve_asset,
value=Balance(
amount=deposit_amount,
usd_value=deposit_amount * usd_price,
),
block_number=block_number,
timestamp=timestamp,
tx_hash=tx_hash,
log_index=log_index,
))
for data in mint_data:
usd_price = query_usd_price_zero_if_error(
asset=atoken,
time=data[2],
location='aave interest profit',
msg_aggregator=self.msg_aggregator,
)
interest_amount = data[1] / (FVal(10) ** FVal(decimals))
aave_events.append(AaveEvent(
event_type='interest',
asset=atoken,
value=Balance(
amount=interest_amount,
usd_value=interest_amount * usd_price,
),
block_number=data[0],
timestamp=data[2],
tx_hash=data[3],
log_index=mint_data_to_log_index[data],
))
for event in withdraw_events:
if hex_or_bytes_to_address(event['topics'][1]) == reserve_address:
# first 32 bytes of the data are the amount
withdrawal = hex_or_bytes_to_int(event['data'][:66])
block_number = deserialize_blocknumber(event['blockNumber'])
timestamp = self.ethereum.get_event_timestamp(event)
tx_hash = event['transactionHash']
usd_price = query_usd_price_zero_if_error(
asset=reserve_asset,
time=timestamp,
location='aave withdrawal',
msg_aggregator=self.msg_aggregator,
)
withdrawal_amount = withdrawal / (FVal(10) ** FVal(decimals))
aave_events.append(AaveEvent(
event_type='withdrawal',
asset=reserve_asset,
value=Balance(
amount=withdrawal_amount,
usd_value=withdrawal_amount * usd_price,
),
block_number=block_number,
timestamp=timestamp,
tx_hash=tx_hash,
log_index=deserialize_int_from_hex_or_int(event['logIndex'], 'aave log index'),
))
return aave_events
# -- Methods following the EthereumModule interface -- #
def on_startup(self) -> None:
pass
def on_account_addition(self, address: ChecksumEthAddress) -> None:
pass
def on_account_removal(self, address: ChecksumEthAddress) -> None:
pass
|
#!/usr/bin/env python3.6
# -*- coding: utf8 -*-
'''
ELQuent.modifier
Massive modification suite for multiple assets
Mateusz Dąbrowski
github.com/MateuszDabrowski
linkedin.com/in/mateusz-dabrowski-marketing/
'''
# Python imports
import os
import csv
import sys
import json
from colorama import Fore, Style, init
# ELQuent imports
import utils.api.api as api
import utils.helper as helper
# Initialize colorama
init(autoreset=True)
# Globals
naming = None
source_country = None
# Predefined message elements
ERROR = f'{Fore.WHITE}[{Fore.RED}ERROR{Fore.WHITE}] {Fore.YELLOW}'
WARNING = f'{Fore.WHITE}[{Fore.YELLOW}WARNING{Fore.WHITE}] '
SUCCESS = f'{Fore.WHITE}[{Fore.GREEN}SUCCESS{Fore.WHITE}] '
YES = f'{Style.BRIGHT}{Fore.GREEN}y{Fore.WHITE}{Style.NORMAL}'
NO = f'{Style.BRIGHT}{Fore.RED}n{Fore.WHITE}{Style.NORMAL}'
def country_naming_setter(country):
'''
Sets source_country for all functions
Loads json file with naming convention
'''
global source_country
source_country = country
# Prepares globals for imported modules
helper.country_naming_setter(source_country)
# Loads json file with naming convention
with open(file('naming'), 'r', encoding='utf-8') as f:
global naming
naming = json.load(f)
'''
=================================================================================
File Path Getter
=================================================================================
'''
def file(file_path, name=''):
'''
Returns file path to template files
'''
def find_data_file(filename, directory):
'''
Returns correct file path for both script and frozen app
'''
if directory == 'outcomes': # For saving outcomes
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, directory, filename)
elif directory == 'api': # For writing auth files
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, 'utils', directory, filename)
file_paths = {
'naming': find_data_file('naming.json', directory='api'),
'outcome-csv': find_data_file(f'WK{source_country}_{name}.csv', directory='outcomes')
}
return file_paths.get(file_path)
'''
=================================================================================
Redirect Inactive LP's
=================================================================================
'''
def get_completed_campaigns(redirected_campaigns):
'''
Returns a list of ['id', 'name'] for all completed campaigns
that weren't already redirected during previous runs
'''
# Builds search query for campaign API
search_query = f"name='WK{source_country}*'"
# Iterates over pages of outcomes
page = 1
completed_campaigns = []
print(f'{Fore.WHITE}[{Fore.YELLOW}SYNC{Fore.WHITE}] ', end='', flush=True)
while True:
campaigns = api.eloqua_get_assets(
search_query, asset_type='campaign', page=page, depth='minimal')
# Creates list of completed campaigns » ['id', 'name']
for campaign in campaigns['elements']:
if campaign.get('currentStatus', 'Completed') == 'Completed'\
and campaign.get('id') not in redirected_campaigns:
completed_campaigns.append(
[campaign.get('id'), campaign.get('name')]
)
# Stops iteration when full list is obtained
if campaigns['total'] - page * 500 < 0:
break
# Else increments page to get next part of outcomes
page += 1
# Every ten batches draws hyphen for better readability
if page % 10 == 0:
print(f'{Fore.YELLOW}-', end='', flush=True)
return completed_campaigns
def put_modified_lp(completed_campaigns):
'''
Requires completed_campaigns: a list of campaign ['id', 'name'] pairs
Finds all Landing Pages connected to campaigns from completed_campaigns list
Adds redirect script to the <head> tag and updates HTML in Eloqua
Returns a string with comma-separated id for every redirected campaign
'''
redirected_campaigns_string = ''
print(f'\n\n{Fore.WHITE}[{Fore.YELLOW}MODIFY{Fore.WHITE}] ')
for campaign in completed_campaigns:
# Create search query to get all LPs connected to campaign
campaign_name = campaign[1]
search_query = campaign_name.split('_')
search_query = ('_').join(search_query[0:4]) + '*'
print(f'{Fore.WHITE}[{Fore.YELLOW}Campaign{Fore.WHITE}] › '
f'{Fore.YELLOW}{campaign_name}')
# Iterates over pages of outcomes
page = 1
while True:
# Gets full data on landing page
landing_pages = api.eloqua_get_assets(
search_query, asset_type='landingPage', page=page)
if not landing_pages['elements']:
print(
f' {Fore.WHITE}[{Fore.YELLOW}LP{Fore.WHITE}] » {Fore.YELLOW}Landing Page not found')
# Write modifier outcome to csv file
with open(file('outcome-csv', f'redirected-campaigns'), 'a', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow([campaign[0], campaign[1],
'not found', 'not found', False])
break
for landing_page in landing_pages['elements']:
# Skips adding redirection if there is one already
if 'window.location.replace' in landing_page['htmlContent'].get('html'):
continue
landing_page_ending = landing_page.get("name").split('_')[-1]
print(f' {Fore.WHITE}[{Fore.YELLOW}LP{Fore.WHITE}] {Fore.YELLOW}» '
f'{Fore.WHITE}ID: {landing_page.get("id")} {Fore.YELLOW}› '
f'{Fore.WHITE}_{landing_page_ending}', end=' ')
# Builds valid redirect link string
redirect_link = naming[source_country]['id']['redirect']\
+ f'?utm_source={landing_page.get("name")}&utm_medium=redirect'
redirect_link = f'<head><script>window.location.replace("{redirect_link}")</script>'
# Gets and modifies code of the LP with redirect link
landing_page_html = landing_page['htmlContent'].get('html')
landing_page_html = landing_page_html.replace(
r'<head>', redirect_link,
)
# Build landing page data
data = {
'id': landing_page.get('id'),
'name': landing_page.get('name'),
'description': 'ELQuent API » Redirected',
'folderId': landing_page.get('folderId'),
'micrositeId': landing_page.get('micrositeId'),
'relativePath': landing_page.get('relativePath'),
'htmlContent': {
'type': 'RawHtmlContent',
'html': landing_page_html
}
}
# Upload modified LP
landing_page_modification = api.eloqua_put_landingpage(
landing_page.get('id'), data)
# Write modifier outcome to csv file
with open(file('outcome-csv', f'redirected-campaigns'), 'a', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow([campaign[0],
campaign[1],
landing_page.get('id'),
landing_page.get('name'),
landing_page_modification])
print(f'{SUCCESS}')
# Stops iteration when full list is obtained
if landing_pages['total'] - page * 20 < 0:
# Adds ID to a string containing all redirected campaigns
redirected_campaigns_string += ',' + campaign[0]
break
# Else increments page to get next part of outcomes
page += 1
# Every ten batches draws hyphen for better readability
if page % 10 == 0:
print(f'{Fore.YELLOW}-', end='', flush=True)
return redirected_campaigns_string
def redirect_lp():
'''
Allows user to add redirecting script to all landing pages belonging to completed campaigns
'''
# Gets redirect base URL
redirect_link = naming[source_country]['id']['redirect']
# Doublecheck if user is sure he wants to continue
choice = ''
while choice.lower() != 'y' and choice.lower() != 'n':
print(f'\n{Fore.YELLOW}» {Fore.WHITE}Continue with redirecting '
f'all WK{source_country} completed campaign LPs to:'
f'\n {Fore.YELLOW}{redirect_link}{Fore.WHITE}? ({YES}/{NO}):', end=' ')
choice = input('')
if choice.lower() == 'y':
break
elif choice.lower() == 'n':
return False
# Gets list of already redirected or no-LP campaigns
redirected_campaigns_shared_list = api.eloqua_asset_get(
naming[source_country]['id']['redirected_list'], 'sharedContent', depth='complete')
old_redirected_campaigns = redirected_campaigns_shared_list['contentHtml']
old_redirected_campaigns = old_redirected_campaigns.split(',')
# Write modifier outcome to csv file
with open(file('outcome-csv', f'redirected-campaigns'), 'w', encoding='utf-8') as f:
fieldnames = ['Campaign ID', 'Campaign Name',
'Landing Page ID', 'Landing Page Name', 'Redirected']
writer = csv.writer(f)
writer.writerow(fieldnames)
# Gets list of completed campaigns
completed_campaigns = get_completed_campaigns(old_redirected_campaigns)
# Gets list of modified landing pages
new_redirected_campaigns = put_modified_lp(completed_campaigns)
# Creates a string with id's of all redirected campaigns (previously and now)
all_redirected_campaigns = \
(',').join(old_redirected_campaigns) + new_redirected_campaigns
if all_redirected_campaigns.startswith(','): # in case of no old campaigns
all_redirected_campaigns = all_redirected_campaigns[1:]
# Build shared content data for updating the list of redirected campaigns
data = {
'id': redirected_campaigns_shared_list.get('id'),
'name': redirected_campaigns_shared_list.get('name'),
'contentHTML': all_redirected_campaigns
}
# Updating list of redirected campaigns to shared content
api.eloqua_put_sharedcontent(
naming[source_country]['id']['redirected_list'], data=data)
print(f'\n{SUCCESS}List of redirected campaigns saved to Outcomes folder!')
return
'''
=================================================================================
Modifier module menu
=================================================================================
'''
def modifier_module(country):
'''
Lets user choose which modifier module utility he wants to use
'''
# Create global source_country and load json file with naming convention
country_naming_setter(country)
# Campaign type chooser
print(
f'\n{Fore.GREEN}ELQuent.modifier Utilities:'
f'\n{Fore.WHITE}[{Fore.YELLOW}1{Fore.WHITE}]\t» [{Fore.YELLOW}Redirect{Fore.WHITE}] Adds redirect script to completed campaigns LPs'
f'\n{Fore.WHITE}[{Fore.YELLOW}Q{Fore.WHITE}]\t» [{Fore.YELLOW}Quit to main menu{Fore.WHITE}]'
)
while True:
print(f'{Fore.YELLOW}Enter number associated with chosen utility:', end='')
choice = input(' ')
if choice.lower() == 'q':
break
elif choice == '1':
redirect_lp()
break
else:
print(f'{Fore.RED}Entered value does not belong to any utility!')
choice = ''
return
|
#!/usr/bin/python
import logging
import optparse
import os
import time
from progress.bar import Bar, ShadyBar
from nmj.updater import NMJUpdater
_LOGGER = logging.getLogger("nmj")
def get_lock(root_dir):
while os.path.isfile(os.path.join(root_dir, "pynmj.lock")):
time.sleep(0.5)
fd = open(os.path.join(root_dir, "pynmj.lock"), "w+")
fd.write("lock\n")
fd.close()
def release_lock(root_dir):
try:
os.remove(os.path.join(root_dir, "pynmj.lock"))
except:
pass
def parse_options():
parser = optparse.OptionParser()
parser.add_option(
"-n", "--clean-name",
dest="clean_name", action="store_true", default=False,
help="Clean videos file names",
)
return parser.parse_args()
def main():
logging.basicConfig(level=logging.INFO, filename="pynmj.log")
#_LOGGER.setLevel(logging.INFO)
options, arguments = parse_options()
try:
try:
get_lock(arguments[0])
updater = NMJUpdater(arguments[0], "local_directory")
if options.clean_name:
updater.clean_names()
medias = updater.scan_dir()
_LOGGER.info("Found %s medias", len(medias))
bar = ShadyBar('Updating database', max=len(medias), suffix='[ETA %(eta_td)s] (%(percent)d%%)')
for rank, media in enumerate(medias):
_LOGGER.info("Media %s/%s", rank+1, len(medias))
updater.search_media_and_add(media)
bar.next()
_LOGGER.info("Cleaning DB...")
updater.clean()
_LOGGER.info("Done")
bar.finish()
except:
import traceback;traceback.print_exc()
finally:
release_lock(arguments[0])
if __name__ == "__main__":
main()
|
'''
Init file for pydanfossair
'''
from . import *
__version__ = '0.1.0'
|
from getratings.models.ratings import Ratings
class NA_XinZhao_Jng_Aatrox(Ratings):
pass
class NA_XinZhao_Jng_Ahri(Ratings):
pass
class NA_XinZhao_Jng_Akali(Ratings):
pass
class NA_XinZhao_Jng_Alistar(Ratings):
pass
class NA_XinZhao_Jng_Amumu(Ratings):
pass
class NA_XinZhao_Jng_Anivia(Ratings):
pass
class NA_XinZhao_Jng_Annie(Ratings):
pass
class NA_XinZhao_Jng_Ashe(Ratings):
pass
class NA_XinZhao_Jng_AurelionSol(Ratings):
pass
class NA_XinZhao_Jng_Azir(Ratings):
pass
class NA_XinZhao_Jng_Bard(Ratings):
pass
class NA_XinZhao_Jng_Blitzcrank(Ratings):
pass
class NA_XinZhao_Jng_Brand(Ratings):
pass
class NA_XinZhao_Jng_Braum(Ratings):
pass
class NA_XinZhao_Jng_Caitlyn(Ratings):
pass
class NA_XinZhao_Jng_Camille(Ratings):
pass
class NA_XinZhao_Jng_Cassiopeia(Ratings):
pass
class NA_XinZhao_Jng_Chogath(Ratings):
pass
class NA_XinZhao_Jng_Corki(Ratings):
pass
class NA_XinZhao_Jng_Darius(Ratings):
pass
class NA_XinZhao_Jng_Diana(Ratings):
pass
class NA_XinZhao_Jng_Draven(Ratings):
pass
class NA_XinZhao_Jng_DrMundo(Ratings):
pass
class NA_XinZhao_Jng_Ekko(Ratings):
pass
class NA_XinZhao_Jng_Elise(Ratings):
pass
class NA_XinZhao_Jng_Evelynn(Ratings):
pass
class NA_XinZhao_Jng_Ezreal(Ratings):
pass
class NA_XinZhao_Jng_Fiddlesticks(Ratings):
pass
class NA_XinZhao_Jng_Fiora(Ratings):
pass
class NA_XinZhao_Jng_Fizz(Ratings):
pass
class NA_XinZhao_Jng_Galio(Ratings):
pass
class NA_XinZhao_Jng_Gangplank(Ratings):
pass
class NA_XinZhao_Jng_Garen(Ratings):
pass
class NA_XinZhao_Jng_Gnar(Ratings):
pass
class NA_XinZhao_Jng_Gragas(Ratings):
pass
class NA_XinZhao_Jng_Graves(Ratings):
pass
class NA_XinZhao_Jng_Hecarim(Ratings):
pass
class NA_XinZhao_Jng_Heimerdinger(Ratings):
pass
class NA_XinZhao_Jng_Illaoi(Ratings):
pass
class NA_XinZhao_Jng_Irelia(Ratings):
pass
class NA_XinZhao_Jng_Ivern(Ratings):
pass
class NA_XinZhao_Jng_Janna(Ratings):
pass
class NA_XinZhao_Jng_JarvanIV(Ratings):
pass
class NA_XinZhao_Jng_Jax(Ratings):
pass
class NA_XinZhao_Jng_Jayce(Ratings):
pass
class NA_XinZhao_Jng_Jhin(Ratings):
pass
class NA_XinZhao_Jng_Jinx(Ratings):
pass
class NA_XinZhao_Jng_Kalista(Ratings):
pass
class NA_XinZhao_Jng_Karma(Ratings):
pass
class NA_XinZhao_Jng_Karthus(Ratings):
pass
class NA_XinZhao_Jng_Kassadin(Ratings):
pass
class NA_XinZhao_Jng_Katarina(Ratings):
pass
class NA_XinZhao_Jng_Kayle(Ratings):
pass
class NA_XinZhao_Jng_Kayn(Ratings):
pass
class NA_XinZhao_Jng_Kennen(Ratings):
pass
class NA_XinZhao_Jng_Khazix(Ratings):
pass
class NA_XinZhao_Jng_Kindred(Ratings):
pass
class NA_XinZhao_Jng_Kled(Ratings):
pass
class NA_XinZhao_Jng_KogMaw(Ratings):
pass
class NA_XinZhao_Jng_Leblanc(Ratings):
pass
class NA_XinZhao_Jng_LeeSin(Ratings):
pass
class NA_XinZhao_Jng_Leona(Ratings):
pass
class NA_XinZhao_Jng_Lissandra(Ratings):
pass
class NA_XinZhao_Jng_Lucian(Ratings):
pass
class NA_XinZhao_Jng_Lulu(Ratings):
pass
class NA_XinZhao_Jng_Lux(Ratings):
pass
class NA_XinZhao_Jng_Malphite(Ratings):
pass
class NA_XinZhao_Jng_Malzahar(Ratings):
pass
class NA_XinZhao_Jng_Maokai(Ratings):
pass
class NA_XinZhao_Jng_MasterYi(Ratings):
pass
class NA_XinZhao_Jng_MissFortune(Ratings):
pass
class NA_XinZhao_Jng_MonkeyKing(Ratings):
pass
class NA_XinZhao_Jng_Mordekaiser(Ratings):
pass
class NA_XinZhao_Jng_Morgana(Ratings):
pass
class NA_XinZhao_Jng_Nami(Ratings):
pass
class NA_XinZhao_Jng_Nasus(Ratings):
pass
class NA_XinZhao_Jng_Nautilus(Ratings):
pass
class NA_XinZhao_Jng_Nidalee(Ratings):
pass
class NA_XinZhao_Jng_Nocturne(Ratings):
pass
class NA_XinZhao_Jng_Nunu(Ratings):
pass
class NA_XinZhao_Jng_Olaf(Ratings):
pass
class NA_XinZhao_Jng_Orianna(Ratings):
pass
class NA_XinZhao_Jng_Ornn(Ratings):
pass
class NA_XinZhao_Jng_Pantheon(Ratings):
pass
class NA_XinZhao_Jng_Poppy(Ratings):
pass
class NA_XinZhao_Jng_Quinn(Ratings):
pass
class NA_XinZhao_Jng_Rakan(Ratings):
pass
class NA_XinZhao_Jng_Rammus(Ratings):
pass
class NA_XinZhao_Jng_RekSai(Ratings):
pass
class NA_XinZhao_Jng_Renekton(Ratings):
pass
class NA_XinZhao_Jng_Rengar(Ratings):
pass
class NA_XinZhao_Jng_Riven(Ratings):
pass
class NA_XinZhao_Jng_Rumble(Ratings):
pass
class NA_XinZhao_Jng_Ryze(Ratings):
pass
class NA_XinZhao_Jng_Sejuani(Ratings):
pass
class NA_XinZhao_Jng_Shaco(Ratings):
pass
class NA_XinZhao_Jng_Shen(Ratings):
pass
class NA_XinZhao_Jng_Shyvana(Ratings):
pass
class NA_XinZhao_Jng_Singed(Ratings):
pass
class NA_XinZhao_Jng_Sion(Ratings):
pass
class NA_XinZhao_Jng_Sivir(Ratings):
pass
class NA_XinZhao_Jng_Skarner(Ratings):
pass
class NA_XinZhao_Jng_Sona(Ratings):
pass
class NA_XinZhao_Jng_Soraka(Ratings):
pass
class NA_XinZhao_Jng_Swain(Ratings):
pass
class NA_XinZhao_Jng_Syndra(Ratings):
pass
class NA_XinZhao_Jng_TahmKench(Ratings):
pass
class NA_XinZhao_Jng_Taliyah(Ratings):
pass
class NA_XinZhao_Jng_Talon(Ratings):
pass
class NA_XinZhao_Jng_Taric(Ratings):
pass
class NA_XinZhao_Jng_Teemo(Ratings):
pass
class NA_XinZhao_Jng_Thresh(Ratings):
pass
class NA_XinZhao_Jng_Tristana(Ratings):
pass
class NA_XinZhao_Jng_Trundle(Ratings):
pass
class NA_XinZhao_Jng_Tryndamere(Ratings):
pass
class NA_XinZhao_Jng_TwistedFate(Ratings):
pass
class NA_XinZhao_Jng_Twitch(Ratings):
pass
class NA_XinZhao_Jng_Udyr(Ratings):
pass
class NA_XinZhao_Jng_Urgot(Ratings):
pass
class NA_XinZhao_Jng_Varus(Ratings):
pass
class NA_XinZhao_Jng_Vayne(Ratings):
pass
class NA_XinZhao_Jng_Veigar(Ratings):
pass
class NA_XinZhao_Jng_Velkoz(Ratings):
pass
class NA_XinZhao_Jng_Vi(Ratings):
pass
class NA_XinZhao_Jng_Viktor(Ratings):
pass
class NA_XinZhao_Jng_Vladimir(Ratings):
pass
class NA_XinZhao_Jng_Volibear(Ratings):
pass
class NA_XinZhao_Jng_Warwick(Ratings):
pass
class NA_XinZhao_Jng_Xayah(Ratings):
pass
class NA_XinZhao_Jng_Xerath(Ratings):
pass
class NA_XinZhao_Jng_XinZhao(Ratings):
pass
class NA_XinZhao_Jng_Yasuo(Ratings):
pass
class NA_XinZhao_Jng_Yorick(Ratings):
pass
class NA_XinZhao_Jng_Zac(Ratings):
pass
class NA_XinZhao_Jng_Zed(Ratings):
pass
class NA_XinZhao_Jng_Ziggs(Ratings):
pass
class NA_XinZhao_Jng_Zilean(Ratings):
pass
class NA_XinZhao_Jng_Zyra(Ratings):
pass
|
# coding= utf-8
import os, sys, io, re, json, glob
from collections import OrderedDict
def listdir_nohidden(path):
return glob.glob(os.path.join(path, '*'))
mapp_ordlistor = 'ordlistorText/'
ordlistor = listdir_nohidden(mapp_ordlistor)
mapp_data = 'data/'
fillista = listdir_nohidden(mapp_data)
for filnamn in fillista:
print ("Arbetar med: " + filnamn)
with open(filnamn, 'r+b') as fil_data:
text = fil_data.read().decode('utf_8')
for ordlista in ordlistor:
# print ("Ordlista: " + ordlista)
f = io.open(ordlista, mode="r", encoding="utf-8") # Open utf-encoded JSON
lista = json.load(f, object_pairs_hook=OrderedDict) # Load JSON preserving key order
for a, b in lista['ordpar'].items():
if lista['regex']:
text = re.sub(a, b, text, flags=re.DOTALL)
else:
text = text.replace(a, b)
fil_data.seek(0)
fil_data.write(text.encode('utf8'))
fil_data.truncate()
|
import numpy as np
import pandas as pd
import descarteslabs as dl
LS = "landsat:LC08:01:RT:TOAR"
S2 = "sentinel-2:L1C"
def get_bands_from_platform(platform):
if platform == S2:
bands=["blue","green","red", "red-edge-2","nir","water-vapor","ndvi", "alpha"]
scales=[[0, 10000, 0, 1],
[0, 10000, 0, 1],
[0, 10000, 0, 1],
[0, 10000, 0, 1],
[0, 10000, 0, 1],
[0, 10000, 0, 1],
[0, 65535, -1, 1],
None]
elif platform == LS:
bands=["blue","green","red","nir","ndvi", "alpha"]
scales=[[0, 10000, 0, 1],
[0, 10000, 0, 1],
[0, 10000, 0, 1],
[0, 10000, 0, 1],
[0, 65535, -1, 1],
None]
return bands, scales
def aoi_from_latlon(ylat, xlong):
"""
Example:
lat = 38.459702
long = -122.438332
aoi = aoi_from_latlon(lat, long)
"""
# Intent is for an AOI of a single pixel
# Approx 12 cm, ie <1m
dx = 0.000001
dy = 0.000001
aoi = {
"type": "Polygon",
"coordinates": [
[
[xlong, ylat],
[xlong, ylat+dy],
[xlong+dx, ylat+dy],
[xlong+dx, ylat],
[xlong, ylat],
]
],
}
return aoi
def fc_from_latlong(lat, long, start="2018-01-01", end="2018-12-31", platform="sentinel-2:L1C"):
"""
Platforms:
landsat = "landsat:LC08:01:RT:TOAR"
sentinel2 = "sentinel-2:L1C"
"""
aoi = aoi_from_latlon(lat, long)
metadata_client = dl.Metadata()
# Image Search
fc = metadata_client.search(
products=platform,
geom=aoi,
start_datetime=start,
end_datetime=end,
limit=365,
)
return fc
def meta_from_fc(fc, fmt="dict"):
"""
Collects key parameters of the scenes in a feature collection
j = meta_from_fc(fc, fmt="dict")
"""
features = fc['features']  # scenes returned by the metadata search
nk = len(features)
fid = [""] * nk
datestring = [""] * nk
ndvi = [np.nan] * nk
cirrus_fraction = [np.nan] * nk
cloud_fraction = [np.nan] * nk
degraded_fraction_0 = [np.nan] * nk
opaque_fraction = [np.nan] * nk
solar_azimuth_angle = [np.nan] * nk
solar_elevation_angle = [np.nan] * nk
view_angle = [np.nan] * nk
azimuth_angle = [np.nan] * nk
raa = [np.nan] * nk
sza = [np.nan] * nk
vza = [np.nan] * nk
for k in range(nk):
fid[k] = features[k]['id']
datestring[k] = features[k]['properties']['acquired']
# NOTE: per-pixel NDVI would require the rasterized images, which are not
# available in this function, so the ndvi column is left as NaN.
azimuth_angle[k] = features[k]['properties']['azimuth_angle']*np.pi/180.
cirrus_fraction[k] = features[k]['properties']['cirrus_fraction']
cloud_fraction[k] = features[k]['properties']['cloud_fraction']
degraded_fraction_0[k] = features[k]['properties']['degraded_fraction_0']
opaque_fraction[k] = features[k]['properties']['opaque_fraction']
solar_azimuth_angle[k] = features[k]['properties']['solar_azimuth_angle']*np.pi/180.
solar_elevation_angle[k] = features[k]['properties']['solar_elevation_angle']
view_angle[k] = features[k]['properties']['view_angle']
raa[k] = np.sin(solar_azimuth_angle[k] - azimuth_angle[k])
sza[k] = np.cos((90. - solar_elevation_angle[k])*np.pi/180.)
vza[k] = np.cos((view_angle[k])*np.pi/180.)
data = {
'fid': fid,
'utc': datestring,
'ndvi': ndvi,
'cirrus_fraction': cirrus_fraction,
'cloud_fraction': cloud_fraction,
'opaque_fraction': opaque_fraction,
'raa': raa,
'cossza': sza,
'cosvza': vza,
}
df = pd.DataFrame(data=data)
df['utc'] = pd.to_datetime(df['utc'])
df['date'] = df['utc'].dt.date
df = df.sort_values('utc')
columns = ['date','utc','fid', 'cossza', 'cosvza', 'raa',
'cirrus_fraction', 'cloud_fraction', 'opaque_fraction']
df = df[columns]
if fmt == "dict":
j = df.to_dict(orient='split')
# df = pd.DataFrame.from_records(data=j['data'], columns = j['columns'])
elif fmt == "json":
j = df.to_json(orient='split', date_format='iso', double_precision=4, date_unit='s')
# j = json.loads(jstring)
# df = pd.DataFrame.from_records(data=j['data'], columns = j['columns'])
else:
j = df
return j
def images_from_fids(aoi, fids):
raster_client = dl.Raster()
images = list()
for f in fids:
arr, meta = raster_client.ndarray(
f,
cutline=aoi,
bands=["ndvi", "alpha"],
scales=[[0, 65535, -1, 1], None],
data_type="Float32",
)
images.append(arr)
return images, meta['wgs84Extent']
def dims_from_images(images):
nk = len(images)
ni = len(images[0])
nj = len(images[0][0])
nb = len(images[0][0][0])
dims = {
'nk': nk,
'ni': ni,
'nj': nj,
'nb': nb
}
return dims
def pixel_from_latlong(meta, dims, lat, long):
xmeta, ymeta = zip(*meta['wgs84Extent']['coordinates'][0])
dx = max(xmeta)-min(xmeta)
dy = max(ymeta)-min(ymeta)
nk = dims['nk']
ni = dims['ni']
nj = dims['nj']
dxdP = dx/ni
dydP = dy/nj
ix = int((long - min(xmeta))/dxdP)
jy = int((max(ymeta) - lat)/dydP)
px = {
'ix': ix,
'jy': jy
}
return px
def images_to_timeseries(fc, images, platform, dims, px):
nk = dims['nk']
ix = px['ix']
jy = px['jy']
blue = np.zeros((nk))
green = np.zeros((nk))
red = np.zeros((nk))
nir = np.zeros((nk))
ndvi = np.zeros((nk))
datestring = [""] * nk
if platform == S2:
red_edge_2 = np.zeros((nk))
water_vapor = np.zeros((nk))
for k in range(nk):
image = images[k]
if platform == S2:
blue[k] = image[ix,jy,0]
green[k] = image[ix,jy,1]
red[k] = image[ix,jy,2]
red_edge_2[k] = image[ix,jy,3]
nir[k] = image[ix,jy,4]
water_vapor[k] = image[ix,jy,5]
ndvi[k] = image[ix,jy,6]
else:
blue[k] = image[ix,jy,0]
green[k] = image[ix,jy,1]
red[k] = image[ix,jy,2]
nir[k] = image[ix,jy,3]
ndvi[k] = image[ix,jy,4]
datestring[k] = fc["features"][k]['properties']['acquired']
if platform == S2:
d = {'UTC': datestring,
'blue': blue,
'green': green,
'red': red,
'red-edge-2': red_edge_2,
'nir': nir,
'water-vapor': water_vapor,
'ndvi': ndvi}
else:
d = {'UTC': datestring,
'blue': blue,
'green': green,
'red': red,
'nir': nir,
'ndvi': ndvi}
df = pd.DataFrame(data=d)
df['UTC'] = pd.to_datetime(df['UTC'])
df = df.sort_values('UTC')
return df
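# Minimal end-to-end sketch (illustrative only; assumes configured Descartes Labs
# credentials, and the coordinates below are arbitrary):
#
#   lat, long = 38.459702, -122.438332
#   fc = fc_from_latlong(lat, long, start="2018-01-01", end="2018-12-31", platform=S2)
#   meta = meta_from_fc(fc, fmt="dict")               # per-scene angles and cloud fractions
#   fids = [feat['id'] for feat in fc['features']]    # scene ids for images_from_fids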
|
from django.urls import path
from . import views
from .views import IndexView, PythonCreateView
urlpatterns = [
path('', IndexView.as_view(), name="index"),
path('create/', PythonCreateView.as_view(), name="create"),
]
|
# coding: utf-8
# ### 1. Data preprocessing
# In[1]:
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from keras import backend as K
num_classes = 10
img_rows, img_cols = 28, 28
# Load the MNIST data through the Keras API. trainX is a 60000 * 28 * 28 array,
# and trainY is the digit corresponding to each image.
(trainX, trainY), (testX, testY) = mnist.load_data()
# Set the input layer format according to the image data format (channels first/last).
if K.image_data_format() == 'channels_first':
trainX = trainX.reshape(trainX.shape[0], 1, img_rows, img_cols)
testX = testX.reshape(testX.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
trainX = trainX.reshape(trainX.shape[0], img_rows, img_cols, 1)
testX = testX.reshape(testX.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
trainX = trainX.astype('float32')
testX = testX.astype('float32')
trainX /= 255.0
testX /= 255.0
# Convert the labels to the required format (one-hot encoding).
trainY = keras.utils.to_categorical(trainY, num_classes)
testY = keras.utils.to_categorical(testY, num_classes)
# ### 2. Define the convolutional neural network through the Keras API.
# In[2]:
# Define the model with the Keras API.
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# Define the loss function, optimizer, and evaluation metrics.
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.SGD(),
metrics=['accuracy'])
# ### 3. Train the model through the Keras API and compute accuracy on the test data.
# In[3]:
model.fit(trainX, trainY,
batch_size=128,
epochs=10,
validation_data=(testX, testY))
# Compute accuracy on the test data.
score = model.evaluate(testX, testY)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
# DeepSpeed note, some parts of code taken & adapted from commit c368a9fd1b2c9dee4cc94de9a6bb0be3d447be41
# https://github.com/ptillet/torch-blocksparse/blob/master/tests/test_softmax.py
# https://github.com/ptillet/torch-blocksparse/blob/master/tests/test_matmul.py
# https://github.com/ptillet/torch-blocksparse/blob/master/tests/utils
import pytest
import torch
import deepspeed
from deepspeed.ops.op_builder import SparseAttnBuilder
if not deepspeed.ops.__compatible_ops__[SparseAttnBuilder.NAME]:
pytest.skip("sparse attention op is not compatible on this system",
allow_module_level=True)
def test_sparse_attention_module_availability():
try:
from deepspeed.ops import sparse_attention
except ImportError:
print("Sparse Attention Module is not installed!")
return False
return True
def test_matmul_module_availability():
try:
from deepspeed.ops.sparse_attention.matmul import MatMul
except ImportError:
print("Sparse MatMul Module is not installed!")
return False
return True
def test_softmax_module_availability():
try:
from deepspeed.ops.sparse_attention.softmax import Softmax
except ImportError:
print("Sparse Softmax Module is not installed!")
return False
return True
def test_sparsityconfig_module_availability():
try:
from deepspeed.ops.sparse_attention import SparsityConfig
except ImportError:
print("SparsityConfig Module is not installed!")
return False
return True
def test_densesparsityconfig_module_availability():
try:
from deepspeed.ops.sparse_attention import DenseSparsityConfig
except ImportError:
print("DenseSparsityConfig Module is not installed!")
return False
return True
def test_fixedsparsityconfig_module_availability():
try:
from deepspeed.ops.sparse_attention import FixedSparsityConfig
except ImportError:
print("FixedSparsityConfig Module is not installed!")
return False
return True
def test_variablesparsityconfig_module_availability():
try:
from deepspeed.ops.sparse_attention import VariableSparsityConfig
except ImportError:
print("VariableSparsityConfig Module is not installed!")
return False
return True
def test_bigbirdsparsityconfig_module_availability():
try:
from deepspeed.ops.sparse_attention import BigBirdSparsityConfig
except ImportError:
print("BigBirdSparsityConfig Module is not installed!")
return False
return True
def test_bslongformersparsityconfig_module_availability():
try:
from deepspeed.ops.sparse_attention import BSLongformerSparsityConfig
except ImportError:
print("BSLongformerSparsityConfig Module is not installed!")
return False
return True
def test_sparseselfattention_module_availability():
try:
from deepspeed.ops.sparse_attention import SparseSelfAttention
except ImportError:
print("SparseSelfAttention Module is not installed!")
return False
return True
def test_bertsparseselfattention_module_availability():
try:
from deepspeed.ops.sparse_attention import BertSparseSelfAttention
except ImportError:
print("BertSparseSelfAttention Module is not installed!")
return False
return True
def test_sparseattentionutils_availability():
try:
from deepspeed.ops.sparse_attention import SparseAttentionUtils
except ImportError:
print("SparseAttentionUtils Module is not installed!")
return False
return True
def test_cpp_utils_availability():
try:
from deepspeed.ops.sparse_attention import cpp_utils
except ImportError:
print("Sparse Attention cpp_utils Module is not installed!")
return False
return True
def dense_to_sparse(w, mask, block):
"""Converts dense matrix with explicit zeros to sparse matrix
"""
Z = w.size(0)
ret = torch.empty((Z, mask.sum(), block, block), dtype=w.dtype, device=w.device)
nnz = mask.nonzero()
h, i, j = nnz[:, 0], nnz[:, 1], nnz[:, 2]
for zz in range(Z):
for idx, (hh, ii, jj) in enumerate(zip(h, i, j)):
ret[zz, idx, :, :] = w[zz, hh, ii*block: (ii+1)*block, jj*block: (jj+1)*block]
return ret
def sparse_to_dense(w, mask, block, zero=0):
"""Converts sparse matrix to dense matrix with explicit zeros
"""
maskedw = w.clone()
for bz, wz in enumerate(range(0, w.size(0))):
for bh, wh in enumerate(range(0, w.size(1))):
for bi, wi in enumerate(range(0, w.size(2), block)):
for bj, wj in enumerate(range(0, w.size(3), block)):
if mask[bh, bi, bj] == 0:
maskedw[wz, wh, wi:wi + block, wj:wj + block] = zero
#maskedw[wz, wh, wi : wi+block, wj : wj+block] *= mask[bh, bi, bj]
return maskedw
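# A small self-check sketch (not part of the original tests): round-trip a random
# blocked layout through the two helpers above on CPU tensors. The layout shape
# (H, M//block, N//block) and the dense shape (Z, H, M, N) follow the conventions
# used by the tests below.
def _blocksparse_helpers_demo(block=16):
    layout = make_layout(0.5, (2, 4, 4))          # (H, M//block, N//block)
    w = torch.rand(1, 2, 4 * block, 4 * block)    # (Z, H, M, N)
    sparse = dense_to_sparse(w, layout, block)    # keep only blocks where layout == 1
    dense = sparse_to_dense(w, layout, block)     # zero out blocks where layout == 0
    assert sparse.shape == (1, int(layout.sum()), block, block)
    assert dense.shape == w.shape
    return sparse, dense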
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (5e-4, 5e-5), torch.float16: (3e-2, 2e-3)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
def make_layout(rho, shape):
probs = torch.Tensor([rho, 1 - rho])
generator = torch.distributions.categorical.Categorical(probs)
layout = generator.sample(shape)
return layout
def run_softmax_reference(x, scale, dx, kp_mask, attn_mask, layout, block):
x = sparse_to_dense(x, layout, block, zero=float('-inf'))
x.retain_grad()
if kp_mask is not None:
bcattn_mask = attn_mask[None, None, :, :] + torch.zeros_like(x)
x[bcattn_mask == 0] = float('-inf')
y = torch.softmax(x * scale + kp_mask[:, None, None, :], -1)
else:
y = torch.softmax(x * scale, -1)
y.backward(dx)
dx = x.grad.clone()
dx = dense_to_sparse(dx, layout, block)
y = dense_to_sparse(y, layout, block)
return y, dx
def run_softmax_sparse(x, scale, dx, kp_mask, attn_mask, layout, block):
from deepspeed.ops.sparse_attention.softmax import Softmax
sparse_softmax = Softmax(layout, block, bench=False)
dx = dense_to_sparse(dx, layout, block)
x = dense_to_sparse(x, layout, block)
x.retain_grad()
y = sparse_softmax(x,
scale=scale,
key_padding_mask=kp_mask,
key_padding_mask_mode='add',
attn_mask=attn_mask,
attn_mask_mode='mul')
y.backward(dx)
dx = x.grad.clone()
x.grad.zero_()
return x, dx
def init_softmax_inputs(Z, H, M, N, scale, rho, block, dtype, dense_x=True, layout=None):
if layout is None:
layout = make_layout(rho, (H, M // block, N // block))
if dense_x:
x = torch.rand((Z, H, M, N), dtype=dtype, requires_grad=True, device='cuda')
else:
x = torch.rand((Z,
layout.sum(),
block,
block),
dtype=dtype,
requires_grad=True,
device='cuda')
dx = torch.rand_like(x)
bool_attn_mask = torch.randint(low=0,
high=2,
size=(N,
N),
dtype=torch.bool,
requires_grad=False,
device='cuda')
fp_attn_mask = bool_attn_mask.type(dtype)
kp_mask = torch.randint(low=0,
high=2,
size=(Z,
N),
dtype=dtype,
requires_grad=False,
device='cuda')
kp_mask[kp_mask == 1.] = float('-inf')
return layout, x, dx, bool_attn_mask, fp_attn_mask, kp_mask
def _skip_on_cuda_compatability():
if torch.cuda.get_device_capability()[0] < 7:
pytest.skip("needs higher compute capability than 7")
cuda_major = int(torch.version.cuda.split('.')[0]) * 10
cuda_minor = int(torch.version.cuda.split('.')[1])
cuda_version = cuda_major + cuda_minor
if (cuda_version != 101 and cuda_version != 102) and \
(cuda_version != 111 and cuda_version != 110):
pytest.skip("requires cuda 10.1 or 10.2 or 11.0 or 11.1")
@pytest.mark.parametrize("block", [16, 32])
@pytest.mark.parametrize("width", [256, 576])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_softmax(block, width, dtype):
_skip_on_cuda_compatability()
Z = 2
H = 4
scale = 0.4
rho = 0.4
M = N = width
layout, x, dx, bool_attn_mask, fp_attn_mask, kp_mask = init_softmax_inputs(Z, H, M, N, scale, rho, block, dtype, layout=None)
ref_y, ref_dx = run_softmax_reference(x, scale, dx, kp_mask, bool_attn_mask, layout, block)
st_y, st_dx = run_softmax_sparse(x, scale, dx, kp_mask, fp_attn_mask, layout, block)
assert allclose(ref_y, st_y)
assert allclose(ref_dx, st_dx)
def run_matmul_reference(x, w, mode, trans_a, trans_b, layout, block, dy):
x = sparse_to_dense(x, layout, block) if mode == 'dsd' else x
w = sparse_to_dense(w, layout, block) if mode == 'dds' else w
x.retain_grad()
w.retain_grad()
xx = x.transpose(2, 3) if trans_a else x
ww = w.transpose(2, 3) if trans_b else w
y = torch.matmul(xx, ww)
y = sparse_to_dense(y, layout, block) if mode == 'sdd' else y
y.backward(dy)
dx = x.grad.clone()
dw = w.grad.clone()
x.grad.zero_()
w.grad.zero_()
y = dense_to_sparse(y, layout, block) if mode == 'sdd' else y
dx = dense_to_sparse(dx, layout, block) if mode == 'dsd' else dx
dw = dense_to_sparse(dw, layout, block) if mode == 'dds' else dw
return y, dx, dw
def run_matmul_sparse(x, w, mode, trans_a, trans_b, layout, block, dy):
from deepspeed.ops.sparse_attention.matmul import MatMul
x = dense_to_sparse(x, layout, block) if mode == 'dsd' else x
w = dense_to_sparse(w, layout, block) if mode == 'dds' else w
dy = dense_to_sparse(dy, layout, block) if mode == 'sdd' else dy
op = MatMul(layout, block, mode, trans_a=trans_a, trans_b=trans_b)
x.retain_grad()
w.retain_grad()
y = op(x, w)
y.backward(dy)
dx = x.grad.clone()
dw = w.grad.clone()
x.grad.zero_()
return y, dx, dw
def init_matmul_inputs(Z, H, M, N, K, rho, mode, trans_a, trans_b, block, dtype, layout):
torch.manual_seed(1)
AS0 = K if trans_a else M
AS1 = M if trans_a else K
BS0 = N if trans_b else K
BS1 = K if trans_b else N
shape = {'sdd': (M, N), 'dsd': (AS0, AS1), 'dds': (BS0, BS1)}[mode]
x = torch.rand((Z, H, AS0, AS1), dtype=dtype, requires_grad=True, device='cuda')
w = torch.rand((Z, H, BS0, BS1), dtype=dtype, requires_grad=True, device='cuda')
dy = torch.rand((Z, H, M, N), dtype=dtype, device='cuda')
if layout is None:
layout = make_layout(rho, (H, shape[0] // block, shape[1] // block))
else:
assert list(layout.shape) == [H, shape[0] // block, shape[1] // block]
x.retain_grad()
w.retain_grad()
return x, w, dy, shape, layout
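# The parameter grid below is written out by hand: fp16 covers 'sdd'/'dds' with an optional
# trans_b and 'dsd' with an optional trans_a, fp32 covers all three modes without transposes,
# and the final group sweeps block sizes 16/32/64 for fp16 without transposes.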
testdata = [
(16, dtype, mode, trans_a, trans_b)\
for dtype in [torch.float16]\
for mode in ['sdd', 'dds']\
for trans_a in [False]\
for trans_b in [False, True]\
] + [
(16, dtype, mode, trans_a, trans_b)\
for dtype in [torch.float16]\
for mode in ['dsd']\
for trans_a in [False, True]\
for trans_b in [False]\
] + [
(16, dtype, mode, trans_a, trans_b)\
for dtype in [torch.float32]\
for mode in ['sdd', 'dsd', 'dds']\
for trans_a in [False]\
for trans_b in [False]\
] + [
(block, torch.float16, mode, False, False)\
for block in [16, 32, 64]\
for mode in ['sdd', 'dsd', 'dds']\
]
@pytest.mark.parametrize("block, dtype, mode, trans_a, trans_b", testdata)
def test_matmul(block, dtype, mode, trans_a, trans_b):
_skip_on_cuda_compatability()
Z = 3
H = 2
M = 128
N = 256
K = 192
rho = 0.5
x, w, dy, shape, layout = init_matmul_inputs(Z, H, M, N, K, rho, mode, trans_a, trans_b, block, dtype, layout=None)
ref_y, ref_dx, ref_dw = run_matmul_reference(x.clone(), w.clone(), mode, trans_a, trans_b, layout, block, dy)
st_y, st_dx, st_dw = run_matmul_sparse(x.clone(), w.clone(), mode, trans_a, trans_b, layout, block, dy)
assert allclose(ref_y, st_y)
assert allclose(ref_dx, st_dx)
assert allclose(ref_dw, st_dw)
|
#!/usr/bin/env python3
# coding: utf-8
from rdbox.rdbox_node_formatter import RdboxNodeFormatter
import rdbox.config
from logging import getLogger
r_logger = getLogger('rdbox_cli')
r_print = getLogger('rdbox_cli').getChild("stdout")
class BlockstoreRdboxNodeFormatter(RdboxNodeFormatter):
def output_report(self, rdbox_node_list):
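# Count the nodes whose "location" is not 'edge', cap the count at the configured
# openebs_max_replicas, and report (count - 1) as a string.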
output_str = ""
grouping_dict = rdbox_node_list.group_by("location")
matchCount = 0
for key, list_of_group in grouping_dict.items():
if key == 'edge':
continue
else:
matchCount = matchCount + len(list_of_group)
maxReplicas = int(rdbox.config.get('apps', 'openebs_max_replicas'))
if matchCount >= maxReplicas:
matchCount = maxReplicas
output_str = str(matchCount - 1)
return rdbox_node_list, output_str
|
import argparse
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cakechat.utils.env import init_cuda_env
init_cuda_env()
from cakechat.dialog_model.factory import get_reverse_model
from cakechat.dialog_model.model import CakeChatModel
from cakechat.dialog_model.model_utils import transform_contexts_to_token_ids, lines_to_context
from cakechat.dialog_model.quality import log_predictions, calculate_and_log_val_metrics
from cakechat.utils.files_utils import is_non_empty_file
from cakechat.utils.logger import get_tools_logger
from cakechat.utils.data_types import ModelParam
from cakechat.utils.dataset_loader import get_tokenized_test_lines, load_context_free_val, \
load_context_sensitive_val, get_validation_data_id, get_validation_sets_names
from cakechat.utils.text_processing import get_index_to_token_path, load_index_to_item, get_index_to_condition_path
from cakechat.utils.w2v.model import get_w2v_model_id
from cakechat.config import BASE_CORPUS_NAME, QUESTIONS_CORPUS_NAME, INPUT_SEQUENCE_LENGTH, INPUT_CONTEXT_SIZE, \
PREDICTION_MODES, PREDICTION_MODE_FOR_TESTS, RESULTS_PATH, DEFAULT_TEMPERATURE, TRAIN_CORPUS_NAME, \
USE_PRETRAINED_W2V_EMBEDDINGS_LAYER
_logger = get_tools_logger(__file__)
def _save_test_results(test_dataset, predictions_filename, nn_model, prediction_mode, **kwargs):
context_sensitive_val = load_context_sensitive_val(nn_model.token_to_index, nn_model.condition_to_index)
context_free_val = load_context_free_val(nn_model.token_to_index)
calculate_and_log_val_metrics(nn_model, context_sensitive_val, context_free_val, prediction_mode,
calculate_ngram_distance=False)
test_dataset_ids = transform_contexts_to_token_ids(
list(lines_to_context(test_dataset)), nn_model.token_to_index, INPUT_SEQUENCE_LENGTH, INPUT_CONTEXT_SIZE)
log_predictions(predictions_filename, test_dataset_ids, nn_model, prediction_modes=[prediction_mode], **kwargs)
def predict(model_path,
tokens_index_path=None,
conditions_index_path=None,
default_predictions_path=None,
reverse_model_weights=None,
temperatures=None,
prediction_mode=None):
if not tokens_index_path:
tokens_index_path = get_index_to_token_path(BASE_CORPUS_NAME)
if not conditions_index_path:
conditions_index_path = get_index_to_condition_path(BASE_CORPUS_NAME)
if not temperatures:
temperatures = [DEFAULT_TEMPERATURE]
if not prediction_mode:
prediction_mode = PREDICTION_MODE_FOR_TESTS
# Construct list of parameters values for all possible combinations of passed parameters
prediction_params = [dict()]
if reverse_model_weights:
prediction_params = [
dict(params, mmi_reverse_model_score_weight=w)
for params in prediction_params
for w in reverse_model_weights
]
if temperatures:
prediction_params = [dict(params, temperature=t) for params in prediction_params for t in temperatures]
if not is_non_empty_file(tokens_index_path):
_logger.warning('Couldn\'t find tokens_index file:\n{}. \nExiting...'.format(tokens_index_path))
return
index_to_token = load_index_to_item(tokens_index_path)
index_to_condition = load_index_to_item(conditions_index_path)
w2v_model_id = get_w2v_model_id() if USE_PRETRAINED_W2V_EMBEDDINGS_LAYER else None
nn_model = CakeChatModel(
index_to_token,
index_to_condition,
training_data_param=ModelParam(value=None, id=TRAIN_CORPUS_NAME),
validation_data_param=ModelParam(value=None, id=get_validation_data_id(get_validation_sets_names())),
w2v_model_param=ModelParam(value=None, id=w2v_model_id),
model_init_path=model_path,
reverse_model=get_reverse_model(prediction_mode))
nn_model.init_model()
nn_model.resolve_model()
if not default_predictions_path:
default_predictions_path = os.path.join(RESULTS_PATH, 'results', 'predictions_' + nn_model.model_name)
# Get path for each combination of parameters
predictions_paths = []
# Add suffix to the filename only for parameters that have a specific value passed as an argument
# If no parameters were specified, no suffix is added
if len(prediction_params) > 1:
for cur_params in prediction_params:
cur_path = '{base_path}_{params_str}.tsv'.format(
base_path=default_predictions_path,
params_str='_'.join(['{}_{}'.format(k, v) for k, v in cur_params.items()]))
predictions_paths.append(cur_path)
else:
predictions_paths = [default_predictions_path + '.tsv']
_logger.info('Model for prediction: {}'.format(nn_model.model_path))
_logger.info('Tokens index: {}'.format(tokens_index_path))
_logger.info('File with questions: {}'.format(QUESTIONS_CORPUS_NAME))
_logger.info('Files to dump responses: {}'.format('\n'.join(predictions_paths)))
_logger.info('Prediction parameters {}'.format('\n'.join([str(x) for x in prediction_params])))
processed_test_set = get_tokenized_test_lines(QUESTIONS_CORPUS_NAME, set(index_to_token.values()))
for cur_params, cur_path in zip(prediction_params, predictions_paths):
_logger.info('Predicting with the following params: {}'.format(cur_params))
_save_test_results(processed_test_set, cur_path, nn_model, prediction_mode, **cur_params)
def parse_args():
argparser = argparse.ArgumentParser()
argparser.add_argument(
'-p', '--prediction-mode', action='store', help='Prediction mode', choices=PREDICTION_MODES, default=None)
argparser.add_argument(
'-m',
'--model',
action='store',
help='Path to the file with your model. '
'Be careful, model parameters are inferred from config, not from the filename',
default=None)
argparser.add_argument(
'-i',
'--tokens_index',
action='store',
help='Path to the json file with index_to_token dictionary.',
default=None)
argparser.add_argument(
'-c',
'--conditions_index',
action='store',
help='Path to the json file with index_to_condition dictionary.',
default=None)
argparser.add_argument(
'-o',
'--output',
action='store',
help='Path to the file to dump predictions. '
'Be careful, file extension ".tsv" is appended to the filename automatically',
default=None)
argparser.add_argument(
'-r',
'--reverse-model-weights',
action='append',
type=float,
help='Reverse model score weight for prediction with MMI-reranking objective. Used only in *-reranking modes',
default=None)
argparser.add_argument('-t', '--temperatures', action='append', help='temperature values', default=None, type=float)
args = argparser.parse_args()
# Extra params validation
reranking_modes = [PREDICTION_MODES.beamsearch_reranking, PREDICTION_MODES.sampling_reranking]
if args.reverse_model_weights and args.prediction_mode not in reranking_modes:
raise Exception('--reverse-model-weights param can be specified only for *-reranking prediction modes.')
return args
if __name__ == '__main__':
args = vars(parse_args())
predict(args.pop('model'), args.pop('tokens_index'), args.pop('conditions_index'), args.pop('output'), **args)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 13:12:03 2021
@author: jane_hsieh
Goal: Find potential anomalies in the variable "Tran Count" and visualize the results.
Here we use a person's bank account transaction records, for 'Japan', 'Australia', or 'USA'.
Resource:
1. For a tutorial on simple univariate anomaly detection, see:
https://www.ericsson.com/en/blog/2020/4/anomaly-detection-with-machine-learning
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
data_dir = './Data'
#change your current working directory
os.chdir('/Users/jane_hsieh/OneDrive - nctu.edu.tw/Data Science Analysis Templates/Anomaly Detection/Univariate Anomaly Detection')
os.getcwd()
# parameters -----------------------------------#-
country = 'Japan' #'Japan', 'Australia', 'USA' #-
# ----------------------------------------------#-
# ==================================== 0. Input data: Overseas spend transactions ====================================================
df = pd.read_excel(data_dir+'/Overseas spend(WH).xlsx', sheet_name=country, parse_dates=['CPD Date'], index_col = 'CPD Date')
#0.1 Reorganize data
df.sort_index(inplace=True)
print('The earliest date of data is:\t', df.index.min())
print('The latest date of data is:\t', df.index.max())
### reset Datetime index from earliest to latest date (daily), in case of missing dates
df = df.reindex(pd.date_range(df.index.min(), df.index.max(), freq = 'D'))
### Check if any missing date (missing data)
print('Check if any missing data in each variable:\n',df.isnull().sum())
'''
Fortunately, there's no missing data! Now we have a series of data from 2017-11-01 to 2019-06-30.
'''
#df['Tran Count'].plot(figsize=(30,10))
#plt.show()
# ==================================== 1. Anomaly Detection: Find Anomalies Using Mean +/- 1.96*SD ====================================================
# 1.1 Calculate rolling statistics (mean, std) over a trailing window (the past `window` days, including the current day)
# Note: to reduce the lag effect, you can instead center the window on the current day (center=True)
# parameters -----------------------------------#-
window = 31 #-
centered = False #True # or False #-
threshold = 1.96 #-
# ----------------------------------------------#-
rolling_stats = df['Tran Count'].rolling(window, center=centered).agg(['mean', 'std']) #<<<<<<<<<<<<<<<<<<<<<<<
df2 = df.join(rolling_stats) #combine the column-wise data into df
df2['Upper_SD'] = df2['mean'] + threshold * df2['std'] #upper bound of 95% confidence interval
df2['Lower_SD'] = df2['mean'] - threshold * df2['std'] #lower bound of 95% confidence interval
## Flag possible anomalies using Mean +/- 1.96*SD
def Is_Anomaly(x):
if pd.isna(x[['Tran Count', 'mean','std']]).sum() >0:
return np.nan
z = abs(x['Tran Count'] - x['mean'])/x['std']
if z > threshold:
return 1 # outlier
else:
return 0 # normal
anomalies = df2.apply(Is_Anomaly, axis=1)
anomalies.name = 'anomalies'
print('The percentage of anomalies in data is {:.2f}%'.format(np.mean(anomalies)*100))
df2 = df2.join(anomalies)
# 1.2. Visualization of the results -----------------------------------------------------------------
anomalies = df2[df2['anomalies']==1]
fig, ax = plt.subplots(figsize=(30,10))
ax.plot(df2.index, df2['Tran Count'], linestyle = '-', color='b', label='Tran Count')
ax.plot(df2.index, df2['mean'], linestyle = '-', color='r', label='Mean')
ax.plot(df2.index, df2['Upper_SD'], linestyle = '--', color='g', label = r'Mean $\pm$ 1.96*SD')
ax.plot(df2.index, df2['Lower_SD'], linestyle = '--', color='g')
ax.scatter( anomalies.index, anomalies['Tran Count'], color = 'r' )
#legend = ax.legend(loc="upper right", edgecolor="black")
#legend.get_frame().set_alpha(None)
#legend.get_frame().set_facecolor((0, 0, 0, 0))
#ax.set_title(f'{country} - TS plot detecting anomalies with windowsize {window} (center={str(centered)})')
# Save before show(); show() clears the figure in non-interactive backends, which would leave a blank image.
plt.savefig(f'{country} - TS plot detecting anomalies with windowsize {window} (center={str(centered)}).png', transparent=True) #<<<<<<<<<<<<<<<<<<<<<<<
plt.show()
'''
Mean and SD can change drastically due to extreme values (possible anomalies).
To reduce the impact of extreme values, we may use the median and the median absolute deviation (MAD),
instead of the mean and std, as the basis for detecting anomalies.
'''
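# A quick illustrative check (a sketch, not part of the original analysis): a single
# extreme value inflates the standard deviation dramatically but barely moves the MAD.
from scipy import stats as _stats_demo
_demo = np.array([9, 10, 10, 11, 200])
print('std with one outlier: {:.1f}'.format(_demo.std()))                              # ~76.0
print('MAD with one outlier: {:.1f}'.format(_stats_demo.median_abs_deviation(_demo)))  # 1.0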
# ==================================== 2. Anomaly Detection: Find Anomalies Using Median +/- 1.96*MAD ====================================================
# 2.1 Calculate rolling statistics (median, MAD) over a trailing window (the past `window` days, including the current day)
# Note: to reduce the lag effect, you can instead center the window on the current day (center=True)
# parameters -----------------------------------#-
threshold = 1.96 #-
# ----------------------------------------------#-
from scipy import stats
#x = df[:31]
#stats.median_abs_deviation(x['Tran Count'])
rolling_mdn = df['Tran Count'].rolling(window, center=centered).median()
rolling_mdn.name = 'median'
rolling_MAD = df['Tran Count'].rolling(window, center=centered).apply(stats.median_abs_deviation)
rolling_MAD.name = 'MAD'
df3 = df.join([rolling_mdn, rolling_MAD]) #combine the column-wise data into df
df3['Upper_MAD'] = df3['median'] + threshold * df3['MAD'] #upper bound of robust 95% confidence interval
df3['Lower_MAD'] = df3['median'] - threshold * df3['MAD'] #lower bound of robust 95% confidence interval
## Flag possible anomalies using Median +/- 1.96*MAD
def Is_Anomaly_MAD(x):
if pd.isna(x[['Tran Count', 'median','MAD']]).sum() >0:
return np.nan
z = abs(x['Tran Count'] - x['median'])/x['MAD']
if z > threshold:
return 1 # outlier
else:
return 0 # normal
anomalies = df3.apply(Is_Anomaly_MAD, axis=1)
anomalies.name = 'anomalies'
print('The percentage of anomalies in data is {:.2f}%'.format(np.mean(anomalies)*100))
df3 = df3.join(anomalies)
# 2.2 Visualization of the results -----------------------------------------------------------------
anomalies = df3[df3['anomalies']==1]
fig, ax = plt.subplots(figsize=(30,10))
ax.plot(df3.index, df3['Tran Count'], linestyle = '-', color='b', label='Tran Count')
ax.plot(df3.index, df3['median'], linestyle = '-', color='r', label='Median')
ax.plot(df3.index, df3['Upper_MAD'], linestyle = '--', color='g', label = r'Median $\pm$ 1.96*MAD')
ax.plot(df3.index, df3['Lower_MAD'], linestyle = '--', color='g')
ax.scatter( anomalies.index, anomalies['Tran Count'], color = 'r' )
#legend = ax.legend(loc="upper right", edgecolor="black")
#legend.get_frame().set_alpha(None)
#legend.get_frame().set_facecolor((0, 0, 0, 0))
#ax.set_title(f'{country} - TS plot detecting anomalies with windowsize {window} (center={str(centered)})')
plt.savefig(f'{country} - TS plot detecting robust anomalies with windowsize {window} (center={str(centered)}).png', transparent=True) #<<<<<<<<<<<<<<<<<<<<<<<
plt.show()
|
from random import randint
class MyPlayer(object):
'''
Random reversi player class.
'''
move_i = 0
def __init__(self, my_color, opponent_color):
self.name = 'random'
self.my_color = my_color
self.opponentColor = opponent_color
self.moveCount = 1
def move(self,board):
boardSize = len(board)
possible = []
for x in range(boardSize):
for y in range(boardSize):
if (board[x][y] == -1) and self.is_correct_move([x,y],board,boardSize):
possible.append((x,y))
possible_moves = len(possible)-1
if possible_moves < 0:
print('No possible move!')
return None
my_move = randint(0,possible_moves)
return possible[my_move]
def is_correct_move(self,move,board,boardSize):
dx = [-1,-1,-1,0,1,1,1,0]
dy = [-1,0,1,1,1,0,-1,-1]
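# dx/dy enumerate the 8 neighbouring directions; a move is valid if it would
# flip opponent discs along at least one of them.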
for i in range(len(dx)):
if self.confirm_direction(move,dx[i],dy[i],board,boardSize):
return True
return False
def confirm_direction(self,move,dx,dy,board,boardSize):
posx = move[0]+dx
posy = move[1]+dy
if (posx>=0) and (posx<boardSize) and (posy>=0) and (posy<boardSize):
if board[posx][posy] == self.opponentColor:
while (posx>=0) and (posx<=(boardSize-1)) and (posy>=0) and (posy<=(boardSize-1)):
posx += dx
posy += dy
if (posx>=0) and (posx<boardSize) and (posy>=0) and (posy<boardSize):
if board[posx][posy] == -1:
return False
if board[posx][posy] == self.my_color:
return True
return False
|
import socket
import sys
def is_hostname_reverse_resolving_correctly():
'''Check if the primary IP addr for our host reverse-resolves correctly. Also returns True
if no reverse resolve is defined. This is useful for server processes like Java that sometimes
use the reverse-resolved hostname.'''
my_hostname = socket.gethostname()
my_ipaddr = socket.gethostbyname(my_hostname)
try:
(reverse_hostname, reverse_aliaslist, reverse_ipaddrlist) = socket.gethostbyaddr(my_ipaddr)
except socket.herror:
# Then there's no reverse-DNS, normal on a DHCP network.
return True
try:
reverse_hostname_to_ipaddr = socket.gethostbyname(reverse_hostname)
except socket.error:
print("Warning: local hostname %s running on %s, but %s reverse-resolves to invalid host %s." %
(my_hostname, my_ipaddr, my_ipaddr, reverse_hostname), file=sys.stderr)
return False
return True
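# Minimal CLI usage sketch (not in the original script): exit non-zero when the
# check fails, so it can be used directly in a health-check pipeline.
if __name__ == '__main__':
    sys.exit(0 if is_hostname_reverse_resolving_correctly() else 1)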
|
"""! This script has been borrowed and adapted. Original script:
https://github.com/pumpikano/tf-dann/blob/master/create_mnistm.py.
It creates the MNIST-M dataset based on MNIST
"""
import tarfile
from typing import Any
import numpy as np
import skimage # type: ignore
import skimage.io # type: ignore
import skimage.transform # type: ignore
# pylint: disable=invalid-name, disable=no-member, bare-except
def compose_image(digit: Any, background: Any) -> Any:
"""Difference-blend a digit and a random patch from a background image."""
w, h, _ = background.shape
dw, dh, _ = digit.shape
x = np.random.randint(0, w - dw)
y = np.random.randint(0, h - dh)
bg = background[x : x + dw, y : y + dh]
return np.abs(bg - digit).astype(np.uint8)
def mnist_to_img(x: Any) -> Any:
"""Binarize MNIST digit and convert to RGB."""
x = (x > 0).float()
d = x.reshape([28, 28, 1]) * 255
return np.concatenate([d, d, d], 2)
def create_mnistm(X: Any) -> Any:
"""Give an array of MNIST digits, blend random background patches to build
the MNIST-M dataset as described in
http://jmlr.org/papers/volume17/15-239/15-239.pdf."""
bst_path = "./data/MNIST_M/BSR_bsds500.tgz"
rand = np.random.RandomState(42)
train_files = []
with tarfile.open(bst_path, "r") as bsr_file:
for name in bsr_file.getnames():
if name.startswith("BSR/BSDS500/data/images/train/"):
train_files.append(name)
print("Loading BSR training images")
background_data = []
for name in train_files:
try:
fp = bsr_file.extractfile(name)
bg_img = skimage.io.imread(fp)
background_data.append(bg_img)
except:
continue
X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)
for i in range(X.shape[0]):
if i % 1000 == 0:
print("Processing example", i)
bg_img = rand.choice(background_data)
d = mnist_to_img(X[i])
d = compose_image(d, bg_img)
X_[i] = d
return X_
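# Example call (a sketch; assumes `mnist_train` is an array of 28x28 MNIST digits and
# that ./data/MNIST_M/BSR_bsds500.tgz has already been downloaded):
# mnistm_train = create_mnistm(mnist_train)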
|
''' show_authentication_sessions.py
IOSXE parsers for the following show commands:
* show authentication sessions
* show authentication sessions interface {intf}
* show authentication sessions interface {intf} details
'''
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional, \
Or, \
And, \
Default, \
Use
# import parser utils
from genie.libs.parser.utils.common import Common
'''
Device# show authentication sessions
Interface MAC Address Method Domain Status Session ID
Gi1/48 0015.63b0.f676 dot1x DATA Authz Success 0A3462B1000000102983C05C
Gi1/5 000f.23c4.a401 mab DATA Authz Success 0A3462B10000000D24F80B58
Gi1/5 0014.bf5d.d26d dot1x DATA Authz Success 0A3462B10000000E29811B94
'''
#==============================================
# Parser for 'show authentication sessions'
#==============================================
class ShowAuthenticationSessionsSchema(MetaParser):
"""Schema for show authentication sessions
show authentication sessions interface {intf}
"""
schema = {
'interfaces': {
Any(): {
'interface': str,
'client': {
Any(): {
'client': str,
'method': str,
'domain': str,
'status': str,
'session': {
Any(): {
'session_id': str,
}
}
}
}
}
}
}
class ShowAuthenticationSessions(ShowAuthenticationSessionsSchema):
"""Parser for 'show authentication sessions'
'show authentication sessions interface {intf}''
"""
cli_command = ['show authentication sessions', 'show authentication sessions interface {intf}']
def cli(self,intf=None,output=None):
if intf:
cmd = self.cli_command[1].format(intf=intf)
else:
cmd = self.cli_command[0]
if output is None:
# get output from device
out = self.device.execute(cmd)
else:
out = output
# initial return dictionary
ret_dict = {}
# initial regexp pattern
# Interface MAC Address Method Domain Status Session ID
p1 = re.compile(r'^Interface +MAC +Address +Method +Domain +Status +Session +ID')
# Interface Identifier Method Domain Status Fg Session ID
p2 = re.compile(r'^Interface +Identifier +Method +Domain +Status +Fg +Session +ID')
# Matching patterns
# Gi1/48 0015.63b0.f676 dot1x DATA Authz Success 0A3462B1000000102983C05C
# Gi1/5 000f.23c4.a401 mab DATA Authz Success 0A3462B10000000D24F80B58
# Gi1/5 0014.bf5d.d26d dot1x DATA Authz Success 0A3462B10000000E29811B94
p4 = re.compile(r'^(?P<interface>\S+) +'
'(?P<client>\w+\.\w+\.\w+) +'
'(?P<method>\w+) +'
'(?P<domain>\w+) +'
'(?P<status>\w+(?: +\w+)?) +'
'(?P<session>\w+)$')
for line in out.splitlines():
line = line.strip()
# Ignore the title
if p1.match(line) or p2.match(line):
continue
# Gi1/0/48 0015.63b0.f676 dot1x DATA Authz Success 0A3462B1000000102983C05C
# Gi1/7/35 0000.0022.2222 dot1x UNKNOWN Auth 141927640000000E0B40EDB0
m = p4.match(line)
if m:
group = m.groupdict()
intf = Common.convert_intf_name(group['interface'])
intf_dict = ret_dict.setdefault('interfaces', {}).setdefault(intf, {})
intf_dict['interface'] = intf
client = group['client']
client_dict = intf_dict.setdefault('client', {}).setdefault(client, {})
client_dict.update({'client': client})
client_dict['method'] = group['method']
client_dict['domain'] = group['domain']
client_dict['status'] = group['status']
session = group['session']
client_dict.setdefault('session', {}).setdefault(session, {})\
.setdefault('session_id', session)
continue
return ret_dict
#==================================================================================
# Parser for 'show authentication sessions interface {intf} details'
#==================================================================================
class ShowAuthenticationSessionsInterfaceDetailsSchema(MetaParser):
"""Schema for 'show authentication sessions interface {intf} details'
"""
schema = {
'interfaces': {
Any(): {
'interface': str,
'iif_id': str,
'mac_address': str, #MAC Address
'ipv6_address': str,
'ipv4_address': str,
'user_name': str,
'status': str,
'domain': str,
'oper_host_mode': str,
'oper_control_dir': str,
'session_timeout': str,
'common_session_id': str,
'acct_session_id': str,
'handle': str,
'current_policy': str,
'local_policies': {
'template': {
Any():{
'priority': int,
}
},
'vlan_group': {
'vlan': int,
}
},
'method_status': {
Any(): {
'method': str,
'state': str,
}
}
}
}
}
class ShowAuthenticationSessionsInterfaceDetails(ShowAuthenticationSessionsInterfaceDetailsSchema):
"""Parser for 'show authentication sessions interface {intf} details'
"""
cli_command = 'show authentication sessions interface {intf} details'
def cli(self, intf, output=None):
cmd = self.cli_command.format(intf=intf)
if output is None:
# get output from device
out = self.device.execute(cmd)
else:
out = output
# initial return dictionary
ret_dict = {}
# Interface: GigabitEthernet3/0/2
# IIF-ID: 0x1055240000001F6
# MAC Address: 0010.0010.0001
# IPv6 Address: Unknown
# IPv4 Address: 192.0.2.1
# User-Name: auto601
# Status: Authorized
# Domain: DATA
# Oper host mode: single-host
# Oper control dir: both
# Session timeout: N/A
# Common Session ID: AC14FC0A0000101200E28D62
# Acct Session ID: Unknown
# Handle: 0xDB003227
# Current Policy: dot1x_dvlan_reauth_hm
p1 = re.compile(r'^(?P<argument>\S[\w\s\-]+): +(?P<value>\S+)$')
# Local Policies:
p2 = re.compile(r'^Local +Policies:')
# Template: CRITICAL_VLAN (priority 150)
p3 = re.compile(r'^Template: +(?P<template>\w+) +\(priority +(?P<priority>[0-9]+)\)$')
# Vlan Group: Vlan: 130
p4 = re.compile(r'^Vlan +Group: +(?P<vlan_name>\w+): +(?P<vlan_value>[0-9]+)$')
# Method status list:
p5 = re.compile(r'^Method +status +list:')
# dot1x Authc Failed
p6 = re.compile(r'^(?P<method>dot1x|mab) +(?P<state>(\w+\s\w+)|(\w+))$')
for line in out.splitlines():
line = line.strip()
# Ignore all titles
if p2.match(line) or p5.match(line):
continue
# match these lines:
# Interface: GigabitEthernet3/0/2
# IIF-ID: 0x1055240000001F6
# MAC Address: 0010.0010.0001
# IPv6 Address: Unknown
# IPv4 Address: 192.0.2.1
# User-Name: auto601
# Status: Authorized
# Domain: DATA
# Oper host mode: single-host
# Oper control dir: both
# Session timeout: N/A
# Common Session ID: AC14FC0A0000101200E28D62
# Acct Session ID: Unknown
# Handle: 0xDB003227
# Current Policy: dot1x_dvlan_reauth_hm
m = p1.match(line)
if m:
group = m.groupdict()
intf_dict = ret_dict.setdefault('interfaces', {}).setdefault(intf, {})
key = re.sub(r'( |-)', '_',group['argument'].lower())
intf_dict.update({key: group['value']})
continue
# Template: CRITICAL_VLAN (priority 150)
m = p3.match(line)
if m:
group = m.groupdict()
template_dict = intf_dict.setdefault('local_policies', {}).setdefault('template', {})
priority_dict = template_dict.setdefault(group['template'], {})
priority_dict.update({'priority': int(group['priority'])})
continue
# Vlan Group: Vlan: 130
m = p4.match(line)
if m:
group = m.groupdict()
vlan_dict = intf_dict.setdefault('local_policies', {}).setdefault('vlan_group',{})
vlan_dict.update({'vlan': int(group['vlan_value'])})
continue
# dot1x Authc Failed
m = p6.match(line)
if m:
group = m.groupdict()
method_stat = intf_dict.setdefault('method_status', {}).setdefault(group['method'], {})
method_stat.update({'method': group['method']})
method_stat.update({'state': group['state']})
continue
return ret_dict
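# Usage sketch (illustrative; assumes a connected pyATS/Genie `device` object):
# parsed = device.parse('show authentication sessions')
# parsed = device.parse('show authentication sessions interface GigabitEthernet1/0/48 details')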
|
import random
import os
from lib.flight import Flight
from lib.passenger import Passenger
import time
class Booker:
def __init__(self, destinations_list=None):
if destinations_list == None:
self.destinations = ["New York", "Paris", "London", "Tokyo"]
else:
self.destinations = destinations_list
self.flight_dtb = dict()
i = 0
for elem in self.destinations:
self.flight_dtb[elem] = Flight(elem)
i += 1
self.passport_dtb = []
self.passenger_dtb = dict()
self.ticket_list = []
self.current_user = None
self.main_menu_it = 0
self.exit = False
def main_menu(self):
self.exit = False
os.system("clear")
print("---- MENU ----")
known = False
same_user = False
if self.main_menu_it > 0:
choice = input("Are you still " + self.current_user.pseudo + "---- y/n")
if choice == "y":
pass_n = input("Enter your passport number ---- ")
if pass_n == self.current_user.passport_number:
print("Welcome back !")
same_user = True
else:
print(
"Your passport number didn't correspond ! Resetting the menu."
)
same_user = False
if not (same_user):
name = input("Enter your name ---- ")
pass_n = input("Enter your passport number ---- ")
for items, keys in self.passenger_dtb.items():
if pass_n in items:
known = True
else:
known = False
if not (known):
self.passenger_dtb[pass_n] = Passenger(name, pass_n)
else:
print("Oh, you're already registered !")
self.current_user = self.passenger_dtb[pass_n]
print(
"Hello "
+ self.current_user.pseudo
+ " what do you want to do ?\n 1 - Reserve a Ticket\n 2 - Cancel a reservation\n 3 - Display ticket information.\n 4 - Exit"
)
opt = int(input("Choice ---- "))
if opt == 1:
self.reserve_ticket(self.current_user)
elif opt == 2:
self.cancel_ticket(self.current_user)
elif opt == 3:
self.display_info(self.current_user)
elif opt == 4:
self.exit = True
else:
print("Not yet implemented.")
self.main_menu_it += 1
def reserve_ticket(self, Passenger, auto_test=False):
if auto_test == False:
os.system("clear")
print("---- MENU ----")
for i in range(len(self.destinations)):
print(str(i + 1) + " : " + self.destinations[i])
if auto_test == False:
dest = 0
while dest < 1 or dest > len(self.destinations):
dest = int(input("Choice : "))
else:
dest = random.randint(1, len(self.destinations))
chosen_flight = self.flight_dtb[self.destinations[dest - 1]]
if Passenger.passport_number in self.passport_dtb:
print(
"Your Passport Number is already used by someone, are you sure it is correct ?"
)
else:
self.passport_dtb.append(Passenger.passport_number)
ticket = self.generate_ticket(Passenger)
Passenger.book_ticket(chosen_flight, ticket)
chosen_flight.add_passenger(Passenger)
print(
"Your ticket for "
+ chosen_flight.destination
+ " has been successfully booked."
)
print("Returning to main menu...")
if not (auto_test):
input("Press Enter to continue...")
time.sleep(2)
def cancel_ticket(self, Passenger, auto_test=False):
if not (Passenger.has_booked):
print("You can't cancel tickets because you never booked one !")
if not (auto_test):
input("Press Enter to continue...")
else:
if auto_test == False:
os.system("clear")
print("---- MENU ----")
Passenger.print_tickets()
cities_list = []
ticket_numb_list = []
for cities, ticket_numb in Passenger.book.items():
cities_list.append(cities)
ticket_numb_list.append(ticket_numb)
if not (auto_test):
selected_ticket = int(
input("Type the ticket number you want to cancel.")
)
while selected_ticket not in ticket_numb_list:
print("Ticket not found, type it again !")
selected_ticket = int(
input("Type the ticket number you want to cancel.")
)
else:
selected_ticket = random.choice(ticket_numb)
index = ticket_numb_list.index(selected_ticket)
chosen_flight = self.flight_dtb[cities_list[index]]
Passenger.remove_ticket(chosen_flight, selected_ticket)
self.ticket_list.remove(selected_ticket)
chosen_flight.remove_passenger(Passenger)
print(
"Your ticket for "
+ chosen_flight.destination
+ " has been successfully canceled."
)
print("Returning to main menu...")
if not (auto_test):
input("Press Enter to continue...")
time.sleep(1)
def display_info(self, Passenger):
Passenger.print_info()
input("Press Enter to continue...")
time.sleep(2)
def generate_ticket(self, Passenger):
ticket = random.randint(1000, 100000)
while ticket in self.ticket_list:
ticket = random.randint(1000, 100000)
self.ticket_list.append(ticket)
return ticket
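# Minimal interactive usage sketch (illustrative, not in the original module):
# booker = Booker()
# while not booker.exit:
#     booker.main_menu()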
|
"""Update a single file in an already published Zenodo deposition and collect the new version of the DOI."""
####
#### Update a single file in an already published Zenodo deposition
#### and collect the new version of the DOI.
####
#### Example usage to operate in Zenodo:
#### python3 ./scripts/zenodo-version-update.py --help
#### Implicit new version:
#### python3 ./scripts/zenodo-version-update.py --verbose --sandbox --key abc --concept 199441 --file /tmp/go-release-reference.tgz --output /tmp/release-doi.json
#### Explicit new version:
#### python3 ./scripts/zenodo-version-update.py --verbose --sandbox --key abc --concept 199441 --file /tmp/go-release-reference.tgz --output /tmp/release-doi.json --revision `date +%Y-%m-%d`
####
## Standard imports.
import sys
import argparse
import logging
import os
import json
import requests
# from requests_toolbelt.multipart.encoder import MultipartEncoder
import datetime
## Logger basic setup.
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger('zenodo-version-update')
LOG.setLevel(logging.WARNING)
def die(instr):
"""Die a little inside."""
LOG.error(instr)
sys.exit(1)
def main():
"""The main runner for our script."""
## Deal with incoming.
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='More verbose output')
parser.add_argument('-k', '--key',
help='The access key (token) to use for commands.')
parser.add_argument('-s', '--sandbox', action='store_true',
help='If used, will aim at the sandbox server.')
parser.add_argument('-c', '--concept',
help='[optional] The base published concept that we want to work off of.')
parser.add_argument('-f', '--file',
help='[optional] The local file to use in an action.')
parser.add_argument('-o', '--output',
help='[optional] The local file to use in an action.')
parser.add_argument('-r', '--revision',
help='[optional] Add optional revision string to update.')
args = parser.parse_args()
if args.verbose:
LOG.setLevel(logging.INFO)
LOG.info('Verbose: on')
## Ensure server URL.
server_url = 'https://zenodo.org'
if args.sandbox :
server_url = 'https://sandbox.zenodo.org'
LOG.info('Will use: ' + server_url)
## Ensure key/token.
if not args.key:
die('need a "key/token" argument')
LOG.info('Will use key/token: ' + args.key)
## Check JSON output file.
if args.output:
LOG.info('Will output to: ' + args.output)
## Check JSON output file.
revision = None
if args.revision:
revision = args.revision
LOG.info('Will add explicit "version" string to revision: ' + revision)
else:
revision = datetime.datetime.now().strftime("%Y-%m-%d")
LOG.info('Will add implicit "version" string to revision: ' + revision)
## Ensure concept.
if not args.concept:
die('need a "concept" argument')
concept_id = int(args.concept)
LOG.info('Will use concept ID: ' + str(concept_id))
def die_screaming(instr, response=None, deposition_id=None):
"""Make sure we exit in a way that will get Jenkins's attention, giving good response debugging information along the way if available."""
if str(type(response)) == "<class 'requests.models.Response'>":
if not response.text or response.text == "":
LOG.error('no response from server')
LOG.error(instr)
else:
LOG.error(json.dumps(response.json(), indent=4, sort_keys=True))
LOG.error(response.status_code)
LOG.error(instr)
if deposition_id:
LOG.error("attempting to discard working deposition: " + str(deposition_id))
response = requests.delete(server_url + '/api/deposit/depositions/' + str(deposition_id), params={'access_token': args.key})
if response.status_code != 204:
LOG.error('failed to discard: manual intervention plz')
LOG.error(response.status_code)
else:
LOG.error('discarded session')
sys.exit(1)
###
###
###
## Convert the filename into a referential base for use later on.
filename = os.path.basename(args.file)
## Get listing of all depositions.
response = requests.get(server_url + '/api/deposit/depositions', params={'access_token': args.key})
## Test file listing okay.
if response.status_code != 200:
die_screaming('cannot get deposition listing', response)
## Go from concept id to deposition listing.
depdoc = None
for entity in response.json():
conceptrecid = entity.get('conceptrecid', None)
if conceptrecid and str(conceptrecid) == str(concept_id):
depdoc = entity
## Test deposition doc search okay.
if not depdoc:
die_screaming('could not find desired concept', response)
## Test that status is published (no open session).
if depdoc.get('state', None) != 'done':
die_screaming('desired concept currently has an "open" status', response)
## Get current deposition id.
curr_dep_id = int(depdoc.get('id', None))
LOG.info('current deposition id: ' + str(curr_dep_id))
## Get files for the current depositon.
response = requests.get(server_url + '/api/deposit/depositions/' + str(curr_dep_id) + '/files', params={'access_token': args.key})
## Test file listing okay.
if response.status_code != 200:
die_screaming('cannot get file listing', response)
## Go from filename to file ID.
file_id = None
for filedoc in response.json():
filedoc_fname = filedoc.get('filename', None)
if filedoc_fname and filedoc_fname == filename:
file_id = filedoc.get('id', None)
## Test file ID search okay.
if not file_id:
die_screaming('could not find desired filename', response)
## Open versioned deposition session.
response = requests.post(server_url + '/api/deposit/depositions/' + str(curr_dep_id) + '/actions/newversion', params={'access_token': args.key})
## Test correct opening.
if response.status_code != 201:
die_screaming('cannot open new version/session', response, curr_dep_id)
## Get the new deposition id for this version.
new_dep_id = None
d = response.json()
if d.get('links', False) and d['links'].get('latest_draft', False):
new_dep_id = int(d['links']['latest_draft'].split('/')[-1])
## Test that there is a new deposition ID.
if not new_dep_id:
die_screaming('could not find a new deposition ID', response, curr_dep_id)
LOG.info('new deposition id: ' + str(new_dep_id))
## Delete the current file (by ID) in the session.
#response = requests.delete('%s/%s' % (new_bucket_url, filename), params={'access_token': args.key})
response = requests.delete(server_url + '/api/deposit/depositions/' + str(new_dep_id) + '/files/' + str(file_id), params={'access_token': args.key})
## Test correct file delete.
if response.status_code != 204:
die_screaming('could not delete file', response, new_dep_id)
###
### WARNING: Slipping into the (currently) unpublished v2 API here
### to get around file size issues we ran into.
### I don't quite understand the bucket API--the URLs shift more than
### I'd expect, but the following works.
###
### NOTE: secret upload magic: https://github.com/zenodo/zenodo/issues/833#issuecomment-324760423 and
### https://github.com/zenodo/zenodo/blob/df26b68771f6cffef267c056cf38eb7e6fa67c92/tests/unit/deposit/test_api_buckets.py
###
## Get new depositon...as the bucket URLs seem to have changed
## after the delete...
response = requests.get(server_url + '/api/deposit/depositions/' + str(new_dep_id), params={'access_token': args.key})
## Get the bucket for upload.
new_bucket_url = None
d = response.json()
if d.get('links', False) and d['links'].get('bucket', False):
new_bucket_url = d['links'].get('bucket', False)
## Test that there are new bucket and publish URLs.
if not new_bucket_url:
die_screaming('could not find a new bucket URL', response, curr_dep_id)
LOG.info('new bucket URL: ' + str(new_bucket_url))
## Add the new version of the file. Try and avoid:
## https://github.com/requests/requests/issues/2717 with
## https://toolbelt.readthedocs.io/en/latest/uploading-data.html#streaming-multipart-data-encoder
## Try 1 caused memory overflow issues (I'm trying to upload many GB).
## Try 2 "should" have worked, but zenodo seems incompatible.
## with requests and the request toolbelt, after a fair amount of effort.
## Try 3 appears to work, but uses an unpublished API and injects the
## multipart information in to the file... :(
##
## Try 4...not working...
# encoder = MultipartEncoder({
# 'file': (filename, open(args.file, 'rb'),'application/octet-stream')
# })
# response = requests.put('%s/%s' % (new_bucket_url, filename),
# data=encoder,
# #data = {'filename': filename},
# #files = {'file': open(args.file, 'rb')},
# params = {'access_token': args.key},
# headers={
# #"Accept":"multipart/related; type=application/octet-stream",
# "Content-Type":encoder.content_type
# })
## Try 3
with open(args.file, "rb") as fp:
response = requests.put("{url}/{fname}".format(url=new_bucket_url, fname=filename),
data=fp,
params={'access_token': args.key}
)
# ## Try 2
# encoder = MultipartEncoder({
# #'filename': filename,
# 'file': (filename, open(args.file, 'rb'))
# })
# response = requests.post(server_url + '/api/deposit/depositions/' + str(new_dep_id) + '/files', params={'access_token': args.key}, data=encoder)
# ## Try 1
# data = {'filename': filename}
# files = {'file': open(args.file, 'rb')}
# response = requests.post(server_url + '/api/deposit/depositions/' + str(new_dep_id) + '/files', params={'access_token': args.key}, data=data, files=files)
## Test correct file add.
if response.status_code > 200:
die_screaming('could not add file', response, new_dep_id)
###
### NOTE: Leaving v2 area.
###
## Update metadata version string; first, get old metadata.
response = requests.get(server_url + '/api/deposit/depositions/' + str(new_dep_id), params={'access_token': args.key})
## Test correct metadata get.
if response.status_code != 200:
die_screaming('could not get access to current metadata', response, new_dep_id)
## Get metadata or die trying.
oldmetadata = None
if response.json().get('metadata', False):
oldmetadata = response.json().get('metadata', False)
else:
die_screaming('could not get current metadata', response, new_dep_id)
## Construct update metadata and send to server.
oldmetadata['version'] = revision
newmetadata = {
"metadata": oldmetadata
}
headers = {"Content-Type": "application/json"}
response = requests.put(server_url + '/api/deposit/depositions/' + str(new_dep_id), params={'access_token': args.key}, data=json.dumps(newmetadata), headers=headers)
## Test correct metadata put.
if response.status_code != 200:
die_screaming('could not add optional metadata', response, new_dep_id)
## Publish.
response = requests.post(server_url + '/api/deposit/depositions/' + str(new_dep_id) + '/actions/publish', params={'access_token': args.key})
## Test correct re-publish/version action.
if response.status_code != 202:
die_screaming('could not re-publish', response, new_dep_id)
## Extract new DOI.
doi = None
if response.json().get('doi', False):
doi = response.json().get('doi', False)
else:
die_screaming('could not get DOI', response, new_dep_id)
## Done!
LOG.info(str(doi))
if args.output:
with open(args.output, 'w+') as fhandle:
fhandle.write(json.dumps({'doi': doi}, sort_keys=True, indent=4))
## You saw it coming...
if __name__ == '__main__':
main()
|
class Store(object):
"""
An object that can be used to keep track of state
"""
def __init__(self, *args, **kwargs):
self._storage = {}
for item in args:
if isinstance(item, dict):
self._storage.update(item)
self._storage.update(kwargs)
def set(self, key, value):
"""
Set a particular state variable
:param key: The name of the state variable
:param value: The value to assign to the variable
"""
self._storage[key] = value
def delete(self, key):
"""
Delete a particular state variable
:param key: The name of the state variable
"""
self._storage.pop(key)
def get(self, key, default=None):
"""
Get a particular stage variable. Defaults to None.
:param key: The name of the stage variable
:param default: The default value if there is no variable
"""
return self._storage.get(key, default)
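if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module): the Store
    # behaves like a thin wrapper around a dict, seeded from dicts and keyword arguments.
    store = Store({'count': 1}, user='alice')
    store.set('count', 2)
    assert store.get('count') == 2
    assert store.get('missing', 0) == 0
    store.delete('user')
    assert store.get('user') is None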
|
# coding: utf-8
import webiopi
import time
import RPi.GPIO as GPIO
webiopi.setDebug()
GPIO.setwarnings(False)
#initial setup
GPIO.setmode(GPIO.BCM)
GPIO.setup(14,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(15,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(18,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(24,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(17,GPIO.OUT,initial=GPIO.LOW) #ultrasonic
GPIO.setup(27,GPIO.IN) #ultrasonic
GPIO.setup(2,GPIO.OUT,initial=GPIO.LOW) #led
GPIO.output(14,0)
GPIO.output(15,0)
pwmS=GPIO.PWM(24,50) #submotorpwm
pwmD=GPIO.PWM(18,50) #dcmotorpwm
pwmD.start(0)
pwmS.start(0)
pwmS.ChangeDutyCycle(8.4)
@webiopi.macro
def servo_freq(freq):
global pwmS
pwmS.ChangeFrequency(int(freq))
@webiopi.macro
def servo_duty(duty):
global pwmS
pwmS.ChangeDutyCycle(float(duty))
@webiopi.macro
def dc_freq(freq):
global pwmD
pwmD.ChangeFrequency(int(freq))
@webiopi.macro
def dc_duty(duty):
global pwmD
pwmD.ChangeDutyCycle(float(duty))
@webiopi.macro
def pwm_stop():
global pwmS,pwmD
pwmD.stop()
pwmS.stop()
@webiopi.macro
def output_control(io,value):
GPIO.output(int(io),int(value))
@webiopi.macro
def sleep():
webiopi.sleep(5)
@webiopi.macro
def distance():
GPIO.output(17, True)
time.sleep(0.00001)
GPIO.output(17, False)
while GPIO.input(27) == 0:
signaloff = time.time()
while GPIO.input(27) == 1:
signalon = time.time()
timepassed = signalon - signaloff
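# Distance (cm) = round-trip time * speed of sound (~34000 cm/s) / 2 = timepassed * 17000.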
distance = timepassed * 17000
if(distance<150):
return distance
else:
return 150
#output_control(14,0)
#output_control(15,1)
#dc_duty(20)
#time.sleep(5)
|
from lewis.core import approaches
from lewis.core.statemachine import State
class UninitializedState(State):
NAME = "UninitializedState"
def on_entry(self, dt):
print("Entering uninitialized state")
class InitializedState(State):
NAME = "InitializedState"
def on_entry(self, dt):
print("Entering initialized state")
class MovingState(State):
NAME = "MovingState"
def in_state(self, dt):
device = self._context
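# approaches.linear nudges each value toward its setpoint at a fixed rate
# (10 units per second here), scaled by the elapsed time dt.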
device.a = approaches.linear(device.a, device.a_setpoint, 10, dt)
device.b = approaches.linear(device.b, device.b_setpoint, 10, dt)
device.c = approaches.linear(device.c, device.c_setpoint, 10, dt)
def on_entry(self, dt):
print("Entering moving state")
|
M = 998244353
n = int(input())
a = [int(x) for x in input().split()]
from math import log
"""l = len(str(a[0]))"""
ans = 0
"""for i in range(l+1):
for j in range(len(a)):
ans+= (10**(2*i))*(a[j]%10)%M
a[j]//=10
print((n*11*ans)%M)"""
def op(n):
out = 0
if(n==0):
return 0
d = int(log(n,10))+1
for i in range(d+1):
out+= 11*(10**(2*i))*(n%10)%M
n//=10
return out
for ai in a:
ans+= (n*op(ai))%M
ans%=M
print(ans)
|
import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
img = Image("creature.png")
def draw(canvas):
canvas.clear()
translate(250, 250)
scale(0.5)
t = canvas.frame % 100 * 0.01 # A number between 0.0 and 1.0.
t = smoothstep(0.0, 1.0, t) # Slow down ("ease") when nearing 0.0 or 1.0.
rotate(t * 360)
m = 1.0 - 2 * abs(0.5-t) # A number that goes from 0.0 to 1.0 and then back to 0.0.
scale(0.5+m)
# The image command has an optional "color" and an optional "alpha" parameter.
# The alpha sets the overall opacity of the image (alpha=0.25 means 75% transparent).
# The color adjusts the fill color of the image. By default it is (1,1,1,1),
# which means that the image pixels are mixed with white and remain unaffected.
# In this case, we lower the green component,
# so the creature gets more pink when it flies.
image(img, x=0, y=-img.height, color=color(1, 1-m, 1, 1), alpha=1.0)
# You can pass a Color object (i.e. returned from color()),
# or simply a (R,G,B)-tuple, which is faster because no Color needs to be constructed.
canvas.size = 500, 500
canvas.run(draw)
|
from ..imports import *
from .. import utils as U
from . import preprocessor as tpp
MAX_FEATURES = 20000
MAXLEN = 400
def texts_from_folder(datadir, classes=None,
max_features=MAX_FEATURES, maxlen=MAXLEN,
ngram_range=1,
train_test_names=['train', 'test'],
preprocess_mode='standard',
verbose=1):
"""
Returns corpus as sequence of word IDs.
Assumes corpus is in the following folder structure:
├── datadir
│ ├── train
│ │ ├── class0 # folder containing documents of class 0
│ │ ├── class1 # folder containing documents of class 1
│ │ ├── class2 # folder containing documents of class 2
│ │ └── classN # folder containing documents of class N
│ └── test
│ ├── class0 # folder containing documents of class 0
│ ├── class1 # folder containing documents of class 1
│ ├── class2 # folder containing documents of class 2
│ └── classN # folder containing documents of class N
If train and test contain additional subfolders that do not represent
classes, they can be ignored by explicitly listing the subfolders of
interest using the classes argument.
Args:
datadir (str): path to folder
classes (list): list of classes (subfolders to consider)
max_features (int): maximum number of unigrams to consider
maxlen (int): maximum length of tokens in document
ngram_range (int): If > 1, include n-grams up to this size (e.g., 2 adds bigrams, 3 adds bigrams and trigrams)
train_test_names (list): list of strings representing the subfolder
name for train and validation sets
preprocess_mode (str): Either 'standard' (normal tokenization) or 'bert'
tokenization and preprocessing for use with
BERT text classification model.
verbose (bool): verbosity
"""
# read in training and test corpora
train_str = train_test_names[0]
test_str = train_test_names[1]
train_b = load_files(os.path.join(datadir, train_str), shuffle=True, categories=classes)
test_b = load_files(os.path.join(datadir, test_str), shuffle=False, categories=classes)
x_train = [x.decode('utf-8') for x in train_b.data]
x_test = [x.decode('utf-8') for x in test_b.data]
y_train = train_b.target
y_test = test_b.target
# return the preprocessed texts
preproc_type = tpp.TEXT_PREPROCESSORS.get(preprocess_mode, None)
if preproc_type is None: raise ValueError('unsupported preprocess_mode')
preproc = preproc_type(maxlen,
max_features,
classes = train_b.target_names,
ngram_range=ngram_range)
trn = preproc.preprocess_train(x_train, y_train, verbose=verbose)
val = preproc.preprocess_test(x_test, y_test, verbose=verbose)
return (trn, val, preproc)
#if preprocess_mode == 'bert':
#if maxlen > 512: raise ValueError('BERT only supports maxlen <= 512')
#vocab_path = os.path.join(bertpath, 'vocab.txt')
#token_dict = {}
#with codecs.open(vocab_path, 'r', 'utf8') as reader:
#for line in reader:
#token = line.strip()
#token_dict[token] = len(token_dict)
#tokenizer = BERT_Tokenizer(token_dict)
#x_train = bert_tokenize(x_train, tokenizer, maxlen)
#x_test = bert_tokenize(x_test, tokenizer, maxlen)
#return (x_train, y_train), (x_test, y_test), None
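# Example call (a sketch; assumes an IMDB-style layout with 'pos'/'neg' subfolders
# under each of datadir/train and datadir/test):
# trn, val, preproc = texts_from_folder('data/aclImdb', classes=['pos', 'neg'],
#                                       preprocess_mode='standard')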
def texts_from_csv(train_filepath,
text_column,
label_columns = [],
val_filepath=None,
max_features=MAX_FEATURES, maxlen=MAXLEN,
val_pct=0.1, ngram_range=1, preprocess_mode='standard', verbose=1):
"""
Loads text data from a CSV file. Class labels are assumed to be one of the following:
1. integers representing classes (e.g., 1,2,3,4)
2. one-hot-encoded arrays representing classes
classification (a single one in each array): [[1,0,0], [0,1,0]]]
multi-label classification (one more ones in each array): [[1,1,0], [0,1,1]]
Args:
train_filepath(str): file path to training CSV
text_column(str): name of column containing the text
label_columns(list): list of columns that are to be treated as labels
val_filepath(string): file path to test CSV. If not supplied,
10% of documents in training CSV will be
used for testing/validation.
max_features(int): max num of words to consider in vocabulary
maxlen(int): each document can be at most <maxlen> words. 0 is used as the padding ID.
ngram_range(int): size of multi-word phrases to consider
e.g., 2 will consider both 1-word phrases and 2-word phrases
limited by max_features
val_pct(float): Proportion of training to use for validation.
Has no effect if val_filepath is supplied.
preprocess_mode (str): Either 'standard' (normal tokenization) or 'bert'
tokenization and preprocessing for use with
BERT text classification model.
verbose (boolean): verbosity
"""
# read in train and test data
train = pd.read_csv(train_filepath)
x = train[text_column].fillna('fillna').values
y = train[label_columns].values
if val_filepath is not None:
test = pd.read_csv(val_filepath)
x_test = test[text_column].fillna('fillna').values
y_test = test[label_columns].values
x_train = x
y_train = y
else:
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=val_pct)
y_train = np.squeeze(y_train)
y_test = np.squeeze(y_test)
# return the preprocessed texts
preproc_type = tpp.TEXT_PREPROCESSORS.get(preprocess_mode, None)
if preproc_type is None: raise ValueError('unsupported preprocess_mode')
preproc = preproc_type(maxlen,
max_features,
classes = label_columns,
ngram_range=ngram_range)
trn = preproc.preprocess_train(x_train, y_train, verbose=verbose)
val = preproc.preprocess_test(x_test, y_test, verbose=verbose)
return (trn, val, preproc)
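# Usage sketch (hypothetical file and column names): a CSV with a 'text' column and
# one-hot label columns 'pos'/'neg', holding out 10% of the rows for validation.
#
# (trn, val, preproc) = texts_from_csv('data/train.csv',
#                                      text_column='text',
#                                      label_columns=['pos', 'neg'],
#                                      val_pct=0.1,
#                                      preprocess_mode='standard')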
def texts_from_array(x_train, y_train, x_test=None, y_test=None,
class_names = [],
max_features=MAX_FEATURES, maxlen=MAXLEN,
val_pct=0.1, ngram_range=1, preprocess_mode='standard', verbose=1):
"""
Loads and preprocesses text data from arrays.
Args:
x_train(list): list of training texts
y_train(list): list of integers representing classes
x_test(list): list of validation/test texts
y_test(list): list of integers representing classes for the validation/test texts
class_names (list): list of strings representing class labels
(label arrays should have shape (num_examples,1) or (num_examples,))
max_features(int): max num of words to consider in vocabulary
maxlen(int): each document can be at most <maxlen> words. 0 is used as the padding ID.
ngram_range(int): size of multi-word phrases to consider
e.g., 2 will consider both 1-word phrases and 2-word phrases
limited by max_features
val_pct(float): Proportion of training to use for validation.
Has no effect if x_test and y_test are supplied.
preprocess_mode (str): Either 'standard' (normal tokenization) or 'bert'
tokenization and preprocessing for use with
BERT text classification model.
verbose (boolean): verbosity
"""
if not class_names:
classes = list(set(y_train))
classes.sort()
class_names = ["%s" % (c) for c in classes]
if x_test is None or y_test is None:
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=val_pct)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# return the preprocessed texts
preproc_type = tpp.TEXT_PREPROCESSORS.get(preprocess_mode, None)
if preproc_type is None: raise ValueError('unsupported preprocess_mode')
preproc = preproc_type(maxlen,
max_features,
classes = class_names,
ngram_range=ngram_range)
trn = preproc.preprocess_train(x_train, y_train, verbose=verbose)
val = preproc.preprocess_test(x_test, y_test, verbose=verbose)
return (trn, val, preproc)
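# Usage sketch (toy in-memory data): class names are inferred from y_train when
# class_names is omitted, and val_pct of the pairs become the validation set.
#
# x = ['great movie', 'terrible plot', 'loved it', 'boring']
# y = [1, 0, 1, 0]
# (trn, val, preproc) = texts_from_array(x, y, maxlen=50, preprocess_mode='standard')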
|
"""
Copyright 2013 Rackspace, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from teeth_overlord import models
from teeth_overlord import tests
class TestChassisAPI(tests.TeethAPITestCase):
def setUp(self):
super(TestChassisAPI, self).setUp()
self.url = '/v1/chassis'
self.chassis_objects_mock = self.add_mock(models.Chassis)
self.chassis1 = models.Chassis(id='chassis1',
state=models.ChassisState.READY)
self.chassis2 = models.Chassis(id='chassis2',
state=models.ChassisState.BUILD,
instance_id="instance_id")
def test_list_chassis_some(self):
self.list_some(models.Chassis,
self.chassis_objects_mock,
self.url,
[self.chassis1, self.chassis2])
def test_list_chassis_none(self):
self.list_none(models.Chassis,
self.chassis_objects_mock,
self.url,
[self.chassis1, self.chassis2])
def test_fetch_chassis_one(self):
self.fetch_one(models.Chassis,
self.chassis_objects_mock,
self.url,
[self.chassis1, self.chassis2])
def test_fetch_chassis_none(self):
self.fetch_none(models.Chassis,
self.chassis_objects_mock,
self.url,
[self.chassis1, self.chassis2])
def test_delete_chassis_none(self):
self.delete_none(models.Chassis,
self.chassis_objects_mock,
self.url,
[self.chassis1, self.chassis2])
def test_create_chassis(self):
return_value = [
models.ChassisModel(id='chassis_model_id', name='chassis_model'),
]
self.add_mock(models.ChassisModel, return_value=return_value)
data = {
'chassis_model_id': 'chassis_model_id',
}
response = self.make_request('POST', self.url, data=data)
# get the saved instance
chassis_save_mock = self.get_mock(models.Chassis, 'save')
self.assertEqual(chassis_save_mock.call_count, 1)
chassis = chassis_save_mock.call_args[0][0]
self.assertEqual(chassis.chassis_model_id, 'chassis_model_id')
self.assertEqual(response.status_code, 201)
self.assertEqual(response.headers['Location'],
'http://localhost{url}/{id}'.format(url=self.url,
id=chassis.id))
def test_create_chassis_deleted_chassis_model(self):
self.add_mock(models.ChassisModel,
return_value=[models.ChassisModel(id='chassis_model_id',
name='chassis_model',
deleted=True)])
response = self.make_request(
'POST',
self.url,
data={'chassis_model_id': 'chassis_model_id'})
data = json.loads(response.data)
self.assertEqual(response.status_code, 400)
self.assertEqual(data['message'], 'Invalid request body')
self.assertTrue('ChassisModel is deleted' in data['details'])
def test_create_chassis_bad_chassis_model(self):
self.add_mock(models.ChassisModel,
side_effect=models.ChassisModel.DoesNotExist)
data = {
'chassis_model_id': 'does_not_exist',
}
response = self.make_request('POST', self.url, data=data)
data = json.loads(response.data)
self.assertEqual(response.status_code, 400)
self.assertEqual(data['message'], 'Invalid request body')
self.assertEqual(self.get_mock(models.Chassis, 'save').call_count, 0)
def test_delete_chassis(self):
self.chassis_objects_mock.return_value = [self.chassis1]
response = self.make_request('DELETE',
'{url}/{id}'.format(url=self.url,
id=self.chassis1.id))
self.assertEqual(response.status_code, 204)
save_mock = self.get_mock(models.Chassis, "save")
self.assertEqual(save_mock.call_count, 1)
chassis = save_mock.call_args[0][0]
self.assertEqual(chassis.state, models.ChassisState.DELETED)
def test_delete_chassis_already_deleted(self):
self.chassis1.state = models.ChassisState.DELETED
self.chassis_objects_mock.return_value = [self.chassis1]
response = self.make_request('DELETE',
'{url}/{id}'.format(url=self.url,
id=self.chassis1.id))
self.assertEqual(response.status_code, 403)
save_mock = self.get_mock(models.Chassis, "save")
self.assertEqual(save_mock.call_count, 0)
data = json.loads(response.data)
self.assertEqual(data['message'], 'Object already deleted')
def test_delete_chassis_with_active_instance(self):
self.chassis_objects_mock.return_value = [self.chassis2]
response = self.make_request('DELETE',
'{url}/{id}'.format(url=self.url,
id=self.chassis2.id))
self.assertEqual(response.status_code, 403)
save_mock = self.get_mock(models.Chassis, "save")
self.assertEqual(save_mock.call_count, 0)
data = json.loads(response.data)
self.assertEqual(data['message'], 'Object cannot be deleted')
|
#! /usr/bin/env python
import argparse
import sys
import geopandas as gpd
import numpy as np
import pandas as pd
from shapely.geometry import Point
############################
# set the command line argument parser
parser = argparse.ArgumentParser(description="Converts vector data to maxent format csv data")
# set input/output file arguments
parser.add_argument("-i", "--input", help="path to vector file", type=str, required=True)
parser.add_argument("-o", "--output", help="path to the output csv", type=str, required=True)
# set the attribute field to identify the species
parser.add_argument("-f", "--field", help="the attribute field to set as the species", type=str, default="species")
# allow user to set an attribute field as the x/y data
parser.add_argument("--xfield", help="use a certain attribute field as the Y data", default=None)
parser.add_argument("--yfield", help="use a certain attribute field as the Y data", default=None)
# now various flags and arguments
parser.add_argument("-e", "--epsg", help="the output projection", default=None)
# finally, parse the arguments
args = parser.parse_args()
############################
# set a couple of useful functions
def getXY(pt):
return (pt.x, pt.y)
def read_input(path):
try:
data = gpd.read_file(path)
geo = True
except FileNotFoundError:
try:
data = pd.read_csv(path)
geo = False
except FileNotFoundError:
print("[ ERROR! ]: Unable to read file: {}".format(path))
print("[ ERROR! ]: Please ensure it is a vector file or a CSV")
sys.exit(1)
return data, geo
def check_fields(df, args, geo):
# get the vector attributes
attributes = df.columns.tolist()
# check the species field is listed as a column in the dataframe
if args.field not in attributes:
print("[ ERROR! ]: Field set for species ID: {}".format(args.field))
print("[ ERROR! ]: is not a vector attribute - select from the following:")
print("[ ERROR! ]: [ {} ]".format(", ".join(attributes)))
sys.exit(1)
# if geometry is set as an attribute, check they are listed in the dataframe
if args.xfield is not None:
if args.xfield not in attributes:
print("[ ERROR! ]: Field set for x data: {}".format(args.xfield))
print("[ ERROR! ]: is not an attribute - select from the following:")
print("[ ERROR! ]: [ {} ]".format(", ".join(attributes)))
sys.exit(1)
if args.yfield is not None:
if args.yfield not in attributes:
print("[ ERROR! ]: Field set for y data: {}".format(args.yfield))
print("[ ERROR! ]: is not a vector attribute - select from the following:")
print("[ ERROR! ]: [ {} ]".format(", ".join(attributes)))
sys.exit(1)
# or, if the input data are not a vector, ensure that the x, y, and projection info are set
if not geo:
if args.xfield not in attributes:
print("[ ERROR! ]: Field set for x data (using --xfield): {}".format(args.xfield))
print("[ ERROR! ]: is either not set or is not an attribute - select from the following:")
print("[ ERROR! ]: [ {} ]".format(", ".join(attributes)))
sys.exit(1)
if args.yfield not in attributes:
print("[ ERROR! ]: Field set for y data (using --yfield): {}".format(args.yfield))
print("[ ERROR! ]: is either not set or is not an attribute - select from the following:")
print("[ ERROR! ]: [ {} ]".format(", ".join(attributes)))
sys.exit(1)
############################
# perform the file conversion
vector, geo = read_input(args.input)
# check that the fields passed as arguments are correct
check_fields(vector, args, geo)
# report starting
print("[ STATUS ]: Running vector-to-maxent")
print("[ STATUS ]: Converting input file : {}".format(args.input))
print("[ STATUS ]: To maxent-format file : {}".format(args.output))
# if the input data are vectors, pull the x/y from the geometry and add to new columns
if geo:
# first, reproject if set
if args.epsg is not None:
vector.to_crs(epsg=args.epsg, inplace=True)
# get the centroids to put the data in a useful format
centroids = vector["geometry"].centroid
# pull the x/y for each point
x, y = [list(pt) for pt in zip(*map(getXY, centroids))]
# then slap 'em on as new columns
vector["x"] = x
vector["y"] = y
# and set them as the x and yfields if not already passed by the user
if args.xfield is None:
args.xfield = "x"
if args.yfield is None:
args.yfield = "y"
# create a new dataframe with just the species/x/y data
labels_input = [args.field, args.xfield, args.yfield]
labels_output = {args.field: "species", args.xfield: "X", args.yfield: "Y"}
maxent_df = vector[labels_input].rename(columns=labels_output)
# then write the output file
maxent_df.to_csv(args.output, index=False)
# celebrate widely!
print("[ STATUS ]: Finished vector-to-maxent!")
print("[ STATUS ]: See output file: {}".format(args.output))
|
import pytest
from sfa_dash.conftest import BASE_URL
@pytest.fixture
def upload_routes(forecast_id, observation_id, cdf_forecast_id):
return [
f'/forecasts/single/{forecast_id}/upload',
f'/observations/{observation_id}/upload',
f'/forecasts/cdf/single/{cdf_forecast_id}/upload']
@pytest.fixture(params=[0,1,2])
def single_upload_route(request, upload_routes):
return upload_routes[request.param]
def test_upload_get_not_allowed(client, single_upload_route):
resp = client.get(single_upload_route,
base_url=BASE_URL)
assert resp.status_code == 405
|
from abc import abstractmethod
from typing import Union, Callable
import numpy as np
import scipy.integrate as integrate
from beartype import beartype
from UQpy.distributions.baseclass import Distribution
import warnings
from UQpy.distributions.collection import Uniform, Normal
warnings.filterwarnings('ignore')
class Polynomials:
@beartype
def __init__(self, distributions: Union[Distribution, list[Distribution]], degree: int):
"""
Class for polynomials used for the polynomial_chaos method.
:param distributions: Object from a distribution class.
:param degree: Maximum degree of the polynomials.
"""
self.distributions = distributions
self.degree = degree + 1
@staticmethod
def standardize_normal(tensor: np.ndarray, mean: float, std: float):
"""
Static method: Standardize data based on the standard normal distribution :math:`\mathcal{N}(0,1)`.
:param tensor: Input data generated from a normal distribution.
:param mean: Mean value of the original normal distribution.
:param std: Standard deviation of the original normal distribution.
:return: Standardized data.
"""
return (tensor - mean) / std
@staticmethod
def standardize_uniform(x, uniform):
loc = uniform.get_parameters()['loc'] # loc = lower bound of uniform distribution
scale = uniform.get_parameters()['scale']
upper = loc + scale # upper bound = loc + scale
return (2 * x - loc - upper) / (upper - loc)
@staticmethod
def normalized(degree: int, samples: np.ndarray, a: float, b: float, pdf_st: Callable, p: list):
"""
Calculates design matrix and normalized polynomials.
:param degree: polynomial degree
:param samples: Input samples.
:param a: Left bound of the support of the distribution.
:param b: Right bound of the support of the distribution.
:param pdf_st: Pdf function generated from :py:mod:`UQpy` distribution object.
:param p: List containing the orthogonal polynomials generated with scipy.
:return: Design matrix, normalized polynomials
"""
pol_normed = []
m = np.zeros((degree, degree))
for i in range(degree):
for j in range(degree):
int_res = integrate.quad(
lambda k: p[i](k) * p[j](k) * pdf_st(k),
a,
b,
epsabs=1e-15,
epsrel=1e-15,
)
m[i, j] = int_res[0]
pol_normed.append(p[i] / np.sqrt(m[i, i]))
a = np.zeros((samples.shape[0], degree))
for i in range(samples.shape[0]):
for j in range(degree):
a[i, j] = pol_normed[j](samples[i])
return a, pol_normed
def get_mean(self):
"""
Returns a :any:`float` with the mean of the :py:mod:`UQpy` distribution object.
"""
m = self.distributions.moments(moments2return="m")
return m
def get_std(self):
"""
Returns a :any:`float` with the standard deviation of the :py:mod:`UQpy` distribution object.
"""
s = np.sqrt(self.distributions.moments(moments2return="v"))
return s
def location(self):
"""
Returns a :any:`float` with the location of the :py:mod:`UQpy` distribution object.
"""
m = self.distributions.__dict__["parameters"]["location"]
return m
def scale(self):
"""
Returns a :any:`float` with the scale of the :py:mod:`UQpy` distribution object.
"""
s = self.distributions.__dict__["parameters"]["scale"]
return s
@abstractmethod
def evaluate(self, x: np.ndarray):
pass
distribution_to_polynomial = { }
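# Minimal self-check sketch (assumes the Uniform(loc, scale) constructor imported above):
# standardize_uniform maps samples on [loc, loc + scale] onto [-1, 1].
if __name__ == "__main__":
    u = Uniform(loc=2.0, scale=4.0)  # support [2, 6]
    x = np.array([2.0, 4.0, 6.0])
    print(Polynomials.standardize_uniform(x, u))  # expected: [-1.  0.  1.]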
|
#factorial(N) = N * factorial(N-1)
# e.g. factorial(4) = 4 * factorial(3)
#...
#factorial(1) = 1
def factorial(n):
if n == 1:
return 1
# fill in this part!
return n * factorial(n-1)
print(factorial(5))
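# worked example: factorial(5) = 5 * 4 * 3 * 2 * 1 = 120, so the call above prints 120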
|
# encoding: utf-8
from django.db import models
from cool.model import descriptors
class ForeignKey(models.ForeignKey):
"""
ForeignKey field that automatically uses the model cache to fetch the related object
"""
forward_related_accessor_class = descriptors.ForwardManyToOneCacheDescriptor
class OneToOneField(models.OneToOneField):
"""
OneToOne field that automatically uses the model cache to fetch the related object
"""
forward_related_accessor_class = descriptors.ForwardOneToOneCacheDescriptor
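# Usage sketch (hypothetical models; assumes the related model participates in
# cool's model cache so the lookup can be served from it instead of the database):
#
# class Author(models.Model):
#     name = models.CharField(max_length=64)
#
# class Book(models.Model):
#     author = ForeignKey(Author, on_delete=models.CASCADE)
#
# Book.objects.first().author  # resolved through ForwardManyToOneCacheDescriptor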
|
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
parser = argparse.ArgumentParser(
description="extracts the total xor differnces from an xor log"
)
parser.add_argument("--log_file", "-l", required=True, help="log file")
parser.add_argument(
"--output", "-o", required=True, help="output file to store results"
)
args = parser.parse_args()
log_file_name = args.log_file
out_file_name = args.output
string = "XOR differences:"
pattern = re.compile(r"\s*%s\s*(\d+)" % string)
tot_cnt = 0
with open(log_file_name, "r") as f:
for line in f:
m = pattern.match(line)
if m:
tot_cnt += int(m.group(1))
with open(out_file_name, "w") as out_file:
out_file.write("Total XOR differences = " + str(tot_cnt))
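# Quick sanity check of the pattern on a hypothetical log line (not taken from a real log):
# pattern.match("  XOR differences: 12").group(1) == "12"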
|
del (*x,)
|
from collections import Counter
from functools import reduce
import logging
from operator import add
import os
from pathlib import Path
from typing import Any, Dict, Optional
import calvin_agent
from calvin_agent.training import is_multi_gpu_training, log_rank_0
import hydra
import numpy as np
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Callback, LightningModule, seed_everything, Trainer
from pytorch_lightning.plugins import DDPPlugin
from pytorch_lightning.utilities import rank_zero_only
import torch
import torch.distributed as dist
from torch.nn import Linear
"""This script will collect data snt store it with a fixed window size"""
logger = logging.getLogger(__name__)
def merge_data(list_of_data):
merged_data = {
"language": {"ann": [], "task": [], "emb": []},
"info": {"episodes": [], "indx": []},
}
for d in list_of_data:
for k in d:
for k2, v2 in d[k].items():
if isinstance(v2, list):
merged_data[k][k2] += v2
elif isinstance(v2, np.ndarray) and len(merged_data[k][k2]) == 0:
merged_data[k][k2] = v2
elif isinstance(v2, np.ndarray) and len(merged_data[k][k2]) != 0:
merged_data[k][k2] = np.concatenate((merged_data[k][k2], v2), axis=0)
else:
print(type(v2))
raise ValueError
return merged_data
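# e.g. merging two per-rank results (illustrative values only):
#   merge_data([{"language": {"ann": ["push the button"], "task": ["push"], "emb": []},
#                "info": {"episodes": [], "indx": [(0, 64)]}},
#               {"language": {"ann": ["open the drawer"], "task": ["open"], "emb": []},
#                "info": {"episodes": [], "indx": [(64, 128)]}}])
# concatenates the per-key lists (and stacks any ndarrays) into one dict of the same shape.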
class Annotator(Callback):
def __init__(self, cfg):
self.envs = None # type: Any
self.cfg = cfg
self.device = None
self.lang_folder = cfg.lang_folder
self.tasks = hydra.utils.instantiate(cfg.callbacks.rollout.tasks)
self.demo_task_counter_train = Counter() # type: Counter[str]
self.demo_task_counter_val = Counter() # type: Counter[str]
self.train_dataset = None
self.val_dataset = None
self.file_name = "auto_lang_ann.npy" # + save_format
self.train_lang_folder = None
self.val_lang_folder = None
self.collected_data_train = {
"language": {"ann": [], "task": [], "emb": []},
"info": {"episodes": [], "indx": []},
} # type: Dict
self.collected_data_val = {
"language": {"ann": [], "task": [], "emb": []},
"info": {"episodes": [], "indx": []},
} # type: Dict
self.lang_model = None
self.num_samples_train = None
self.num_samples_val = None
self.finished_annotation_val = False
self.scene_idx_info = None
@rank_zero_only
def create_folders(self):
self.train_lang_folder = self.train_dataset.abs_datasets_dir / self.lang_folder
self.train_lang_folder.mkdir(parents=True, exist_ok=True)
self.val_lang_folder = self.val_dataset.abs_datasets_dir / self.lang_folder
self.val_lang_folder.mkdir(parents=True, exist_ok=True)
@rank_zero_only
def compute_val_embeddings(self):
val_sent = OmegaConf.load(
Path(calvin_agent.__file__).parent / f"../conf/annotations/{self.cfg.rollout_sentences}.yaml"
)
embeddings = {}
for task, ann in val_sent.items():
embeddings[task] = {}
language_embedding = self.lang_model(list(ann))
embeddings[task]["emb"] = language_embedding.cpu().numpy()
embeddings[task]["ann"] = ann
np.save(self.val_lang_folder / "embeddings", embeddings)
logger.info("Done saving val language embeddings for Rollouts !")
def init_vars(self, trainer, pl_module):
self.device = pl_module.device
self.val_dataset = trainer.val_dataloaders[0].dataset.datasets["vis"] # type: ignore
self.train_dataset = trainer.train_dataloader.dataset.datasets["vis"]
self.scene_idx_info = np.load(self.train_dataset.abs_datasets_dir / "scene_info.npy", allow_pickle=True).item()
self.envs = {
scene: hydra.utils.instantiate(
self.cfg.callbacks.rollout.env_cfg, self.val_dataset, pl_module.device, scene=scene
)
for scene, _ in self.scene_idx_info.items()
}
if self.cfg.validation_scene not in self.envs:
self.envs[self.cfg.validation_scene] = hydra.utils.instantiate(
self.cfg.callbacks.rollout.env_cfg,
self.val_dataset,
pl_module.device,
scene=self.cfg.validation_scene,
cameras=(),
)
self.create_folders()
self.lang_model = hydra.utils.instantiate(self.cfg.model)
self.compute_val_embeddings()
self.num_samples_train = int(self.cfg.eps * len(self.train_dataset) / len(self.cfg.annotations.keys()))
self.num_samples_val = int(self.cfg.eps * len(self.val_dataset) / len(self.cfg.annotations.keys()))
def on_validation_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""Called when the validation loop begins."""
if self.envs is None:
self.init_vars(trainer, pl_module)
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
if self.envs is None:
self.init_vars(trainer, pl_module)
def on_validation_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Any,
batch: Any,
batch_idx: int,
dataloader_idx: int,
) -> None:
batch = batch["vis"] if isinstance(batch, dict) else batch
self.collected_data_val, self.demo_task_counter_val, current_task_counter = self.annotate(
batch,
self.val_dataset,
self.collected_data_val,
self.demo_task_counter_val,
self.num_samples_val,
)
if dist.is_available() and dist.is_initialized():
global_counters = [None for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather_object(global_counters, current_task_counter)
current_task_counter = reduce(add, global_counters)
self.demo_task_counter_val += current_task_counter
if self.check_done(
self.demo_task_counter_val, self.num_samples_val, batch_idx, trainer.num_val_batches[0], "val"
):
print()
print()
print()
logger.info("Finished annotating val dataset")
print()
print()
print()
self.finished_annotation_val = True
def on_train_batch_end( # type: ignore
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Any,
batch: Any,
batch_idx: int,
dataloader_idx: int,
) -> None:
batch = batch["vis"] if isinstance(batch, dict) else batch
self.collected_data_train, self.demo_task_counter_train, current_task_counter = self.annotate(
batch, self.train_dataset, self.collected_data_train, self.demo_task_counter_train, self.num_samples_train
)
if dist.is_available() and dist.is_initialized():
global_counters = [None for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather_object(global_counters, current_task_counter)
current_task_counter = reduce(add, global_counters)
self.demo_task_counter_train += current_task_counter
if self.check_done(
self.demo_task_counter_train, self.num_samples_train, batch_idx, trainer.num_training_batches, "train"
):
print()
print()
print()
log_rank_0("Finished annotating train dataset")
print()
print()
print()
pl_module.finished_annotation_train = True # type: ignore
def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule, unused: Optional[int] = None) -> None:
self.save_and_postprocess(self.collected_data_train, self.train_lang_folder, "train", len(self.train_dataset))
def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
self.save_and_postprocess(self.collected_data_val, self.val_lang_folder, "val", len(self.val_dataset))
def save_and_postprocess(self, collected_data, lang_folder, mod, length):
if dist.is_available() and dist.is_initialized():
global_collected_data = [None for _ in range(dist.get_world_size())]
torch.distributed.all_gather_object(global_collected_data, collected_data)
if dist.get_rank() == 0:
global_collected_data = merge_data(global_collected_data)
np.save("lang_ann", global_collected_data)
else:
np.save("lang_ann", collected_data)
if self.cfg.postprocessing:
language = collected_data["language"]["ann"]
language_embedding = self.lang_model(language)
collected_data["language"]["emb"] = language_embedding.cpu().numpy()
logger.info(f"Done extracting {mod} language embeddings !")
if dist.is_available() and dist.is_initialized():
global_collected_data = [None for _ in range(dist.get_world_size())]
torch.distributed.all_gather_object(global_collected_data, collected_data)
if dist.get_rank() != 0:
return
collected_data = merge_data(global_collected_data)
np.save(self.file_name, collected_data)
np.save(lang_folder / self.file_name, collected_data)
logger.info(f"Done saving {mod} language annotations !")
lang_length = float(len(collected_data["language"]["ann"]))
logger.info(
f"\nVision Dataset contains {length} datapoints "
f"\nLanguage Dataset contains {lang_length} datapoints "
f"\n VISION --> {100.0 * length / (length + lang_length):.3f} %"
f"\n LANGUAGE --> {100.0 * lang_length / (length + lang_length):.3f} %"
)
def check_done(self, counter, num_samples, batch_idx, num_batches, mode):
if batch_idx % 10 == 0:
log_rank_0(f"{mode} Tasks Objective: {num_samples}")
log_rank_0(f"Tasks Lang: {self.cfg.annotations.keys()}")
log_rank_0(f"Tasks Annotations Progress: {counter}")
log_rank_0(
"Progress [ "
+ "=" * int(0.5 * 100 * batch_idx / num_batches)
+ ">"
+ "-" * int(0.5 * 100 * (num_batches - batch_idx) / num_batches)
+ str(round(100 * batch_idx / num_batches))
+ "%"
+ "]"
)
return len(counter.values()) >= len(self.cfg.annotations) and min(counter.values()) >= num_samples
def select_env(self, dataset, idx):
if "validation" in dataset.abs_datasets_dir.as_posix():
return self.envs[self.cfg.validation_scene]
seq_idx = dataset.episode_lookup[idx]
for scene, interval in self.scene_idx_info.items():
if interval[0] <= seq_idx <= interval[1]:
return self.envs[scene]
raise ValueError
def annotate(self, episode, dataset, collected_data, global_task_counter, num_samples):
state_obs = episode["robot_obs"]
reset_info = episode["state_info"]
idx = episode["idx"]
batch_size, seq_length = state_obs.shape[0], state_obs.shape[1]
current_task_counter = Counter()
for i in range(batch_size):
env = self.select_env(dataset, idx[i])
# reset env to state of last step in the episode (goal state)
env.reset(reset_info, i, -1)
goal_info = env.get_info()
prior_steps = np.random.randint(16, 32)
env.reset(reset_info, i, prior_steps)
middle_info = env.get_info()
env.reset(reset_info, i, seq_length - 16)
close_to_end_info = env.get_info()
# check if task was achieved in sequence
task_info = self.tasks.get_task_info(middle_info, goal_info)
if (
len(task_info) != 1
or not task_info <= self.cfg.annotations.keys()
or len(self.tasks.get_task_info_for_set(middle_info, close_to_end_info, task_info))
):
continue
task = list(task_info)[0]
if global_task_counter[task] + current_task_counter[task] >= num_samples:
continue
# reset self.env to state of first step in the episode
env.reset(reset_info, i, 0)
start_info = env.get_info()
env.reset(reset_info, i, 32)
middle_info2 = env.get_info()
if len(self.tasks.get_task_info_for_set(start_info, goal_info, task_info)) and not len(
self.tasks.get_task_info(start_info, middle_info2)
):
start_idx = idx[i]
window_size = seq_length
else:
start_idx = idx[i] + prior_steps
window_size = seq_length - prior_steps
# seq_length = torch.unique(actions[i], dim=0).shape[0]
current_task_counter += Counter(task_info)
collected_data = self.label_seq(collected_data, dataset, window_size, start_idx, task)
return collected_data, global_task_counter, current_task_counter
def label_seq(self, collected_data, dataset, seq_length, idx, task):
seq_idx = dataset.episode_lookup[idx]
collected_data["info"]["indx"].append((seq_idx, seq_idx + seq_length))
task_lang = self.cfg.annotations[task]
lang_ann = task_lang[np.random.randint(len(task_lang))]
collected_data["language"]["ann"].append(lang_ann)
collected_data["language"]["task"].append(task)
return collected_data
class LangAnnotationModel(LightningModule):
def __init__(self):
super().__init__()
self.finished_annotation_train = False
self.dummy_net = Linear(1, 1)
def on_train_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None: # type: ignore
if self.finished_annotation_train:
return -1 # type: ignore
def training_step(self, batch, batch_idx):
return self.dummy_net(torch.Tensor([0.0]).to(self.device))
def validation_step(self, *args, **kwargs):
pass
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02)
@hydra.main(config_path="../../conf", config_name="lang_ann.yaml")
def main(cfg: DictConfig) -> None:
os.environ["TOKENIZERS_PARALLELISM"] = "true"
# sets seeds for numpy, torch, python.random and PYTHONHASHSEED.
seed_everything(cfg.seed)
datamodule = hydra.utils.instantiate(cfg.datamodule)
callbacks = Annotator(cfg)
dummy_model = LangAnnotationModel()
trainer_args = {
**cfg.trainer,
"callbacks": callbacks,
"num_sanity_val_steps": 0,
"max_epochs": 1,
"progress_bar_refresh_rate": 0,
"weights_summary": None,
}
# Configure multi-GPU training
if is_multi_gpu_training(trainer_args["gpus"]): # type: ignore
trainer_args["accelerator"] = "ddp"
trainer_args["plugins"] = DDPPlugin(find_unused_parameters=False)
trainer = Trainer(**trainer_args)
trainer.fit(dummy_model, datamodule=datamodule)
trainer.validate(dummy_model, datamodule=datamodule) # type: ignore
if __name__ == "__main__":
main()
|
import numpy as np
'''
np.dot(a, b) # dot product of matrices a and b
np.linalg.inv(a) # inverse of matrix a
'''
# GPS positioning
def main():
i = 0
c = 0.299792458
x = np.zeros((6, 4)) # stores the (x, y, z, t) parameters of the 6 satellites
while i < 6:
temp = input()
x[i] = temp.split()
j = 0
while j < 4:
x[i][j] = float(x[i][j])
j = j + 1
i = i + 1
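# Linearization: subtracting the pseudorange (sphere) equation of satellite 5, used as
# the reference, from those of satellites 0-3 cancels the quadratic terms in the unknown
# receiver coordinates, leaving the 4x4 linear system a @ [x, y, z, t]^T = b solved below.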
a = np.zeros((4, 4)) # coefficient matrix
b = np.zeros((4, 1)) # constant terms
j = 0
while j < 4:
a[j][0] = 2 * (x[5][0] - x[j][0])
a[j][1] = 2 * (x[5][1] - x[j][1])
a[j][2] = 2 * (x[5][2] - x[j][2])
a[j][3] = 2 * c * c * (x[j][3] - x[5][3])
b[j][0] = x[5][0] * x[5][0] - x[j][0] * x[j][0] + \
x[5][1] * x[5][1] - x[j][1] * x[j][1] + \
x[5][2] * x[5][2] - x[j][2] * x[j][2] + \
c * c * (x[j][3] * x[j][3] - x[5][3] * x[5][3])
j = j + 1
a = np.linalg.inv(a) # invert the coefficient matrix
print(np.dot(a, b))
if __name__ == "__main__":
main()
|
"""
OpenVINO DL Workbench
Parameters for Model Optimizer related endpoints
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from numbers import Real
from pathlib import Path
from typing import Callable, Iterable, List, Mapping, Optional
from config.constants import PREDEFINED_CONFIG_NAMES_TO_PATH_MAP
from wb.main.enumerates import SupportedFrameworksEnum, ModelPrecisionEnum, ModelColorChannelsEnum, LayoutDimValuesEnum
class Param:
# pylint: disable=too-many-arguments
def __init__(self, param_name: str, cli_arg_name: str = None,
required: bool = False, scope: set = None,
validate: Callable = None, to_arg: Callable = None, to_param: Callable = None):
self.param_name = param_name
self.cli_arg_name = cli_arg_name
self.required = required
self.scope = scope or {'general'}
self.validate = validate if validate else lambda v: isinstance(v, str)
self.to_arg = to_arg if to_arg else lambda v: v
self.to_param = to_param if to_param else lambda v: v
class InputsParam(Param):
keys_config = {
'name': {
'required': True,
'validate': lambda v: isinstance(v, str),
},
'shape': {
'required': False,
'validate': lambda v: isinstance(v, list) and all(isinstance(element, int) for element in v),
},
'means': {
'required': False,
'validate': lambda v: isinstance(v, list) and all(isinstance(element, Real) for element in v),
},
'scales': {
'required': False,
'validate': lambda v: isinstance(v, list) and all(isinstance(element, Real) for element in v),
},
'freezePlaceholderWithValue': {
'required': False,
'validate': lambda v: isinstance(v, str),
},
'layout': {
'required': False,
'validate': lambda v: InputsParam._validate_layout(v),
}
}
def __init__(self, param_name):
super().__init__(param_name, validate=self._validate)
self.to_arg = None
self.to_param = None
@staticmethod
def _validate_layout(layout: List[str]) -> bool:
try:
if not all(isinstance(l, str) and LayoutDimValuesEnum(l) for l in layout):
return False
except ValueError:
return False
return True
@classmethod
def validate_element(cls, element: Mapping) -> Optional[Mapping]:
errors = {
'unknown': [],
'missing': [],
'invalid': {},
}
required_keys = set(k for k, p in cls.keys_config.items() if p['required'])
errors['missing'] = list(required_keys - set(element.keys()))
for key, value in element.items():
if key not in cls.keys_config:
errors['unknown'].append(key)
continue
if not cls.keys_config[key]['validate'](value):
errors['invalid'][key] = value
return errors if any(errors.values()) else None
@classmethod
def _validate(cls, value: Iterable[Mapping]) -> bool:
return bool(value) and isinstance(value, list) and not any(cls.validate_element(e) for e in value)
class MOForm:
params_config = [
Param(
'batch',
cli_arg_name='batch',
validate=lambda v: isinstance(v, int) and v > 0,
),
Param(
'dataType', # precision
cli_arg_name='data_type',
required=True,
validate=lambda v: v in (ModelPrecisionEnum.fp16.value, ModelPrecisionEnum.fp32.value),
),
Param(
'originalChannelsOrder',
cli_arg_name='reverse_input_channels',
required=True,
validate=lambda v: v in ModelColorChannelsEnum.values(),
),
Param(
'originalLayout',
cli_arg_name='disable_nhwc_to_nchw',
scope={SupportedFrameworksEnum.tf.value,
SupportedFrameworksEnum.tf2.value,
SupportedFrameworksEnum.tf2_keras.value},
validate=lambda v: v in ('NCHW', 'NHWC'),
to_arg=lambda v: v == 'NCHW', # if the original layout is already NCHW, disable the NHWC->NCHW transpose
to_param=lambda v: 'NCHW' if v else 'NHWC',
),
Param(
'predefinedTransformationsConfig',
cli_arg_name='transformations_config',
validate=lambda v: v in PREDEFINED_CONFIG_NAMES_TO_PATH_MAP,
to_arg=lambda v: PREDEFINED_CONFIG_NAMES_TO_PATH_MAP[v],
to_param=lambda v: Path(v).name
),
InputsParam('inputs'),
Param(
'outputs',
cli_arg_name='output',
validate=lambda v: isinstance(v, list) and all(isinstance(element, str) for element in v),
to_arg=','.join,
to_param=lambda v: v.split(','),
),
Param(
'enableSsdGluoncv',
cli_arg_name='enable_ssd_gluoncv',
scope={SupportedFrameworksEnum.mxnet.value},
validate=lambda v: isinstance(v, bool),
),
Param(
'legacyMxnetModel',
cli_arg_name='legacy_mxnet_model',
scope={SupportedFrameworksEnum.mxnet.value},
validate=lambda v: isinstance(v, bool),
),
]
def __init__(self, data: dict, framework: str):
self.data = {k: v for k, v in data.items() if v is not None}
self.framework = framework
self.is_invalid = None
self.errors = None
self.validate()
@classmethod
def get_param_name_to_param_conf_map(cls) -> dict:
return {
param_conf.param_name: param_conf
for param_conf in cls.params_config
}
@classmethod
def get_cli_arg_name_to_param_conf_map(cls) -> dict:
return {
param_conf.cli_arg_name: param_conf
for param_conf in cls.params_config
if param_conf.cli_arg_name
}
def validate(self) -> Optional[Mapping]:
errors = {
'missing': [],
'unknown': [],
'unsuitable': [],
'invalid': {},
}
scopes = {'general', self.framework}
required_params = set(p.param_name for p in self.params_config if p.required and p.scope.intersection(scopes))
errors['missing'] = list(required_params - set(self.data.keys()))
params_config_map = self.get_param_name_to_param_conf_map()
for key, value in self.data.items():
if key not in params_config_map:
errors['unknown'].append(key)
continue
if not params_config_map[key].scope.intersection(scopes):
errors['unsuitable'].append(key)
if not params_config_map[key].validate(value):
errors['invalid'][key] = value
self.is_invalid = any(errors.values())
self.errors = errors if self.is_invalid else None
return self.errors
def _prepare_channels_order_dependent_values(self, key: str, arg_name: str, args: dict):
values = {input_['name']: input_[key] for input_ in self.data['inputs'] if key in input_}
if self.data.get('originalChannelsOrder') == 'BGR':
values = {k: list(reversed(value)) for k, value in values.items()}
prepared_values = ','.join(
f'{k}[{",".join(str(float(v)) for v in value)}]'
for k, value in values.items()
)
if prepared_values:
args[arg_name] = prepared_values
@staticmethod
def _prepare_placeholders(names: List, placeholders: List, args: dict):
if any(placeholders):
inputs = zip(names, placeholders)
processed_values = []
for name, placeholder in inputs:
if placeholder:
processed_values.append(f'{name}->{placeholder}')
args['freeze_placeholder_with_value'] = ','.join(processed_values)
def get_args(self) -> dict:
if self.is_invalid:
raise ValueError(self.errors)
params_config_map = self.get_param_name_to_param_conf_map()
args = {
params_config_map[key].cli_arg_name: params_config_map[key].to_arg(value)
for key, value in self.data.items()
if params_config_map[key].cli_arg_name
}
if 'inputs' in self.data:
inputs = self.data['inputs']
if 'batch' in self.data:
del args['batch']
self._prepare_channels_order_dependent_values('means', 'mean_values', args)
self._prepare_channels_order_dependent_values('scales', 'scale_values', args)
input_names = [input_['name'] for input_ in inputs]
input_placeholders = [input_.get('freezePlaceholderWithValue') for input_ in inputs]
args['input'] = ','.join(input_names)
input_shapes = [input_['shape'] for input_ in inputs if 'shape' in input_]
if input_shapes:
args['input_shape'] = ','.join(
f'[{",".join(str(int(element)) for element in shape)}]'
for shape in input_shapes
)
input_layouts = {input_['name']: input_['layout'] for input_ in inputs if 'layout' in input_}
if input_layouts:
args['layout'] = input_layouts
self._prepare_placeholders(input_names, input_placeholders, args)
return args
@classmethod
def _transformations_config_to_param(cls, mo_args: dict, params: dict):
pipeline_config_file_path = mo_args.get('transformations_config')
if not pipeline_config_file_path:
return
pipeline_config_file_name = Path(pipeline_config_file_path).stem
if pipeline_config_file_name not in PREDEFINED_CONFIG_NAMES_TO_PATH_MAP:
params['customTransformationsConfig'] = Path(pipeline_config_file_path).name
# If the transformations_config file name is not in PREDEFINED_CONFIG_NAMES_TO_PATH_MAP,
# only the customTransformationsConfig field should be kept in params
if params['predefinedTransformationsConfig']:
del params['predefinedTransformationsConfig']
@classmethod
def to_params(cls, mo_args: dict):
arg_to_param_map = cls.get_cli_arg_name_to_param_conf_map()
params = {
arg_to_param_map[arg].param_name: arg_to_param_map[arg].to_param(value)
for arg, value in mo_args.items()
if arg in arg_to_param_map
}
pipeline_config_file_path = mo_args.get('tensorflow_object_detection_api_pipeline_config')
if pipeline_config_file_path:
params['isPipelineConfigPersisted'] = True
cls._transformations_config_to_param(mo_args, params)
if 'input' in mo_args:
parsed_values = {arg_name: {} for arg_name in ('mean_values', 'scale_values')}
for arg_name in parsed_values:
if arg_name in mo_args:
for layer_values in mo_args[arg_name].split('],'):
name, vector_str = layer_values.split('[')
parsed_values[arg_name][name] = [float(value) for value in vector_str.rstrip(']').split(',')]
if params.get('originalChannelsOrder') == 'RGB':
parsed_values[arg_name][name] = list(reversed(parsed_values[arg_name][name]))
names = mo_args['input'].split(',')
shapes = [
[int(e) for e in shape_string.lstrip('[').rstrip(']').split(',') if e]
for shape_string in mo_args['input_shape'].split('],')
] if 'input_shape' in mo_args else []
layouts = mo_args.get('layout', {})
freeze_placeholders = cls._parse_freeze_placeholder_with_value(mo_args)
params['inputs'] = []
for index, name in enumerate(names):
input_ = {
'name': name,
}
if index < len(shapes) and shapes[index]:
input_['shape'] = shapes[index]
if name in parsed_values['mean_values']:
input_['means'] = parsed_values['mean_values'][name]
if name in parsed_values['scale_values']:
input_['scales'] = parsed_values['scale_values'][name]
if name in freeze_placeholders:
input_['freezePlaceholderWithValue'] = freeze_placeholders[name]
if name in layouts:
input_['layout'] = layouts[name]
params['inputs'].append(input_)
return params
@staticmethod
def _parse_freeze_placeholder_with_value(mo_args: dict) -> dict:
freeze_placeholder_with_value = mo_args.get('freeze_placeholder_with_value')
if not freeze_placeholder_with_value:
return {}
result = {}
for entry in freeze_placeholder_with_value.split(','):
input_name, freeze_value = entry.split('->')
result[input_name] = freeze_value
return result
|
from utils.reddit import Reddit
|
def build_endpoint_description_strings(
host=None, port=None, unix_socket=None, file_descriptor=None
):
"""
Build a list of twisted endpoint description strings that the server will listen on.
This is to streamline the generation of twisted endpoint description strings from easier
to use command line args such as host, port, unix sockets etc.
"""
socket_descriptions = []
if host and port is not None:
host = host.strip("[]").replace(":", r"\:")
socket_descriptions.append("tcp:port=%d:interface=%s" % (int(port), host))
elif any([host, port]):
raise ValueError("TCP binding requires both port and host kwargs.")
if unix_socket:
socket_descriptions.append("unix:%s" % unix_socket)
if file_descriptor is not None:
socket_descriptions.append("fd:fileno=%d" % int(file_descriptor))
return socket_descriptions
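# Illustrative outputs (example values, derived from the logic above):
#   build_endpoint_description_strings(host="127.0.0.1", port=8000)
#       -> ["tcp:port=8000:interface=127.0.0.1"]
#   build_endpoint_description_strings(unix_socket="/tmp/app.sock", file_descriptor=3)
#       -> ["unix:/tmp/app.sock", "fd:fileno=3"]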
|
import sys
import zmq
import numpy as np
# from marl_agent import ma_messages_pb2 as pb
import ma_messages_pb2 as pb
import time
from collections import defaultdict
class RealNeSZmqBridge(object):
"""
Python interface base class for the simulator. Includes information about the port numbers for communication with
the simulator.
"""
def __init__(self, port=0, start_sim=False, sim_seed=0):
super(RealNeSZmqBridge, self).__init__()
port = int(port)
self.port = int(port)
self.start_sim = start_sim # This is left for future work.
self.sim_seed = sim_seed # Left for future work
self.env_stopped = False
self.state_received_time_ms = 0
self.action_send_time_ms = 0
self.total_received = 0
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REP)
try:
if port == 0 and self.start_sim:
port = self.socket.bind_to_random_port('tcp://*', min_port=5001, max_port=10000, max_tries=100)
print("Got new port for RealNeS interface: ", port)
elif port == 0 and not self.start_sim:
print("Cannot use port %s to bind" % str(port))
print("Please specify correct port")
sys.exit()
else:
self.socket.bind("tcp://*:%s" % str(self.port))
except Exception as e:
print("Cannot bind to tcp://*:%s as port is already in use" % str(port))
print("Please specify different port or use 0 to get free port")
sys.exit()
if start_sim and sim_seed == 0:
max_seed = np.iinfo(np.uint32).max
sim_seed = np.random.randint(0, max_seed)
self.simSeed = sim_seed
self.force_env_stop_v = False
self._total_users = None
self._action_space = None
self._observation_space = None
self._state_space_type = None
self.next_state = None
self.init_state = None # This state will be used when the simulation is reset.
self.reward = 0
self.done = None
self.extraInfo = None
self.next_state_rx = None
self.is_first_step = True # Set this to true to trigger the first step
# Create a socket to collect rewards from the RewardCollector.
#self.context = zmq.Context()
#self.context_reward = zmq.Context()
self.socket_rewards = self.context.socket(zmq.REQ)
#self.socket_rewards.connect("tcp://*:%s" % str(self.port+2)) # use 5557 for requesting the rewards.
self.socket_rewards.connect("tcp://localhost:5557") # use 5557 for requesting the rewards.
# Enable reward collector.
#p = Process(target=RewardCollector, args=(self.port+1, ))
#p.start()
#t = threading.Thread(target=RewardCollector, args=(self.port+1, ))
#t.start()
#p.join()
def initialize_env(self):
"""
Initialize the environment. The simulator must be started first so that it can send the initialization
message, which includes the total number of users, the action space, the state space type, etc. This
information is later used to set up the RL agent (e.g., neural networks, policy).
:return:
"""
request = self.socket.recv()
simInitMsg = pb.MA_SimInitMsg()
simInitMsg.ParseFromString(request)
self._total_users = simInitMsg.total_users - 1 # since we disable one of the users.
self._state_space_type = simInitMsg.state_space_type
self._action_space = simInitMsg.action_space
self._observation_space = simInitMsg.state_space
reply = pb.MA_SimInitAck()
reply.done = False
reply.stopSimReq = False
reply_msg = reply.SerializeToString()
self.socket.send(reply_msg)
def restart_sockets(self):
"""
Restarts the sockets, this is used for restarting the simulation.
:return:
"""
self.socket.close()
self.socket_rewards.close()
time.sleep(1.0)
self.socket = self.context.socket(zmq.REP)
self.socket.bind("tcp://*:%s" % str(self.port))
self.socket_rewards = self.context.socket(zmq.REQ)
self.socket_rewards.connect("tcp://localhost:5557")
def get_total_users(self):
return self._total_users
def get_action_space(self):
return self._action_space
def get_observation_space(self):
return self._observation_space
def get_state_type(self):
return self._state_space_type
def send_action(self, action):
"""
Sends the given action to the RealNeS simulator via the corresponding socket.
:param action:
:return:
"""
reply = pb.MA_SchedulingGrant()
reply.time_stamp = action
reply.stop_simulation = False
reply_msg = reply.SerializeToString()
self.socket.send(reply_msg)
return True
def get_observation(self):
"""
Gets the new observation, sequence number(sn) for each user.
This is not activated for DRQN agent.
:return:
"""
request = self.socket.recv()
env_state_msg = pb.MA_SchedulingRequest()
env_state_msg.ParseFromString(request)
user_id = env_state_msg.user_id
sn = env_state_msg.SN
state = np.array(env_state_msg.state)
return user_id, sn, state
#obs[user_id][sn] = state
def get_observation_syn(self):
"""
Receives the observation from the synchronized message of each user; this also includes the reward.
:return:
"""
request = self.socket.recv()
env_state_msg = pb.MA_SchedulingRequestSyn()
env_state_msg.ParseFromString(request)
user_id = env_state_msg.user_id
sn = env_state_msg.SN
state = np.array(env_state_msg.state)
reward = env_state_msg.reward
return user_id, sn, state, reward
#obs[user_id][sn] = state
def get_observation_syn_dist(self):
"""
Receives the observation from the synchronized message of each user; this also includes the reward.
This function is separate because the state here is the positional distribution of the other users (neighbor table).
:return:
"""
request = self.socket.recv()
env_state_msg = pb.MA_SchedulingRequestSynDist()
env_state_msg.ParseFromString(request)
user_id = env_state_msg.user_id
sn = env_state_msg.SN
nb_table = env_state_msg.neighbor
pos_of_neighbors = defaultdict(dict)
for user in range(len(nb_table)):
neighbor_entry = nb_table[user]
pos_of_neighbors[user]["xpos"] = neighbor_entry.pos_x
pos_of_neighbors[user]["ypos"] = neighbor_entry.pos_y
pos_of_neighbors[user]["seq_number"] = neighbor_entry.seq_num
pos_of_neighbors[user]["last_updated"] = neighbor_entry.last_update
reward = env_state_msg.reward
# return a dict of neighbor positions.
return user_id, sn, pos_of_neighbors, reward
def get_observation_syn_sps(self):
"""
Receives the observation from the synchronized message of each user; this also includes the reward.
The SPS algorithm sends floats instead of ints.
:return:
"""
request = self.socket.recv()
env_state_msg = pb.SPS_SchedulingRequestSyn()
env_state_msg.ParseFromString(request)
user_id = env_state_msg.user_id
sn = env_state_msg.SN
state = np.array(env_state_msg.state)
reward = env_state_msg.reward
return user_id, sn, state, reward
def receive_rewards(self):
"""
Receives the observed reward from the reward collector.
The reward collector is a subscriber socket that subscribes to the rewards published by the simulator beacon agent.
This is not used by the DRQN agent, since it already receives the reward from the simulator together with the
state information.
:return:
"""
self.socket_rewards.send(b"Send my rewards")
message = self.socket_rewards.recv()
reward_received_msg = pb.MA_RewardSentAll()
reward_received_msg.ParseFromString(message)
# print("Received reply [ %s ]" % (message))
return reward_received_msg
def get_reward(self):
return self.reward
def restart_env(self):
"""
This function is used to reset the observation space of the users.
:return:
"""
request = self.socket.recv() # first receive a scheduling request
reply = pb.MA_SchedulingGrant()
reply.time_stamp = -1 # this will indicate that we should restart the script.
reply.stop_simulation = True
reply_msg = reply.SerializeToString()
self.socket.send(reply_msg)
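# Minimal usage sketch (hypothetical port; assumes a RealNeS simulator and a reward
# collector are already running and speaking the ma_messages_pb2 protocol):
if __name__ == "__main__":
    bridge = RealNeSZmqBridge(port=5556)
    bridge.initialize_env()  # blocks until the simulator sends its init message
    print("users:", bridge.get_total_users(),
          "| action space:", bridge.get_action_space(),
          "| state space:", bridge.get_observation_space())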
|
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.tests import base
from solum.tests import utils
import solum.uploaders.swift as uploader
class SwiftUploadTest(base.BaseTestCase):
def setUp(self):
super(SwiftUploadTest, self).setUp()
# NOTE: the patch targets below assume 'open' and 'clients' are referenced inside solum.uploaders.swift
@mock.patch('solum.uploaders.swift.open', create=True)
@mock.patch('solum.uploaders.swift.clients.OpenStackClients')
@mock.patch('oslo.config.cfg.CONF.worker')
def test_upload(self, mock_config, mock_client, mock_open):
ctxt = utils.dummy_context()
orig_path = "original path"
assembly_id = "1234"
build_id = "5678"
container = 'fake-container'
mock_config.log_upload_swift_container.return_value = container
mock_swift = mock.MagicMock()
mock_client.return_value.swift.return_value = mock_swift
fake_file = mock.MagicMock()
mock_open.return_value = fake_file
swiftupload = uploader.SwiftUpload(ctxt, orig_path,
assembly_id, build_id,
"fakestage")
swiftupload.write_userlog_row = mock.MagicMock()
swiftupload.upload()
swift_info = {'container': container}
swiftupload.write_userlog_row.assert_called_once_with(orig_path,
swift_info)
mock_swift.put_container.assert_called_once_with(container)
mock_swift.put_object.assert_called_once_with(container, orig_path, fake_file)
|
# -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1384542354.113604
_template_filename=u'templates/webapps/galaxy/base_panels.mako'
_template_uri=u'/webapps/galaxy/base_panels.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='ascii'
_exports = ['masthead', 'javascripts', 'late_javascripts', 'get_user_json', 'title']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'/base/base_panels.mako', _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n\n')
# SOURCE LINE 4
__M_writer(u'\n\n')
# SOURCE LINE 8
__M_writer(u'\n\n')
# SOURCE LINE 34
__M_writer(u'\n\n')
# SOURCE LINE 55
__M_writer(u'\n\n')
# SOURCE LINE 244
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_masthead(context):
context.caller_stack._push_frame()
try:
h = context.get('h', UNDEFINED)
app = context.get('app', UNDEFINED)
trans = context.get('trans', UNDEFINED)
def tab(id,display,href,target='_parent',visible=True,extra_class='',menu_options=None):
context.caller_stack._push_frame()
try:
self = context.get('self', UNDEFINED)
len = context.get('len', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 66
__M_writer(u'\n')
# SOURCE LINE 69
__M_writer(u' \n ')
# SOURCE LINE 70
cls = ""
a_cls = ""
extra = ""
if extra_class:
cls += " " + extra_class
if self.active_view == id:
cls += " active"
if menu_options:
cls += " dropdown"
a_cls += " dropdown-toggle"
extra = "<b class='caret'></b>"
style = ""
if not visible:
style = "display: none;"
# SOURCE LINE 85
__M_writer(u'\n <li class="')
# SOURCE LINE 86
__M_writer(unicode(cls))
__M_writer(u'" style="')
__M_writer(unicode(style))
__M_writer(u'">\n')
# SOURCE LINE 87
if href:
# SOURCE LINE 88
__M_writer(u' <a class="')
__M_writer(unicode(a_cls))
__M_writer(u'" data-toggle="dropdown" target="')
__M_writer(unicode(target))
__M_writer(u'" href="')
__M_writer(unicode(href))
__M_writer(u'">')
__M_writer(unicode(display))
__M_writer(unicode(extra))
__M_writer(u'</a>\n')
# SOURCE LINE 89
else:
# SOURCE LINE 90
__M_writer(u' <a class="')
__M_writer(unicode(a_cls))
__M_writer(u'" data-toggle="dropdown">')
__M_writer(unicode(display))
__M_writer(unicode(extra))
__M_writer(u'</a>\n')
pass
# SOURCE LINE 92
if menu_options:
# SOURCE LINE 93
__M_writer(u' <ul class="dropdown-menu">\n')
# SOURCE LINE 94
for menu_item in menu_options:
# SOURCE LINE 95
if not menu_item:
# SOURCE LINE 96
__M_writer(u' <li class="divider"></li>\n')
# SOURCE LINE 97
else:
# SOURCE LINE 98
__M_writer(u' <li>\n')
# SOURCE LINE 99
if len ( menu_item ) == 1:
# SOURCE LINE 100
__M_writer(u' ')
__M_writer(unicode(menu_item[0]))
__M_writer(u'\n')
# SOURCE LINE 101
elif len ( menu_item ) == 2:
# SOURCE LINE 102
__M_writer(u' ')
name, link = menu_item
__M_writer(u'\n <a href="')
# SOURCE LINE 103
__M_writer(unicode(link))
__M_writer(u'">')
__M_writer(unicode(name))
__M_writer(u'</a>\n')
# SOURCE LINE 104
else:
# SOURCE LINE 105
__M_writer(u' ')
name, link, target = menu_item
__M_writer(u'\n <a target="')
# SOURCE LINE 106
__M_writer(unicode(target))
__M_writer(u'" href="')
__M_writer(unicode(link))
__M_writer(u'">')
__M_writer(unicode(name))
__M_writer(u'</a>\n')
pass
# SOURCE LINE 108
__M_writer(u' </li>\n')
pass
pass
# SOURCE LINE 111
__M_writer(u' </ul>\n')
pass
# SOURCE LINE 113
__M_writer(u' </li>\n ')
return ''
finally:
context.caller_stack._pop_frame()
_ = context.get('_', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 58
__M_writer(u'\n\n')
# SOURCE LINE 61
__M_writer(u' <div style="position: relative; right: -50%; float: left;">\n <div style="display: block; position: relative; right: 50%;">\n\n <ul class="nav" border="0" cellspacing="0">\n \n ')
# SOURCE LINE 114
__M_writer(u'\n\n')
# SOURCE LINE 117
__M_writer(u' ')
__M_writer(unicode(tab( "analysis", _("Analyze Data"), h.url_for( controller='/root', action='index' ) )))
__M_writer(u'\n \n')
# SOURCE LINE 120
__M_writer(u' ')
__M_writer(unicode(tab( "workflow", _("Workflow"), "javascript:frame_manager.frame_new({title: 'Workflow', type: 'url', content: '" + h.url_for( controller='/workflow', action='index' ) + "'});")))
__M_writer(u'\n\n')
# SOURCE LINE 123
__M_writer(u' ')
menu_options = [
[ _('Data Libraries'), h.url_for( controller='/library', action='index') ],
None,
[ _('Published Histories'), h.url_for( controller='/history', action='list_published' ) ],
[ _('Published Workflows'), h.url_for( controller='/workflow', action='list_published' ) ],
[ _('Published Visualizations'), h.url_for( controller='/visualization', action='list_published' ) ],
[ _('Published Pages'), h.url_for( controller='/page', action='list_published' ) ]
]
tab( "shared", _("Shared Data"), h.url_for( controller='/library', action='index'), menu_options=menu_options )
# SOURCE LINE 133
__M_writer(u'\n \n')
# SOURCE LINE 136
__M_writer(u' ')
menu_options = [
[ _('Sequencing Requests'), h.url_for( controller='/requests', action='index' ) ],
[ _('Find Samples'), h.url_for( controller='/requests', action='find_samples_index' ) ],
[ _('Help'), app.config.get( "lims_doc_url", "http://main.g2.bx.psu.edu/u/rkchak/p/sts" ), "galaxy_main" ]
]
tab( "lab", "Lab", None, menu_options=menu_options, visible=( trans.user and ( trans.user.requests or trans.app.security_agent.get_accessible_request_types( trans, trans.user ) ) ) )
# SOURCE LINE 143
__M_writer(u'\n\n\n \n')
# SOURCE LINE 148
__M_writer(u' ')
menu_options = [
[_('New Track Browser'), "javascript:frame_manager.frame_new({title: 'Trackster', type: 'url', content: '" + h.url_for( controller='/visualization', action='trackster' ) + "'});"],
[_('Saved Visualizations'), "javascript:frame_manager.frame_new({ type: 'url', content : '" + h.url_for( controller='/visualization', action='list' ) + "'});" ]
]
tab( "visualization", _("Visualization"), "javascript:frame_manager.frame_new({title: 'Trackster', type: 'url', content: '" + h.url_for( controller='/visualization', action='list' ) + "'});", menu_options=menu_options )
# SOURCE LINE 154
__M_writer(u'\n\n')
# SOURCE LINE 157
if app.config.get_bool( 'enable_cloud_launch', False ):
# SOURCE LINE 158
__M_writer(u' ')
menu_options = [
[_('New Cloud Cluster'), h.url_for( controller='/cloudlaunch', action='index' ) ],
]
tab( "cloud", _("Cloud"), h.url_for( controller='/cloudlaunch', action='index'), menu_options=menu_options )
# SOURCE LINE 163
__M_writer(u'\n')
pass
# SOURCE LINE 165
__M_writer(u'\n')
# SOURCE LINE 167
__M_writer(u' ')
__M_writer(unicode(tab( "admin", "Admin", h.url_for( controller='/admin', action='index' ), extra_class="admin-only", visible=( trans.user and app.config.is_admin_user( trans.user ) ) )))
__M_writer(u'\n \n')
# SOURCE LINE 170
__M_writer(u' ')
menu_options = []
if app.config.biostar_url:
menu_options = [ [_('Galaxy Q&A Site'), h.url_for( controller='biostar', action='biostar_redirect', biostar_action='show/tag/galaxy' ), "_blank" ],
[_('Ask a question'), h.url_for( controller='biostar', action='biostar_question_redirect' ), "_blank" ] ]
menu_options.extend( [
[_('Support'), app.config.get( "support_url", "http://wiki.g2.bx.psu.edu/Support" ), "_blank" ],
[_('Tool shed wiki'), app.config.get( "wiki_url", "http://wiki.g2.bx.psu.edu/Tool%20Shed" ), "_blank" ],
[_('Galaxy wiki'), app.config.get( "wiki_url", "http://wiki.g2.bx.psu.edu/" ), "_blank" ],
[_('Video tutorials (screencasts)'), app.config.get( "screencasts_url", "http://galaxycast.org" ), "_blank" ],
[_('How to Cite Galaxy'), app.config.get( "citation_url", "http://wiki.g2.bx.psu.edu/Citing%20Galaxy" ), "_blank" ]
] )
if app.config.get( 'terms_url', None ) is not None:
menu_options.append( [_('Terms and Conditions'), app.config.get( 'terms_url', None ), '_blank'] )
tab( "help", _("Help"), None, menu_options=menu_options )
# SOURCE LINE 185
__M_writer(u'\n \n')
# SOURCE LINE 188
__M_writer(u' ')
# Menu for user who is not logged in.
menu_options = [ [ _("Login"), h.url_for( controller='/user', action='login' ), "galaxy_main" ] ]
if app.config.allow_user_creation:
menu_options.append( [ _("Register"), h.url_for( controller='/user', action='create', cntrller='user' ), "galaxy_main" ] )
extra_class = "loggedout-only"
visible = ( trans.user == None )
tab( "user", _("User"), None, visible=visible, menu_options=menu_options )
# Menu for user who is logged in.
if trans.user:
email = trans.user.email
else:
email = ""
menu_options = [ [ '<a>Logged in as <span id="user-email">%s</span></a>' % email ] ]
if app.config.use_remote_user:
if app.config.remote_user_logout_href:
menu_options.append( [ _('Logout'), app.config.remote_user_logout_href, "_top" ] )
else:
menu_options.append( [ _('Preferences'), h.url_for( controller='/user', action='index', cntrller='user' ), "galaxy_main" ] )
menu_options.append( [ 'Custom Builds', h.url_for( controller='/user', action='dbkeys' ), "galaxy_main" ] )
logout_url = h.url_for( controller='/user', action='logout' )
menu_options.append( [ 'Logout', logout_url, "_top" ] )
menu_options.append( None )
menu_options.append( [ _('Saved Histories'), h.url_for( controller='/history', action='list' ), "galaxy_main" ] )
menu_options.append( [ _('Saved Datasets'), h.url_for( controller='/dataset', action='list' ), "galaxy_main" ] )
menu_options.append( [ _('Saved Pages'), h.url_for( controller='/page', action='list' ), "_top" ] )
menu_options.append( [ _('API Keys'), h.url_for( controller='/user', action='api_keys', cntrller='user' ), "galaxy_main" ] )
if app.config.use_remote_user:
menu_options.append( [ _('Public Name'), h.url_for( controller='/user', action='edit_username', cntrller='user' ), "galaxy_main" ] )
extra_class = "loggedin-only"
visible = ( trans.user != None )
tab( "user", "User", None, visible=visible, menu_options=menu_options )
# SOURCE LINE 222
__M_writer(u'\n \n')
# SOURCE LINE 226
__M_writer(u' </ul>\n\n </div>\n </div>\n \n')
# SOURCE LINE 232
__M_writer(u' <div class="title">\n <a href="')
# SOURCE LINE 233
__M_writer(unicode(h.url_for( app.config.get( 'logo_url', '/' ) )))
__M_writer(u'">\n <img border="0" src="')
# SOURCE LINE 234
__M_writer(unicode(h.url_for('/static/images/galaxyIcon_noText.png')))
__M_writer(u'">\n Galaxy\n')
# SOURCE LINE 236
if app.config.brand:
# SOURCE LINE 237
__M_writer(u' <span>/ ')
__M_writer(unicode(app.config.brand))
__M_writer(u'</span>\n')
pass
# SOURCE LINE 239
__M_writer(u' </a>\n </div>\n\n <div class="quota-meter-container"></div>\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_javascripts(context):
context.caller_stack._push_frame()
try:
parent = context.get('parent', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 6
__M_writer(u'\n ')
# SOURCE LINE 7
__M_writer(unicode(parent.javascripts()))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_late_javascripts(context):
context.caller_stack._push_frame()
try:
h = context.get('h', UNDEFINED)
def get_user_json():
return render_get_user_json(context)
parent = context.get('parent', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 36
__M_writer(u'\n')
# SOURCE LINE 37
__M_writer(unicode(parent.late_javascripts()))
__M_writer(u'\n\n<!-- quota meter -->\n')
# SOURCE LINE 40
__M_writer(unicode(h.templates( "helpers-common-templates", "template-user-quotaMeter-quota", "template-user-quotaMeter-usage" )))
__M_writer(u'\n')
# SOURCE LINE 41
__M_writer(unicode(h.js( "mvc/base-mvc", "mvc/user/user-model", "mvc/user/user-quotameter" )))
__M_writer(u'\n<script type="text/javascript">\n\n // start a Galaxy namespace for objects created\n window.Galaxy = window.Galaxy || {};\n\n // set up the quota meter (And fetch the current user data from trans)\n Galaxy.currUser = new User( ')
# SOURCE LINE 48
__M_writer(unicode(get_user_json()))
__M_writer(u" );\n Galaxy.quotaMeter = new UserQuotaMeter({\n model : Galaxy.currUser,\n el : $( document ).find( '.quota-meter-container' )\n }).render();\n\n</script>\n")
return ''
finally:
context.caller_stack._pop_frame()
def render_get_user_json(context):
context.caller_stack._push_frame()
try:
AssertionError = context.get('AssertionError', UNDEFINED)
int = context.get('int', UNDEFINED)
h = context.get('h', UNDEFINED)
float = context.get('float', UNDEFINED)
util = context.get('util', UNDEFINED)
trans = context.get('trans', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 10
__M_writer(u'\n')
# SOURCE LINE 11
"""Bootstrapping user API JSON"""
#TODO: move into common location (poss. BaseController)
if trans.user:
user_dict = trans.user.get_api_value( view='element', value_mapper={ 'id': trans.security.encode_id,
'total_disk_usage': float } )
user_dict['quota_percent'] = trans.app.quota_agent.get_percent( trans=trans )
else:
usage = 0
percent = None
try:
usage = trans.app.quota_agent.get_usage( trans, history=trans.history )
percent = trans.app.quota_agent.get_percent( trans=trans, usage=usage )
except AssertionError, assertion:
# no history for quota_agent.get_usage assertion
pass
user_dict = {
'total_disk_usage' : int( usage ),
'nice_total_disk_usage' : util.nice_size( usage ),
'quota_percent' : percent
}
# SOURCE LINE 32
__M_writer(u'\n')
# SOURCE LINE 33
__M_writer(unicode(h.to_json_string( user_dict )))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_title(context):
context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 4
__M_writer(u'Galaxy')
return ''
finally:
context.caller_stack._pop_frame()
|
# Exercise 4.5
# Author: Noah Waterfield Price
import sys
val = eval(sys.argv[1])
print '%s is a Python %s object' % (val, type(val))
"""
Sample run:
python objects_cml.py 1
1 is a Python <type 'int'> object
python objects_cml.py 1.0
1.0 is a Python <type 'float'> object
python objects_cml.py "1 + 1j"
(1+1j) is a Python <type 'complex'> object
python objects_cml.py "[1,2]"
[1, 2] is a Python <type 'list'> object
python objects_cml.py "(1,2)"
(1, 2) is a Python <type 'tuple'> object
python objects_cml.py "'hello'"
hello is a Python <type 'str'> object
"""
|
#!/bin/env python
##
# @file
# This file is part of SeisSol.
#
# @author Alexander Breuer (breuer AT mytum.de, http://www5.in.tum.de/wiki/index.php/Dipl.-Math._Alexander_Breuer)
#
# @section LICENSE
# Copyright (c) 2014, SeisSol Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# @section DESCRIPTION
# Analyzes the benchmark runs.
#
import logging
## Analysis phase of the benchmark runs.
class Analyze:
def __init__( self ):
self.m_logger = logging.getLogger('Setup')
## Prepares the directories for the analyzed output.
#
    # @param i_benchmarkRuns benchmark runs.
    # @param i_builds build configurations.
def setupOutputDirectories( self,
i_benchmarkRuns,
i_builds ):
l_bashCommands = '\n# preparing output directories\n'
l_bashCommands = l_bashCommands + "echo 'preparing output directories'\n"
l_bashCommands = l_bashCommands + 'mkdir $WORKING_DIRECTORY/output/\n'
for l_build in i_builds:
l_bashCommands = l_bashCommands+\
'mkdir $WORKING_DIRECTORY/output/' + l_build['id'] + '\n'
for l_benchmark in i_benchmarkRuns:
l_bashCommands = l_bashCommands+\
'mkdir $WORKING_DIRECTORY/output/' + l_build['id'] + '/' + l_benchmark['id'] + '\n'
l_bashCommands = l_bashCommands+\
'mkdir $WORKING_DIRECTORY/output/' + l_build['id'] + '/' + l_benchmark['id'] + '/receivers' + '\n'
return l_bashCommands
## Prepares the receivers for comparisons
#
# @param i_type type of the workflow.
# @param i_benchmarkRuns benchmarks.
# @param i_builds build configurations.
def prepareReceivers( self,
i_type,
i_benchmarkRuns,
i_builds ):
self.m_logger.info( "preparing receivers" )
l_bashCommands = '\n# preparing receivers\n'
l_bashCommands = l_bashCommands + "echo 'preparing receivers'\n"
for l_build in i_builds:
for l_benchmark in i_benchmarkRuns:
l_bashArguments = " -i " + "$WORKING_DIRECTORY/runs/" + l_build['id'] + "/" + l_benchmark['id'] + "/output"+\
" -o " + "$WORKING_DIRECTORY/output/" + l_build['id'] + "/" + l_benchmark['id'] + "/receivers"
l_bashCommands = l_bashCommands+\
"sh ${SCRIPTS_DIRECTORY}/analyze/remove_ranks.sh" + l_bashArguments +"\n"
return l_bashCommands
## Compares the receivers with a reference solution.
#
# @param i_benchmarkRuns benchmark runs to compare.
def compareReceivers( self,
i_benchmarkRuns,
i_builds ):
self.m_logger.info( "comparing receivers" )
l_bashCommands = "echo 'comparing receivers'\n"
for l_build in i_builds:
for l_benchmark in i_benchmarkRuns:
l_pythonArguments = "$INPUT_DIRECTORY/benchmarks/" + l_benchmark['name'] + "/references/" + l_build['order']+ " "+\
"$WORKING_DIRECTORY/output/" + l_build['id'] + "/" + l_benchmark['id'] + "/receivers" + " "+\
"$WORKING_DIRECTORY/output/" + l_build['id'] + "/" + l_benchmark['id'] + "/plots.pdf" + " "+\
"$WORKING_DIRECTORY/output/" + l_build['id'] + "/" + l_benchmark['id'] + "/misfits.csv"
l_bashCommands = l_bashCommands + 'python ${SCRIPTS_DIRECTORY}/analyze/compare_receivers.py ' + l_pythonArguments + " & \n"
return l_bashCommands
    ## Extracts the data of the log files from convergence runs.
    #
    # @param i_regularExpressions regular expressions for the log files.
def extractLogData( self,
i_regularExpressions ):
l_bashCommands = ''
        # iterate over the regular expressions and generate plots for each
for l_regularExpression in i_regularExpressions:
l_bashCommands = l_bashCommands+\
'mkdir -p $WORKING_DIRECTORY/output/' + l_regularExpression + '\n'
l_pythonArguments = "--log_dir=$WORKING_DIRECTORY/logs/ "+\
"--log_regexp="+l_regularExpression+".out "+\
"--output_dir=$WORKING_DIRECTORY/output/" + l_regularExpression
l_bashCommands = l_bashCommands + 'python ${SCRIPTS_DIRECTORY}/analyze/convergence.py ' + l_pythonArguments + " & \n"
return l_bashCommands
    ## Postprocessing of the benchmark runs.
#
# @param i_type type of the workflow.
# @param i_benchmarks benchmarks.
# @param i_builds build configurations.
def postProcess( self,
i_type,
i_benchmarks,
i_builds,
i_regularExpressions = set() ):
self.m_logger.info( "postprocessing benchmarks" )
#
# bash interface
#
if( i_type == 'bash' ):
l_bashCommands ='''
#
# Postprocessing benchmarks
#
if [ ${ANALYZE} != "0" ]
then
echo "$(date) postprocessing benchmarks"
'''
# process runs with receivers
l_receiverRuns = [l_entry for l_entry in i_benchmarks['runs'] if l_entry['name'] != 'periodic']
if( len(l_receiverRuns) != 0 ):
l_bashCommands = l_bashCommands+\
self.setupOutputDirectories( i_benchmarkRuns = i_benchmarks['runs'],
i_builds = i_builds )
l_bashCommands = l_bashCommands+\
self.prepareReceivers( i_type = i_type,
i_benchmarkRuns = i_benchmarks['runs'],
i_builds = i_builds )
l_bashCommands = l_bashCommands+\
self.compareReceivers( i_benchmarkRuns = i_benchmarks['runs'],
i_builds = i_builds )
# process convergence runs
if( len(i_regularExpressions) > 0 ):
l_bashCommands = l_bashCommands + self.extractLogData( i_regularExpressions )
# close analysis
l_bashCommands = l_bashCommands+\
"\n echo \'waiting for postprocessing jobs to finish\' \n"+\
"wait\n"+\
"\nfi\n"+\
"# finished postprocessing benchmarks\n"
return l_bashCommands
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/distributed.models.lgb.ipynb (unless otherwise specified).
__all__ = ['LGBMForecast']
# Cell
import warnings
import lightgbm as lgb
# Cell
class LGBMForecast(lgb.dask.DaskLGBMRegressor):
if lgb.__version__ <= '3.2.1':
warnings.warn(
"It is recommended to build LightGBM from source following the instructions here: "
"https://github.com/microsoft/LightGBM/tree/master/python-package#install-from-github, since "
"the current LightGBM version might be affected by https://github.com/microsoft/LightGBM/issues/4026, "
"which was fixed after 3.2.1."
)
@property
def model_(self):
return self.booster_
|
#!/usr/bin/python
import socket
import sys
import os
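# Minimal blocking TCP echo server: listen on the port given by the PORT
# environment variable and echo back whatever each client sends.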
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = int(os.environ['PORT'])
server_address = ('0.0.0.0', port)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
sock.listen(1)
while True:
print >>sys.stderr, 'waiting for a connection'
conn, client_address = sock.accept()
try:
print >>sys.stderr, 'connection from', client_address
while True:
data = conn.recv(16)
print >>sys.stderr, 'received "%s"' % data
if data:
print >>sys.stderr, 'sending data back to the client'
conn.sendall(data)
else:
print >>sys.stderr, 'no more data from', client_address
break
finally:
conn.close()
|
a = [3, 4, 5]
b = [i for i in a if i > 4]
# Or:
b = list(filter(lambda x: x > 4, a))
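# list() is needed under Python 3, where filter() returns a lazy iterator
# rather than a list.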
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
def gene_file_list(filename, num):
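    # Write one "img/NNNNNN.png img/NNNNNN.png" pair per line for every 5th
    # index (i % 5 == 0) below `num`.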
with open(filename, 'w') as f:
for i in range(num):
if i % 5:
continue
f.write('img/%.6d.png ' % i)
f.write('img/%.6d.png\n' % i)
f.close()
def extract_image_from_video(video_path, video_name, show_flag=False):
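    # Read frames from the video, keep every 5th one, halve its resolution and
    # write it into the sfm/ sub-directory; returns the final frame counter.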
cap = cv2.VideoCapture(video_path + video_name)
ret, frame = cap.read()
cnt = 0
while ret:
if show_flag:
cv2.imshow('extract images form video', frame)
cv2.waitKey(10)
ret, frame = cap.read()
if not ret:
break
h, w, c = frame.shape
frame = cv2.resize(frame, (w / 2, h / 2))
if cnt % 5:
cnt += 1
continue
cv2.imwrite(video_path + "sfm/%.6d.jpg" % cnt, frame)
# ret, frame = cap.read()
print cnt
cnt += 1
return cnt
|
vel = int(input("Speed: "))
if vel > 80:
    x = int(vel - 80)
    val = float(x*7.00)
    print("You were fined R${:.2f} for speeding".format(val))
else:
    print("You have no fines!")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
from setuptools import setup, find_packages
if sys.version_info < (3, 6):
sys.exit('Sorry, Python < 3.6 is not supported')
with open('README.rst') as readme_file:
readme = readme_file.read()
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements('requirements.txt', session=False)
requirements = [str(ir.req) for ir in install_reqs]
extra_reqs = parse_requirements('requirements_dev.txt', session=False)
extra_reqs = [str(ir.req) for ir in extra_reqs]
def package_files(directory):
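    # Collect every file underneath `directory` (skipping build/ and .git/
    # trees) as '..'-prefixed paths suitable for package_data.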
paths = []
for (path, directories, filenames) in os.walk(directory):
if 'build' in path.split('/'):
continue
if '.git' in path.split('/'):
continue
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
extra_files = package_files('pyha/simulation/sim_include/')
setup(
name='pyha',
version='0.0.11',
description="Pyha",
long_description=readme,
author="Gaspar Karm",
author_email='gkarm@live.com',
url='https://github.com/gasparka/pyha',
packages=find_packages(),
package_data={'pyha': extra_files},
    extras_require={'dev': extra_reqs},
include_package_data=True,
install_requires=requirements,
license="Apache Software License 2.0",
zip_safe=False,
keywords='pyha'
)
|
import wx
from src.ui.menubar import MenuBar
from src.ui.download_list import DownloadList
class MainApplication(wx.Frame):
def __init__(self, *args, **kwargs):
super(MainApplication, self).__init__(*args, **kwargs)
menubar = MenuBar(master=self)
self.download_list = DownloadList(self)
def update_list(self):
self.download_list.update_ui()
self.Refresh()
self.Update()
self.download_list.InitUI()
|
from granite.utils.functional import boost_fn, bf, Function, dump_map, fwd, apply, return_function
from pampy import match, _
from collections.abc import Iterable, Iterator, Mapping
from types import GeneratorType
def make_bench(bench, format):
def make_bench_impl(fn, *args, **kwargs):
return bf(format) * bf(bench) << (lambda : fn(*args, **kwargs))
return make_bench_impl
class Bench:
    def __init__(self, bench, format):
        self.bench = bench
        # Keep the formatter under a distinct name so it does not shadow the
        # format() method below (which would otherwise call itself forever).
        self.format_fn = format
    def format(self, *args, **kwargs):
        return self.format_fn(self.bench(), *args, **kwargs)
def tail_file(file):
'''Change the stream position to the end of the file.'''
# Set the offset to 0 from the end of the file (2).
file.seek(0, 2)
def load_and_map(files):
return dict(map(lambda f:(f.name, f.readlines()), map(open, files)))
@boost_fn
def call_and_log(fn, out_files, log_files):
dump_map(tail_file, log_files)
result = fn()
log_data = load_and_map(log_files)
out_data = load_and_map(out_files)
return result, out_data, log_data
class Task(Function):
def __init__(self, cmd, log_files=None, out_files=None):
self.cmd = cmd
self.log_files = not_none_fwd_or(log_files, [])
self.out_files = not_none_fwd_or(out_files, [])
super().__init__(self, call_once(call_and_log,
self.cmd, self.out_files, self.log_files))
def __repr__(self):
return repr(self.cmd)
def process_container(d):
    '''Recursively walk containers, calling any callables found, to produce JSON-compatible data.'''
return match(d,
# Special handle for str that is instance of Iterable but
# does not need to be processed.
str, fwd,
# Handle for Mapping types, we do not want to check if
# keys are callable. We only iterate on values.
Mapping, lambda m : type(m)(zip(m.keys(), process_container(m.values()))),
Iterable, lambda i : list(map(process_container, i)),
callable, apply,
_, fwd)
@boost_fn
def format_bench(data, bench_type, bench_version):
return dict(data=data, bench_type=bench_type, bench_version=bench_version)
@boost_fn
def rename_result(data, old_name, new_name):
data[new_name] = data.pop(old_name)
return data
@boost_fn
def rename_results(data, old_names, new_names):
dump_map(rename_result << data, old_names, new_names)
return data
@boost_fn
def rename_bench(old_name, new_name):
rename_fun = match((old_name, new_name),
(str, str), return_function(rename_result),
(Iterable, Iterable), return_function(rename_results),
(_, _), lambda a, b : print(f'{a} & {b} must be both str or iterable'))
return rename_fun.rpartial(old_name, new_name)
@boost_fn
def drop_result(data, name):
data.pop(name)
return data
@boost_fn
def drop_results(data, names):
dump_map(drop_result << data, names)
return data
@boost_fn
def drop_bench(name):
drop_fun = match(name,
str, return_function(drop_result),
Iterable, return_function(drop_results),
_, lambda a : print(f'{a} must be str or iterable'))
return drop_fun >> name
|
class Solution:
def minCostClimbingStairs(self, cost):
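        # Bottom-up DP over cost_len + 2 positions: table[i] holds the cheapest
        # cost found so far to reach position i, with the final position being
        # the top of the staircase; from each position we relax the one-step
        # and two-step moves forward.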
cost_len = len(cost)
table_len = cost_len + 2
table = [10 ** 6] * table_len
table[0] = 0
for i in range(1, table_len):
prev = table[i-1]
table[i] = min(
table[i],
prev + (cost[i-1] if i < cost_len else 0)
)
if (j := i + 1) < table_len:
table[j] = min(
table[j],
prev + (cost[i] if i < cost_len else 0)
)
# return final element.
return table[-1]
|
acumulador = 0
MaisVelho = 0
contador = 0
NomeDoMaisVelho = ''
for p in range(1,5):
    print(f'----- PERSON {p} -----')
    nome = str(input('Name: ')).strip()
    idade = int(input('Age: '))
    sexo = str(input('Sex [M/F]: ')).lower().strip()
acumulador += idade
if p == 1 and sexo in 'Mm':
MaisVelho = idade
NomeDoMaisVelho = nome
if idade > MaisVelho and sexo == 'm':
MaisVelho = idade
NomeDoMaisVelho = nome
if idade < 20 and sexo == 'f':
contador += 1
print(f'The average age is: {acumulador/4}')
print(f'The oldest man is {MaisVelho} years old and his name is {NomeDoMaisVelho}')
print(f'There are {contador} women under 20 years old')
|
#!/usr/bin/env python
#process the qkids.tsv file to create qkids_score data
#*NB* these athletic idiots are unable to process times properly
#so their spreadsheet had 3.50 for 3:50 and 0.01 instead of 0:01
#bah :(
import sys, os, re, time
medir = os.path.dirname(sys.argv[0])
if not medir: medir = os.getcwd()
try:
from athlib.utils import parse_hms
except ImportError:
sys.path.insert(0,os.path.dirname(medir))
from athlib import parse_hms, normalize_event_code, normalize_gender
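# Parse a numeric string: return an int when the value is integral, a float
# otherwise; falls through (returning None) if the string is not numeric.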
def num(s):
try:
v = float(s)
iv = int(v)
return iv if v==iv else v
except:
pass
endCommaPat = re.compile(r',(?P<space>\s*)(?P<bracket>\]|\)|\})',re.M)
def shorten(line, indent=4, inc=2):
if len(line)<80: return line
indent = min(indent+inc,8)
space = indent*'\x20'
for pat in ('),', '],', '},', ','):
x = line.rfind(pat,0,80-len(pat))
if x>=0:
x += len(pat)
return line[:x]+'\n'+shorten(space+line[x:].strip(), indent, inc)
return line
compTypeMap = {
'Wessex League': 'QKWL',
'Wessex League (U13)': 'QKWLU13',
'QuadKids Secondary': 'QKSEC',
'QuadKids Primary': 'QKPRI',
'QuadKids Start': 'QKSTA',
'QuadKids Club': 'QKCLUB',
'QuadKids Club U13': 'QKCLU13',
'QuadKids Club U9': 'QKCLU9',
'QuadKids Pre-Start': 'QKPRE',
}
ecodeMap = {
'50m': '50',
'75m': '75',
'70/75mh': '70H',
'100m': '100',
'300m': '300',
'400m': '400',
'600m': '600',
'800m': '800',
'relay': '4x100',
'long_jump': 'LJ',
'howler': 'OT',
'slj': 'SLJ',
'shot': 'SP',
}
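# A spreadsheet cell may hold either a plain number or a colon-separated time
# such as '3:50'; parse whichever form is present.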
def possible_hms(v):
return parse_hms(v) if ':' in v else float(v)
def translate(txt, _70H):
try:
        txt = txt.decode('utf-8')
except:
pass
out = [].append
L = txt.split('\n')
i = 0
n = len(L)
while i<n:
line = L[i]
i += 1
if not line.strip(): continue
line = line.split('\t')
line = [_.strip() for _ in line if _.strip()]
ll = len(line)
if ll!=1: raise ValueError('bad comptype line %d:%r in tsv file' % (i, L[i]))
try:
if ll==1:
#start of a new label
l0 = line[0]
compType = compTypeMap[l0]
out('\x20\x20%r: {' % compType)
data = [].append
for kind in 'ecode inc min max'.split():
line = L[i]
i += 1
line = line.split('\t')
line = [_.strip() for _ in line if _.strip()]
if kind=='ecode':
data([ecodeMap[_.lower()] for _ in line[1:6]])
else:
data([possible_hms(_) for _ in line[1:6]])
data = data.__self__
t = [].append
for j in range(5):
t('\x20\x20\x20\x20%r: ['%data[0][j])
if data[0][j]=='70H':
_70H.append(compType);
t('%s, ' % data[1][j])
t('%s, ' % data[2][j])
t('%s]%s' % (data[3][j],',\n' if j<4 else ','))
out(''.join(t.__self__))
out('\x20\x20\x20\x20},')
except:
raise ValueError('unexpected error in line %s: %s' % (i,L[i]))
out('\x20\x20},')
return out.__self__
def installText(s, fn, c='#', t=''):
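    # Splice the freshly generated tables into `fn` between the c-prefixed
    # 'start qkids tables' and 'end qkids tables' marker lines, backing the
    # previous file up into ../tmp/ first.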
if not os.path.isfile(fn):
raise ValueError('cannot locate file %r' % fn)
with open(fn,'r') as f:
txt = f.read()
start = '%sstart qkids tables' % c
i = txt.find('\n'+start)
if i>=0:
iold = txt.find('\n',i+1)
if iold>=0:
iold += 1
else:
raise ValueError('cannot find end of start line in %r' % fn)
end = '%send qkids tables\n' % c
nend = '\n' + end
j = txt.find(nend)
if j>=i:
jold = j
j += len(nend)
else:
raise ValueError('found start of qkids tables did not find end in %r' % fn)
sold = txt[iold:jold]
if sold==s:
print('code unchanged in %r' % fn)
return
txt = ''.join((txt[:i] + '\n', start, t, '\n', s, '\n', end, txt[j:]))
else:
raise ValueError('could not find start of qkids tables in %r' % fn)
bfn = os.path.splitext(os.path.basename(fn))
bfn = os.path.normpath(os.path.join(medir,'..','tmp','%s-%s%s' % (bfn[0],int(time.time()),bfn[1])))
import shutil
shutil.move(fn, bfn)
with open(fn,'w') as f:
f.write(txt)
def main():
tableName = '_qkidsTables'
s = ['%s = {' % tableName]
install = '--install' in sys.argv
if install:
while '--install' in sys.argv:
sys.argv.remove('--install')
FN = sys.argv[1:]
if not FN:
from glob import glob
FN = glob(os.path.join(medir,'data','qkids.tsv'))
_70H = []
for fn in FN:
with open(fn,'r') as f:
s.extend(translate(f.read(), _70H))
s.append('}')
_70H = '\n'.join(("%s['%s']['75H'] = %s['%s']['70H']" % (tableName,_,tableName,_) for _ in _70H))
s1 = '' if not _70H else '\n'+_70H
s2 = shorten('\n_compTypeMap = {%s}' % ', '.join(("'%s': '%s'" % (k.replace(' ','').upper(),v) for k,v in compTypeMap.items())),
indent=2, inc=0)
s = endCommaPat.sub(r'\g<space>\g<bracket>','\n'.join(s))[:-2]
if install:
t = ' created by %s %s' % (os.path.basename(sys.argv[0]),time.asctime())
installText(s+s1+s2, os.path.normpath(os.path.join(medir,'..','athlib','qkids_score.py')), c='#', t=t)
js = '/* eslint-disable */\nvar '+s + ';'
s1 = '\n '+s1[1:].replace('\n',';\n') + ';'
installText(js+s1+'\nvar '+s2[1:]+';\n/* eslint-enable */', os.path.normpath(os.path.join(medir,'..','js','src', 'qkids_score.js')), c='// ', t=t)
else:
print(s+s1+s2)
if __name__=='__main__':
main()
|
#!/usr/bin/env python3
import os
import io
import sys
import re
import argparse
import json
import xml.etree.ElementTree as ET
# on msys, use crlf output
nl = None
if sys.platform == 'msys':
nl = "\r\n"
# Get the file, relative to this script's location (same directory)
# that way we're not sensitive to CWD
pathname = os.path.abspath(os.path.dirname(sys.argv[0])) + os.path.sep
with open(pathname + 'spirv.core.grammar.json', mode='r') as f:
spirv = json.load(f)
with open(pathname + 'extinst.glsl.std.450.grammar.json', mode='r') as f:
glsl450 = json.load(f)
# open XML registry
registry = ET.parse(pathname + 'spir-v.xml').getroot()
# open the file for write
header = open(pathname + 'spirv_gen.h', mode='w', newline = nl)
ops_header = open(pathname + 'spirv_op_helpers.h', mode='w', newline = nl)
cpp = open(pathname + 'spirv_gen.cpp', mode='w', newline = nl)
###############################################################################
##
## Headers
##
###############################################################################
def prefix_star(line):
if line == '':
return ' *'
else:
return ' * ' + line
def operand_name(name, lowercase_first = True):
name = name.replace('\n', ' ')
# special case a few very awkward names
if re.search(r'member [0-9].*\.\.\.', name, re.RegexFlag.I):
return 'members'
if re.search(r'parameter [0-9].*\.\.\.', name, re.RegexFlag.I):
return 'parameters'
if re.search(r'argument [0-9].*\.\.\.', name, re.RegexFlag.I):
return 'arguments'
if re.search(r'variable, parent.*\.\.\.', name, re.RegexFlag.I):
return 'parents'
name = re.sub(r'<<(.*),(.*)>>', r'\2', name)
name = re.sub(r'[ \'~<>./-]', '', name)
if name.lower() == 'interface':
return 'iface'
if name.lower() == 'default':
return 'def'
if lowercase_first:
return name[0].lower() + name[1:]
else:
return name
copyright = '''
/******************************************************************************
* The MIT License (MIT)
*
* Copyright (c) 2019-2020 Baldur Karlsson
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
/******************************************************************************
* Generated from Khronos SPIR-V machine-readable JSON grammar.
*
{}
******************************************************************************/
'''.format("\n".join([prefix_star(line.strip()) for line in spirv['copyright']])).strip()
header.write('''{copyright}
#pragma once
// This file is autogenerated with gen_spirv_code.py - any changes will be overwritten next time
// that script is run.
// $ ./gen_spirv_code.py
// We need to disable clang-format since this file is programmatically generated
// clang-format off
#include <stdint.h>
#include "api/replay/apidefs.h"
#include "api/replay/stringise.h"
#undef None
#undef CopyMemory
#undef MemoryBarrier
namespace rdcspv
{{
static const uint32_t MagicNumber = {magic};
static const uint32_t VersionMajor = {major};
static const uint32_t VersionMinor = {minor};
static const uint32_t VersionRevision = {revision};
static const uint32_t VersionPacked = ({major} << 16) | ({minor} << 8);
static const uint32_t OpCodeMask = 0xffff;
static const uint32_t WordCountShift = 16;
static const uint32_t FirstRealWord = 5;
struct Id
{{
constexpr inline Id() : id(0) {{}}
// only allow explicit functions to cast to/from uint32_t
constexpr static inline Id fromWord(uint32_t i) {{ return Id(i); }}
inline uint32_t value() const {{ return id; }}
constexpr inline explicit operator bool() const {{ return id != 0; }}
constexpr inline bool operator==(const Id o) const {{ return id == o.id; }}
constexpr inline bool operator!=(const Id o) const {{ return id != o.id; }}
constexpr inline bool operator<(const Id o) const {{ return id < o.id; }}
constexpr inline bool operator==(const uint32_t o) const {{ return id == o; }}
constexpr inline bool operator!=(const uint32_t o) const {{ return id != o; }}
constexpr inline bool operator<(const uint32_t o) const {{ return id < o; }}
private:
constexpr inline Id(uint32_t i) : id(i) {{}}
uint32_t id;
}};
enum class Generator : uint32_t
{{'''.format(copyright = copyright, magic = spirv['magic_number'], major = spirv['major_version'], minor = spirv['minor_version'], revision = spirv['revision']))
generator_tostr = ''
for gen in registry.findall('ids[@type=\'vendor\']/id[@tool]'):
name = operand_name(gen.attrib['tool'], lowercase_first=False)
tostr = '{} from {} - {}'.format(gen.attrib['tool'], gen.attrib['vendor'], gen.attrib['comment'])
generator_tostr += ' STRINGISE_ENUM_CLASS_NAMED({}, "{}");\n'.format(name, tostr.replace('"', '\\"').replace('\\', '\\\\'))
header.write('\n {} = {},'.format(name, gen.attrib['value']))
header.write('\n};\n\n')
ops_header.write('''{copyright}
#pragma once
// This file is autogenerated with gen_spirv_code.py - any changes will be overwritten next time
// that script is run.
// $ ./gen_spirv_code.py
// We need to disable clang-format since this file is programmatically generated
// clang-format off
#include <functional>
#include <set>
#include <stdint.h>
#include "api/replay/apidefs.h"
#include "api/replay/rdcstr.h"
#include "api/replay/rdcarray.h"
#include "api/replay/stringise.h"
#undef None
#undef CopyMemory
#undef MemoryBarrier
#include "spirv_common.h"
#include "spirv_gen.h"
namespace rdcspv
{{
template<typename Type>
Type DecodeParam(const ConstIter &it, uint32_t &word);
template<>
inline uint32_t DecodeParam(const ConstIter &it, uint32_t &word)
{{
if(word >= it.size()) return 0;
uint32_t ret = it.word(word);
word += 1;
return ret;
}}
template<>
inline Id DecodeParam<Id>(const ConstIter &it, uint32_t &word)
{{
if(word >= it.size()) return Id();
Id ret = Id::fromWord(it.word(word));
word += 1;
return ret;
}}
template<>
inline rdcstr DecodeParam<rdcstr>(const ConstIter &it, uint32_t &word)
{{
if(word >= it.size()) return "";
rdcstr ret = (const char *)&it.word(word);
word += uint32_t(ret.size() / 4) + 1;
return ret;
}}
template<typename Type>
rdcarray<Type> MultiParam(const ConstIter &it, uint32_t &word)
{{
rdcarray<Type> ret;
while(word < it.size())
{{
Type t = DecodeParam<Type>(it, word);
ret.push_back(t);
}}
return ret;
}}
inline void EncodeParam(rdcarray<uint32_t> &words, const rdcstr &str)
{{
size_t i=0, remainingChars = str.size() + 1;
while(remainingChars > 0)
{{
uint32_t word = 0;
for(size_t w=0; w < remainingChars && w < 4; w++)
word |= uint32_t(str[i+w]) << (w*8);
words.push_back(word);
i += 4;
if(remainingChars < 4)
remainingChars = 0;
else
remainingChars -= 4;
}}
}}
'''.format(copyright = copyright))
cpp.write('''{copyright}
// This file is autogenerated with gen_spirv_code.py - any changes will be overwritten next time
// that script is run.
// $ ./gen_spirv_code.py
// We need to disable clang-format since this file is programmatically generated
// clang-format off
#include "spirv_gen.h"
#include "os/os_specific.h"
#include "common/formatting.h"
#include "spirv_op_helpers.h"
'''.format(copyright = copyright))
###############################################################################
##
## Operands (declare enums, stringise, preprocess)
##
###############################################################################
positional_names = [ 'first', 'second', 'third' ]
kinds = {}
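# First pass over the grammar's operand kinds: declare the C++ type for each
# kind (enum, struct or alias), emit stringisers for the enums, and record
# per-kind metadata (word size, C++ type, encode/decode helpers) in `kinds`
# for the later passes.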
for operand_kind in spirv['operand_kinds']:
name = operand_kind['kind']
if 'enumerants' in operand_kind:
operand_kind['has_params'] = any(['parameters' in value for value in operand_kind['enumerants']])
else:
operand_kind['has_params'] = False
kinds[name] = operand_kind
operand_kind['push_words'] = lambda name: 'words.push_back((uint32_t){});'.format(name)
operand_kind['from_words'] = None
operand_kind['is_id'] = False
if operand_kind['category'] == 'ValueEnum':
operand_kind['size'] = 1
operand_kind['def_name'] = name[0].lower() + name[1:]
operand_kind['def_value'] = name + '::Invalid'
operand_kind['type'] = name
decl = ''
stringise = ''
used = []
for value in operand_kind['enumerants']:
value_name = value['enumerant']
if value_name[0].isdigit():
value_name = '_' + value_name
decl += ' {} = {},\n'.format(value_name, value['value'])
if value['value'] in used:
continue
used.append(value['value'])
if value_name != value['enumerant']:
stringise += ' STRINGISE_ENUM_CLASS_NAMED({}, "{}");\n'.format(value_name, value['enumerant'])
else:
stringise += ' STRINGISE_ENUM_CLASS({});\n'.format(value_name)
header.write('''enum class {name} : uint32_t
{{
{values}
Max,
Invalid = ~0U,
}};
'''.format(name = name, values = decl.rstrip()))
cpp.write('''template <>
rdcstr DoStringise(const rdcspv::{name} &el)
{{
BEGIN_ENUM_STRINGISE(rdcspv::{name});
{{
{values}
}}
END_ENUM_STRINGISE();
}}
'''.format(name = name, values = stringise.rstrip()))
elif operand_kind['category'] == 'BitEnum':
operand_kind['size'] = 1
operand_kind['def_name'] = name[0].lower() + name[1:]
operand_kind['def_value'] = name + '::None'
operand_kind['type'] = name
used = []
decl = ''
stringise = ''
for value in operand_kind['enumerants']:
decl += ' {} = {},\n'.format(value['enumerant'], value['value'])
if value['value'] in used:
continue
used.append(value['value'])
if value['enumerant'] == 'None':
stringise += ' STRINGISE_BITFIELD_CLASS_VALUE(None);\n\n'
else:
stringise += ' STRINGISE_BITFIELD_CLASS_BIT({});\n'.format(value['enumerant'])
header.write('''enum class {name} : uint32_t
{{
{values}
Max,
Invalid = ~0U,
}};
BITMASK_OPERATORS({name});
'''.format(name = name, values = decl.rstrip()))
cpp.write('''template <>
rdcstr DoStringise(const rdcspv::{name} &el)
{{
BEGIN_BITFIELD_STRINGISE(rdcspv::{name});
{{
{values}
}}
END_BITFIELD_STRINGISE();
}}
'''.format(name = name, values = stringise.rstrip()))
    # Special types whose behaviour we hardcode
elif (operand_kind['kind'] == 'IdRef'):
operand_kind['size'] = 1
operand_kind['def_name'] = 'id'
operand_kind['def_value'] = 'Id()'
operand_kind['type'] = 'Id'
operand_kind['is_id'] = True
operand_kind['push_words'] = lambda name: 'words.push_back({}.value());'.format(name)
operand_kind['from_words'] = lambda name: 'Id::fromWord({})'.format(name)
elif (operand_kind['kind'] == 'IdResultType' or
operand_kind['kind'] == 'IdResult' or
operand_kind['kind'] == 'IdMemorySemantics' or
operand_kind['kind'] == 'IdScope'):
operand_kind['size'] = 1
operand_kind['type'] = name
operand_kind['is_id'] = True
operand_kind['def_name'] = name[2].lower() + name[3:]
operand_kind['def_value'] = name + '()'
operand_kind['push_words'] = lambda name: 'words.push_back({}.value());'.format(name)
operand_kind['from_words'] = lambda name: 'Id::fromWord({})'.format(name)
header.write('using {} = Id;\n\n'.format(name))
# For simplicity, assume literal integers are 32-bit in size
elif (operand_kind['kind'] == 'LiteralInteger'):
operand_kind['size'] = 1
operand_kind['def_name'] = 'num'
operand_kind['def_value'] = '0'
operand_kind['type'] = 'uint32_t'
elif (operand_kind['kind'] == 'LiteralString'):
operand_kind['size'] = -1000000
operand_kind['type'] = 'rdcstr'
operand_kind['def_name'] = 'str'
operand_kind['def_value'] = '""'
operand_kind['push_words'] = lambda name: 'EncodeParam(words, {});'.format(name)
operand_kind['from_words'] = lambda name: 'DecodeParam({})'.format(name)
elif (operand_kind['kind'] == 'LiteralContextDependentNumber' or
operand_kind['kind'] == 'LiteralExtInstInteger' or
operand_kind['kind'] == 'LiteralSpecConstantOpInteger'):
operand_kind['size'] = None
elif (operand_kind['kind'] == 'PairLiteralIntegerIdRef'):
operand_kind['size'] = 2
operand_kind['def_name'] = name[0].lower() + name[1:]
operand_kind['def_value'] = '{0, Id()}'
operand_kind['type'] = name
operand_kind['push_words'] = lambda name: 'words.push_back((uint32_t){0}.first); words.push_back({0}.second.value());'.format(name)
ops_header.write('struct {} {{ uint32_t first; Id second; }};\n\n'.format(name))
elif (operand_kind['kind'] == 'PairIdRefLiteralInteger'):
operand_kind['size'] = 2
operand_kind['def_name'] = name[0].lower() + name[1:]
operand_kind['def_value'] = '{Id(), 0}'
operand_kind['type'] = name
operand_kind['push_words'] = lambda name: 'words.push_back({0}.first.value()); words.push_back((uint32_t){0}.second);'.format(name)
ops_header.write('struct {} {{ Id first; uint32_t second; }};\n\n'.format(name))
elif (operand_kind['kind'] == 'PairIdRefIdRef'):
operand_kind['size'] = 2
operand_kind['def_name'] = name[0].lower() + name[1:]
operand_kind['def_value'] = '{Id(), Id()}'
operand_kind['type'] = name
operand_kind['push_words'] = lambda name: 'words.push_back({0}.first.value()); words.push_back({0}.second.value());'.format(name)
ops_header.write('struct {} {{ Id first, second; }};\n\n'.format(name))
continue
else:
raise TypeError("Unexpected operand {} of type {}".format(operand_kind['kind'], operand_kind['category']))
if operand_kind['from_words'] is None:
operand_kind['from_words'] = lambda name,kind=operand_kind: '({}){}'.format(kind['type'], name)
ops_header.write('''
template<>
inline PairIdRefIdRef DecodeParam(const ConstIter &it, uint32_t &word)
{
if(word >= it.size()) return {};
PairIdRefIdRef ret = { Id::fromWord(it.word(word)), Id::fromWord(it.word(word+1)) };
word += 2;
return ret;
}
template<>
inline PairLiteralIntegerIdRef DecodeParam(const ConstIter &it, uint32_t &word)
{
if(word >= it.size()) return {};
PairLiteralIntegerIdRef ret = { it.word(word), Id::fromWord(it.word(word+1)) };
word += 2;
return ret;
}
template<>
inline PairIdRefLiteralInteger DecodeParam(const ConstIter &it, uint32_t &word)
{
if(word >= it.size()) return {};
PairIdRefLiteralInteger ret = { Id::fromWord(it.word(word)), it.word(word+1) };
word += 2;
return ret;
}
''')
tostrs = ''
tostr_decls = ''
# Second pass to declare operand parameter structs in ops helper header
for operand_kind in spirv['operand_kinds']:
name = operand_kind['kind']
if not operand_kind['has_params']:
if operand_kind['category'] == 'ValueEnum':
ops_header.write('inline uint16_t OptionalWordCount(const {0} val) {{ return val != {0}::Invalid ? 1 : 0; }}\n\n'.format(name))
continue
values = ''
set_unset = ''
word_count_cases = ''
decode_cases = ''
encode_cases = ''
constructors = ''
tostr_cases = ''
value_enum = operand_kind['category'] == 'ValueEnum'
bit_enum = operand_kind['category'] == 'BitEnum'
used = []
for value in operand_kind['enumerants']:
params = ''
assign = ''
ret_assign = ''
new_value = value['value'] not in used
used.append(value['value'])
if new_value and bit_enum:
tostr_cases += ' if(el.flags & {0}::{1})\n ret += "{1}"'.format(name, value['enumerant'])
if 'parameters' in value:
# We want plain unions, so don't include strings
if any([param['kind'] == 'LiteralString' for param in value['parameters']]):
continue
if new_value and value_enum:
tostr_cases += ' case {0}::{1}:\n ret += '.format(name, value['enumerant'])
member = ""
param_name = operand_name(value['enumerant'])
size = 0
if new_value:
if value_enum:
decode_cases += ' case {0}::{1}:\n'.format(name, value['enumerant'])
encode_cases += ' case {0}::{1}:\n'.format(name, value['enumerant'])
else:
decode_cases += ' if(ret.flags & {0}::{1})\n {{\n'.format(name, value['enumerant'])
encode_cases += ' if(param.flags & {0}::{1})\n {{\n'.format(name, value['enumerant'])
# if we only have one parameter, add its type to the set
if len(value['parameters']) == 1:
param = value['parameters'][0]
size += kinds[param['kind']]['size']
param_type = kinds[param['kind']]['type']
member = "{} {};\n".format(param_type, param_name)
if value_enum:
values += ' '
if new_value:
decode_cases += ' '
encode_cases += ' '
values += " " + member
params += "{} {}Param".format(param_type, param_name)
assign += " {0} = {0}Param;".format(param_name)
ret_assign += " ret.{0} = {0};\n".format(param_name)
if new_value:
decode_cases += ' ret.{} = {};\n'.format(param_name, kinds[param['kind']]['from_words']('it.word(word)'))
encode_cases += ' {}\n'.format(kinds[param['kind']]['push_words']('param.{}'.format(param_name)))
if kinds[param['kind']]['is_id']:
tostr_cases += ' "(" + idName(el.{}) + ")"'.format(param_name)
else:
tostr_cases += ' "(" + ToStr(el.{}) + ")"'.format(param_name)
# if we have multiple we need a separate struct for this thing
else:
struct_name = param_name[0].upper() + param_name[1:] + 'Params'
member = "{} {};\n".format(struct_name, param_name)
if value_enum:
values += ' '
values += " " + member
struct_values = ''
if new_value:
tostr_cases += ' "("'
for i,param in enumerate(value['parameters']):
subparam_name = positional_names[i]
kind = kinds[param['kind']]
size += kind['size']
if 'name' in param:
subparam_name = operand_name(param['name'])
struct_values += " {} {};\n".format(kind['type'], subparam_name)
if new_value:
if value_enum:
decode_cases += ' '
encode_cases += ' '
decode_cases += ' ret.{}.{} = {};\n'.format(param_name, subparam_name, kinds[param['kind']]['from_words']('it.word(word+{})'.format(i)))
encode_cases += ' {}\n'.format(kinds[param['kind']]['push_words']('param.{}.{}'.format(param_name, subparam_name)))
if kinds[param['kind']]['is_id']:
tostr_cases += ' + idName(el.{}.{}) + '.format(param_name, subparam_name)
else:
tostr_cases += ' + ToStr(el.{}.{}) + '.format(param_name, subparam_name)
assign += " {0}.{1} = {1};".format(param_name, subparam_name)
ret_assign += " ret.{0}.{1} = {0}.{1};\n".format(param_name, subparam_name)
params += "{} {}".format(kind['type'], subparam_name)
if i != len(value['parameters'])-1:
params += ", "
tostr_cases += '", " '
if new_value:
tostr_cases += '")"'
header.write('''struct {struct_name}
{{
{struct_values}
}};
'''.format(struct_name = struct_name, struct_values = struct_values.rstrip()))
if new_value:
if value_enum:
decode_cases += ' word += {};\n'.format(size)
decode_cases += ' break;\n'
encode_cases += ' break;\n'
tostr_cases += '; break;\n'
else:
decode_cases += ' word += {};\n'.format(size)
decode_cases += ' }\n'
encode_cases += ' }\n'
word_count_cases += ' case {}::{}: return {};\n'.format(name, value['enumerant'], size)
constructors += '''template<>\nstruct {name}Param<{name}::{value}>
{{
{member}
{name}Param({params}) {{ {assign} }}
operator {name}AndParamData()
{{
{name}AndParamData ret({name}::{value});
{ret_assign}
return ret;
}}
}};
'''.format(value=value['enumerant'], member=member.rstrip(), name=name, params=params, assign=assign, ret_assign=ret_assign.rstrip())
if new_value and bit_enum:
tostr_cases += ' ", ";\n'
set_unset += ''' void set{flag}({params}) {{ flags |= {name}::{flag};{assign} }}
void unset{flag}() {{ flags &= ~{name}::{flag}; }}
'''.format(flag=value['enumerant'], name=name, params=params, assign=assign)
if constructors != '':
constructors = 'template<{name} val> struct {name}Param;\n\n'.format(name=name) + constructors
# ValueEnums are set up as one or many pairs of enum/params, enum/params, etc. So we declare a struct for the pair
# and declare an array if the op wants many
if value_enum:
tostrs += '''template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcspv::{name}AndParamData &el)
{{
rdcstr ret = ToStr(el.value);
switch(el.value)
{{
{tostr_cases}
default:
break;
}}
return ret;
}}
'''.format(name=name, tostr_cases=tostr_cases.rstrip())
tostr_decls += '''template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcspv::{name}AndParamData &el);'''.format(name=name)
header.write('''struct {name}AndParamData
{{
{name}AndParamData({name} v = {name}::Invalid) : value(v) {{}}
{name} value;
union
{{
{values}
}};
operator {name}() const {{ return value; }}
bool operator ==(const {name} v) const {{ return value == v; }}
}};
'''.format(name=name, values=values.rstrip()))
ops_header.write('''{constructors}
template<>
inline {name}AndParamData DecodeParam(const ConstIter &it, uint32_t &word)
{{
if(word >= it.size()) return {name}AndParamData();
{name}AndParamData ret(({name})it.word(word));
word++;
switch(ret.value)
{{
{decode_cases}
default: break;
}}
return ret;
}}
inline void EncodeParam(rdcarray<uint32_t> &words, const {name}AndParamData &param)
{{
words.push_back((uint32_t)param.value);
switch(param.value)
{{
{encode_cases}
default: break;
}}
}}
'''.format(name=name, value_name=operand_name(name), decode_cases=decode_cases.rstrip(),
constructors=constructors, encode_cases=encode_cases.rstrip()))
operand_kind['type'] = '{}AndParamData'.format(name)
# BitEnums are set up with one bitmask, and then a series of parameters, so we declare a struct with an array
elif bit_enum:
tostrs += '''template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcspv::{name}AndParamDatas &el)
{{
rdcstr ret;
{tostr_cases}
// remove trailing ", "
if(ret.size() > 2)
ret.erase(ret.size()-2, 2);
return ret;
}}
'''.format(name=name, tostr_cases=tostr_cases.rstrip())
tostr_decls += '''template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcspv::{name}AndParamDatas &el);'''.format(name=name)
header.write('''struct {name}AndParamDatas
{{
{name}AndParamDatas({name} f = {name}::None) : flags(f) {{}}
{name} flags;
{values}
operator {name}() const {{ return flags; }}
bool operator &(const {name} v) const {{ return bool(flags & v); }}
{set_unset}
}};
'''.format(name=name, values=values.rstrip(), set_unset=set_unset.rstrip()))
ops_header.write('''template<>
inline {name}AndParamDatas DecodeParam(const ConstIter &it, uint32_t &word)
{{
if(word >= it.size()) return {name}AndParamDatas();
{name}AndParamDatas ret(({name})it.word(word));
word++;
{decode_cases}
return ret;
}}
inline void EncodeParam(rdcarray<uint32_t> &words, const {name}AndParamDatas &param)
{{
words.push_back((uint32_t)param.flags);
{encode_cases}
}}
'''.format(name=name, decode_cases=decode_cases.rstrip(), encode_cases=encode_cases.rstrip()))
operand_kind['type'] = '{}AndParamDatas'.format(name)
else:
raise TypeError("unexpected operand kind {} with parameters".format(operand_kind['category']))
ops_header.write('''inline uint16_t ExtraWordCount(const {name} {value_name})
{{
switch({value_name})
{{
{word_count_cases}
default: break;
}}
return 0;
}}
'''.format(name = name, value_name = operand_name(name), word_count_cases = word_count_cases.rstrip()))
ops_header.write('''
inline uint16_t ExtraWordCount(const rdcstr &val)
{
return uint16_t(val.size() / 4);
}
inline uint16_t OptionalWordCount(const rdcstr &val)
{
if(val.empty()) return 0;
return uint16_t(val.size() / 4) + 1;
}
inline uint16_t OptionalWordCount(const Id &val)
{
return val != Id() ? 1 : 0;
}
inline uint16_t OptionalWordCount(const PairIdRefLiteralInteger &val)
{
return val.first != Id() ? 2 : 0;
}
inline uint16_t OptionalWordCount(const PairLiteralIntegerIdRef &val)
{
return val.second != Id() ? 2 : 0;
}
inline uint16_t OptionalWordCount(const PairIdRefIdRef &val)
{
return val.first != Id() ? 2 : 0;
}
template<typename Type>
uint16_t MultiWordCount(const rdcarray<Type> &multiParams)
{
uint16_t ret = 0;
for(size_t i=0; i < multiParams.size(); i++)
ret += sizeof(multiParams[i])/sizeof(uint32_t);
return ret;
}
''')
###############################################################################
##
## Opcodes (declare enum / stringise)
##
###############################################################################
# Quickly preprocess, find parameters with duplicated names and disambiguate
for inst in spirv['instructions']:
if 'operands' in inst:
operands = inst['operands']
duplicates = []
for i,A in enumerate(operands):
for j,B in enumerate(operands):
if j <= i:
continue
a = operand_name(A['name'] if 'name' in A else kinds[A['kind']]['def_name'])
b = operand_name(B['name'] if 'name' in B else kinds[B['kind']]['def_name'])
if a == b:
if i not in duplicates:
duplicates.append(i)
if j not in duplicates:
duplicates.append(j)
if len(duplicates) > 0:
for idx,arg in enumerate(duplicates):
A = operands[arg]
operands[arg]['name'] = operand_name(A['name'] if 'name' in A else kinds[A['kind']]['def_name']) + str(idx)
used = []
decl = ''
stringise = ''
op_structs = ''
op_decoder = ''
used_ids = ''
disassemble = ''
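# Walk every instruction: collect its enum entry and stringiser, the cases for
# the id-usage callback switch and the disassembly switch, and the per-op
# helper struct information.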
for inst in spirv['instructions']:
decl += ' {} = {},\n'.format(inst['opname'][2:], inst['opcode'])
if inst['opcode'] in used:
continue
stringise += ' STRINGISE_ENUM_CLASS({});\n'.format(inst['opname'][2:])
result = -1
resultType = -1
used_ids += ' case rdcspv::Op::{}:\n'.format(inst['opname'][2:])
operands = []
if 'operands' in inst:
operands = inst['operands']
last_operand = operands[-1]
for i,operand in enumerate(operands):
if operand['kind'] == 'IdResult':
result = i+1
if operand['kind'] == 'IdResultType':
resultType = i+1
disassemble += ' case rdcspv::Op::{}:\n'.format(inst['opname'][2:])
disassemble += ' {\n'
if any([kinds[operand['kind']]['size'] is None for operand in operands]):
op_struct = 'struct {}; // has operands with variable sizes\n\n'.format(inst['opname'])
disassemble += ' OpDecoder decoded(it);\n'.format(inst['opname'][2:])
if resultType > 0 and result > 0:
disassemble += ' ret += declName(decoded.resultType, decoded.result) + " = ";\n'
elif resultType > 0 and result == -1:
raise ValueError("Unexpected result type without result")
elif resultType == -1 and result > 0:
disassemble += ' ret += idName(decoded.result) + " = ";\n'
disassemble += ' ret += "{}(...)";\n'.format(inst['opname'][2:])
disassemble += ' break;\n'
disassemble += ' }\n'
else:
params = ''
assign = ''
member_decl = ''
size_name = 'FixedWordSize'
construct_size = 'FixedWordSize'
size = 1 # opcode / wordcount packed
all_size = 1 # size, but with all optionals included
iter_init = ' memcpy(this, it.words(), sizeof(*this));'
complex_type = False
manual_init = ' this->op = OpCode;\n'
manual_init += ' this->wordCount = (uint16_t)it.size();\n'
oper_cast = ' operator Operation() const\n {\n rdcarray<uint32_t> words;\n'
has_funcs = ''
disassemble += ' Op{} decoded(it);\n'.format(inst['opname'][2:])
if resultType > 0 and result > 0:
disassemble += ' ret += declName(decoded.resultType, decoded.result) + " = ";\n'
elif resultType > 0 and result == -1:
raise ValueError("Unexpected result type without result")
elif resultType == -1 and result > 0:
disassemble += ' ret += idName(decoded.result) + " = ";\n'
disassemble += ' ret += "{}("'.format(inst['opname'][2:])
disassemble_params = False
if 'operands' in inst:
for i,operand in enumerate(operands):
kind = kinds[operand['kind']]
if kind['has_params'] and not complex_type:
size_name = 'MinWordSize'
construct_size = 'MinWordSize'
complex_type = True
manual_init += ' uint32_t word = {};\n'.format(all_size)
quantifier = ''
if 'quantifier' in operand:
quantifier = operand['quantifier']
if not complex_type:
size_name = 'MinWordSize'
construct_size = 'MinWordSize'
complex_type = True
if quantifier == '*':
manual_init += ' uint32_t word = {};\n'.format(all_size)
if kind['is_id']:
if quantifier == '*':
used_ids += ' for(size_t i=0; i < size-{0}; i++) callback(Id::fromWord(it.word({0}+i)), {1});\n'.format(all_size, 'true' if i+1==result else 'false')
else:
used_ids += ' callback(Id::fromWord(it.word({})), {});\n'.format(all_size, 'true' if i+1==result else 'false')
if kind['size'] < 0:
size_name = 'MinWordSize'
construct_size = 'MinWordSize'
complex_type = True
manual_init += ' uint32_t word = {};\n'.format(all_size)
opType,opName = (kind['type'], operand_name(operand['name'] if 'name' in operand else kind['def_name']))
if i+1 != resultType and i+1 != result:
if quantifier == '*':
disassemble += ' + ParamsToStr(idName, decoded.{})'.format(opName)
else:
if opType == 'IdScope':
disassemble += ' + ToStr(Scope(constIntVal(decoded.{})))'.format(opName)
elif opType == 'IdMemorySemantics':
disassemble += ' + ToStr(MemorySemantics(constIntVal(decoded.{})))'.format(opName)
else:
disassemble += ' + ParamToStr(idName, decoded.{})'.format(opName)
if i+1 < len(operands):
disassemble += ' + ", "'
disassemble_params = True
if quantifier == '?':
params += '{} {} = {}, '.format(opType, opName, kind['def_value'])
elif quantifier == '*':
params += 'const rdcarray<{}> &{} = {{}}, '.format(opType, opName)
else:
params += '{} {}, '.format(opType, opName)
if quantifier == '*':
member_decl += ' rdcarray<{}> {};\n'.format(opType, opName)
else:
member_decl += ' {} {};\n'.format(opType, opName)
assign += ' this->{} = {};\n'.format(opName, opName)
if operand['kind'] == 'LiteralString':
if quantifier == '*':
raise ValueError('operand {} in {} is string but has * quantifier'.format(opName, inst['opname']))
manual_init += ' this->{name} = DecodeParam<{type}>(it, word);\n'.format(name = opName, type = opType)
oper_cast += ' EncodeParam(words, {name});\n'.format(name = opName)
if quantifier == '?':
construct_size += ' + OptionalWordCount({})'.format(opName)
has_funcs += ' bool Has{name}() const {{ return wordCount > {idx}; }}\n'.format(idx = all_size, name = opName[0].upper() + opName[1:])
else:
construct_size += ' + ExtraWordCount({})'.format(opName)
elif kind['has_params']:
if quantifier == '*':
raise ValueError('operand {} in {} has * quantifier and params'.format(opName, inst['opname']))
manual_init += ' this->{name} = DecodeParam<{type}>(it, word);\n'.format(name = opName, type = opType)
oper_cast += ' EncodeParam(words, {name});\n'.format(name = opName)
construct_size += ' + ExtraWordCount({})'.format(opName)
elif quantifier == '*':
manual_init += ' this->{name} = MultiParam<{type}>(it, word);\n'.format(name = opName, type = opType)
construct_size += ' + MultiWordCount({})'.format(opName)
oper_cast += ' for(size_t i=0; i < {name}.size(); i++)\n'.format(name = opName)
oper_cast += ' {\n'
oper_cast += ' {push_words}\n'.format(push_words = kind['push_words']('{}[i]'.format(opName)))
oper_cast += ' }\n'
elif quantifier == '?':
manual_init += ' this->{name} = (it.size() > {idx}) ? {value} : {def_value};\n'.format(name = opName, type = opType, idx = all_size, value = kind['from_words']('it.word({})'.format(all_size)), def_value=kind['def_value'])
construct_size += ' + OptionalWordCount({})'.format(opName)
oper_cast += ' if({name} != {def_value}) {push_words}\n'.format(name = opName, def_value=kind['def_value'], push_words = kind['push_words'](opName))
has_funcs += ' bool Has{name}() const {{ return wordCount > {idx}; }}\n'.format(idx = all_size, name = opName[0].upper() + opName[1:])
else:
manual_init += ' this->{name} = {value};\n'.format(name = opName, type = opType, value = kind['from_words']('it.word({})'.format(all_size)))
oper_cast += ' {push_words}\n'.format(push_words = kind['push_words'](opName))
if kind['size'] >= 0:
all_size += kind['size']
else:
all_size += 1
if quantifier == '':
size = all_size
else:
assign = ' // no operands'
member_decl = ' // no operands'
if complex_type:
iter_init = manual_init.rstrip()
oper_cast += ' return Operation(OpCode, words);\n }\n'
else:
oper_cast = ''
if params != '':
params = params[0:-2]
if disassemble_params:
disassemble += ' + ")";\n'
else:
disassemble += ' ")";\n'
disassemble += ' break;\n'
disassemble += ' }\n'
if has_funcs != '':
has_funcs = '\n\n' + has_funcs
op_struct = '''struct {name}
{{
{name}(const ConstIter &it)
{{
{iter_init}
}}
{name}({params})
: op(Op::{opname})
, wordCount({construct_size})
{{
{assign}
}}
{oper_cast}
static constexpr Op OpCode = Op::{opname};
static constexpr uint16_t {size_name} = {size}U;
Op op;
uint16_t wordCount;
{member_decl}{has_funcs}
}};
'''.format(opname=inst['opname'][2:], name=inst['opname'], params=params, iter_init=iter_init, assign=assign.rstrip(),
member_decl=member_decl.rstrip(), size_name=size_name, construct_size=construct_size,
oper_cast=oper_cast, size=size, has_funcs=has_funcs.rstrip())
op_structs += op_struct
# Sanity check that quantifiers only happen on final operands. Also if there are multiple they are all ?, not *
if 'operands' in inst:
operands = inst['operands']
last_operand = operands[-1]
for operand in operands:
if operand != last_operand and 'quantifier' in operand and ('quantifier' not in last_operand or last_operand['quantifier'] != operand['quantifier'] or operand['quantifier'] != '?'):
raise ValueError('quantifier on operand {} in {} but not on last operand'.format(operand['name'], inst['opname']))
used_ids += ' break;\n'
if result < 0:
result = ' result = Id();'
else:
result = ' result = Id::fromWord(it.word({}));'.format(result)
if resultType < 0:
resultType = ' resultType = Id();'
else:
resultType = ' resultType = Id::fromWord(it.word({}));'.format(resultType)
op_decoder += ' case rdcspv::Op::{}:{}{} break;\n'.format(inst['opname'][2:], result, resultType)
used.append(inst['opcode'])
header.write('''enum class Op : uint16_t
{{
{decl}
Max,
}};
'''.format(decl = decl))
ops_header.write('''
{op_structs}
template<typename T>
inline rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const T &el)
{{
return ToStr(el);
}}
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const Id &el);
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcstr &el);
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairLiteralIntegerIdRef &el);
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairIdRefLiteralInteger &el);
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairIdRefIdRef &el);
{tostr_decls}
template<typename U>
inline rdcstr ParamsToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcarray<U> &ids)
{{
rdcstr ret = "{{";
for(size_t i=0; i < ids.size(); i++)
{{
ret += ParamToStr(idName, ids[i]);
if(i + 1 < ids.size())
ret += ", ";
}}
ret += "}}";
return ret;
}}
struct OpDecoder
{{
OpDecoder(const ConstIter &it);
static void ForEachID(const ConstIter &it, const std::function<void(Id,bool)> &callback);
static rdcstr Disassemble(const ConstIter &it, const std::function<rdcstr(Id,Id)> &declName, const std::function<rdcstr(rdcspv::Id)> &idName, const std::function<uint32_t(Id)> &constIntVal);
Op op;
uint16_t wordCount;
Id result;
Id resultType;
}};
'''.format(op_structs = op_structs.rstrip(), tostr_decls = tostr_decls))
cpp.write('''template <>
rdcstr DoStringise(const rdcspv::Op &el)
{{
BEGIN_ENUM_STRINGISE(rdcspv::Op);
{{
{stringise}
}}
END_ENUM_STRINGISE();
}}
namespace rdcspv
{{
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const Id &el)
{{
return idName(el);
}}
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcstr &el)
{{
return "\\"" + el + "\\"";
}}
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairLiteralIntegerIdRef &el)
{{
return StringFormat::Fmt("[%u, %s]", el.first, idName(el.second).c_str());
}}
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairIdRefLiteralInteger &el)
{{
return StringFormat::Fmt("[%s, %u]", idName(el.first).c_str(), el.second);
}}
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairIdRefIdRef &el)
{{
return StringFormat::Fmt("[%s, %s]", idName(el.first).c_str(), idName(el.second).c_str());
}}
{tostrs}
void OpDecoder::ForEachID(const ConstIter &it, const std::function<void(Id,bool)> &callback)
{{
size_t size = it.size();
switch(it.opcode())
{{
{used_ids}
case Op::Max: break;
}}
}}
rdcstr OpDecoder::Disassemble(const ConstIter &it, const std::function<rdcstr(Id,Id)> &declName, const std::function<rdcstr(rdcspv::Id)> &idName, const std::function<uint32_t(Id)> &constIntVal)
{{
rdcstr ret;
switch(it.opcode())
{{
{disassemble}
case Op::Max: break;
}}
return ret;
}}
OpDecoder::OpDecoder(const ConstIter &it)
{{
op = it.opcode();
wordCount = (uint16_t)it.size();
switch(op)
{{
{op_decoder}
case Op::Max: break;
}}
}}
}}; // namespace rdcspv
'''.format(stringise = stringise.rstrip(), op_decoder = op_decoder.rstrip(), used_ids = used_ids.rstrip(), disassemble = disassemble.rstrip(), tostrs = tostrs.rstrip()));
###############################################################################
##
## GLSL ext inst set (declare enum)
##
###############################################################################
decl = ''
stringise = ''
for inst in glsl450['instructions']:
decl += ' {} = {},\n'.format(inst['opname'], inst['opcode'])
stringise += ' STRINGISE_ENUM_CLASS({});\n'.format(inst['opname'])
header.write('''enum class GLSLstd450 : uint32_t
{{
{decl}
Max,
Invalid = ~0U,
}};
'''.format(decl = decl))
cpp.write('''template <>
rdcstr DoStringise(const rdcspv::GLSLstd450 &el)
{{
BEGIN_ENUM_STRINGISE(rdcspv::GLSLstd450);
{{
{stringise}
}}
END_ENUM_STRINGISE();
}}
template <>
rdcstr DoStringise(const rdcspv::Generator &el)
{{
BEGIN_ENUM_STRINGISE(rdcspv::Generator);
{{
{generator_tostr}
}}
END_ENUM_STRINGISE();
}}
'''.format(stringise = stringise.rstrip(), generator_tostr = generator_tostr.rstrip()))
header.write('''
}; // namespace rdcspv
DECLARE_STRINGISE_TYPE(rdcspv::GLSLstd450);
DECLARE_STRINGISE_TYPE(rdcspv::Generator);
''')
for operand_kind in spirv['operand_kinds']:
if operand_kind['category'] == 'ValueEnum' or operand_kind['category'] == 'BitEnum':
header.write('DECLARE_STRINGISE_TYPE(rdcspv::{});\n'.format(operand_kind['kind']))
ops_header.write('''
}; // namespace rdcspv
''')
header.close()
ops_header.close()
cpp.close()
|
from time import sleep
nome = input('Digite um nome: ')
print('Vamos verificar se seu nome tem Silva...')
sleep(3)
if 'silva' in nome.lower():  # case-insensitive check; the original condition only tested truthiness of upper()/lower()
print('Sim')
else:
print('Não')
|
import os
import sys
import numpy as np
import pandas as pd
from classes.Signal import Signal
from classes.DataSource import DataSource
from classes.SignalClassifier import SignalClassifier
import matplotlib.pyplot as plt
ds = DataSource()
# Load initial labeled training set T
labeled_ds = ds.load_or_process_labeled_dataset()
# Load entire (unlabeled) data set P
ds.load_or_process_entire_dataset()
# Remove T from P (i.e. P = P-T)
ds.remove_labeled_subset_from_dataset(labeled_ds)
# Initialize model
c = SignalClassifier()
input_dim = len(labeled_ds.feature_vec.iloc[0])
c.init_nn_model(input_dim=input_dim)
num_batches = 10
batch = 1
# Train on T
print("Batch %d/%d" % (batch, num_batches))
c.train(labeled_ds, num_epochs=10)
while batch<num_batches:
batch+=1
# First, sort the dataset by model predictions
ds.dataset = c.pred_and_sort(ds.dataset)
qty = 100
if ds.dataset.shape[0] < qty*2:
break # reached end of dataset
# Extract the most confidently classified new features T from P
most_confident_samples = pd.concat([ds.dataset.iloc[:qty],
ds.dataset.iloc[-qty:]])
# Drop these from greater dataset (in memory only) to avoid
# using them in next iteration (P = P-T)
samples_to_drop = list(ds.dataset.iloc[:qty].index.values) + \
list(ds.dataset.iloc[-qty:].index.values)
ds.dataset.drop(samples_to_drop, inplace=True)
# Generate labels based on predictions
labels = np.rint(most_confident_samples.pred)
most_confident_samples["label"] = list(labels)
print("\r\nBatch %d/%d" % (batch, num_batches))
c.train(most_confident_samples, num_epochs=4)
# Evaluate
test_ds = ds.load_or_process_labeled_dataset(from_file_id=20)
print("Positive test set size",test_ds[test_ds.label == 1].shape[0])
print("Negative test set size",test_ds[test_ds.label == 0].shape[0])
results = c.evaluate(test_ds)
results = {c.model.metrics_names[i]:v for i,v in enumerate(results)}
print(results)
# Display results
test_ds = c.pred_and_sort(test_ds)
ds.confusion(test_ds)
c.plot_losses()
# Plot a few of the most confidently predicted segments
ds.display_dataset(test_ds)
|
import os
import yaml
def configure():
# Load the config and get the headers setup
config = os.environ.get('GITISSUEBOT_CONFIG', None)
if not config:
return
with open('config.yml', 'r') as stream:
        config = yaml.safe_load(stream)  # safe_load avoids arbitrary object construction
return config
config = configure()
|
import unittest
import openmdao.api as om
from openmdao.utils.assert_utils import assert_check_partials, assert_near_equal
import dymos as dm
from dymos.transcriptions.explicit_shooting.test.test_rk_integration_comp import SimpleODE
from dymos.transcriptions.explicit_shooting.ode_evaluation_group import ODEEvaluationGroup
class TestODEEvaluationGroup(unittest.TestCase):
def test_eval(self):
ode_class = SimpleODE
time_options = dm.phase.options.TimeOptionsDictionary()
time_options['targets'] = 't'
time_options['units'] = 's'
state_options = {'x': dm.phase.options.StateOptionsDictionary()}
state_options['x']['shape'] = (1,)
state_options['x']['units'] = 's**2'
state_options['x']['rate_source'] = 'x_dot'
state_options['x']['targets'] = ['x']
param_options = {'p': dm.phase.options.ParameterOptionsDictionary()}
param_options['p']['shape'] = (1,)
param_options['p']['units'] = 's**2'
param_options['p']['targets'] = ['p']
control_options = {}
polynomial_control_options = {}
p = om.Problem()
tx = dm.GaussLobatto(num_segments=1, order=3)
p.model.add_subsystem('ode_eval', ODEEvaluationGroup(ode_class, time_options, state_options,
param_options, control_options,
polynomial_control_options,
grid_data=tx.grid_data,
ode_init_kwargs=None))
p.setup(force_alloc_complex=True)
p.model.ode_eval.set_segment_index(0)
p.set_val('ode_eval.states:x', [1.25])
p.set_val('ode_eval.time', [2.2])
p.set_val('ode_eval.parameters:p', [1.0])
p.run_model()
x = p.get_val('ode_eval.states:x')
t = p.get_val('ode_eval.time')
xdot_check = x - t**2 + 1
assert_near_equal(p.get_val('ode_eval.state_rate_collector.state_rates:x_rate'), xdot_check)
cpd = p.check_partials(compact_print=True, method='cs')
assert_check_partials(cpd)
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
import pytest
from salary_calculator.models import Payout
@pytest.mark.salary_calculator_signals
class TestSignals:
def test_update_payout_monthly_earnings(self, day):
day.bonus = 140
day.save()
payout = Payout.objects.get(month=day.month)
assert payout.monthly_earnings == 140
def test_remove_payout_monthly_earnings(self, day):
day.delete()
payout = Payout.objects.get(month=day.month)
assert payout.monthly_earnings == 0.0
|
import numpy
def calculate(data, baseline, baselag, cusumFlag, cusumK, minSigma, thresh):
"""
function to implement ears algorithm
data: list of numbers of on which algorithm is applied
baseline: 7 days for each algorithm
baselag: 0 for C1 and 2 for both C2 and C3
cusumFlag: 0 for both C1 and C2 and 1 for C3
cusumK: 1 for all C1, C2 and C3
minSigma: 0.1 for all C1, C2 and C3
    thresh: 2 for all C1, C2 and C3
    Returns two lists:
    earstat: list of the same length as the input data, holding the EARS value
        for each day
    curr: the same statistics as earstat, but without the unwanted initial
        values (the first 7 days for C1 and the first 9 days for C2 and C3)
    """
earstat = [0] * len(data)
cusum0 = 0
cusum1 = 0
ndxBase = 0
estMean = 0
estSigma = 0
currSum = 0
curr = []
for i in range(baseline + baselag, len(data)):
ndxBase = i - (baseline + baselag)
new_list = []
for j in range(baseline):
new_list.append(data[ndxBase])
ndxBase = ndxBase + 1
estMean = numpy.mean(new_list)
list_std = numpy.std(new_list)
estSigma = max(minSigma, list_std)
try:
currSum = max(0, data[i] - estMean - cusumK * estSigma) / estSigma
        except ZeroDivisionError:
currSum = 0
curr.append(currSum)
earstat[i] = currSum + cusumFlag * (cusum0 + cusum1)
cusum0 = cusum1
if currSum >= thresh:
cusum1 = 0
else:
cusum1 = currSum
return earstat, curr
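# Note (added): the three wrappers below correspond to the EARS C1/C2/C3
# variants encoded in calculate(): all use a 7-day baseline; C2 and C3 add a
# 2-day lag between the baseline window and the current day, and C3 also adds
# the CUSUM of the previous two sub-threshold statistics (cusumFlag=1).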
def calculateC1(data):
return calculate(data, 7, 0, 0, 1, .1, 2)
def calculateC2(data):
return calculate(data, 7, 2, 0, 1, .1, 2)
def calculateC3(data):
return calculate(data, 7, 2, 1, 1, .1, 2)
inp = [6, 142, 124, 145, 9, 6, 184, 130, 140, 136, 136, 20, 3, 169, 170, 134, 106,
181, 22, 1, 7, 233, 196, 153, 155, 19, 2, 277, 198, 192, 191, 218, 25, 4,
281, 161, 199, 182, 197, 19, 15, 272, 235, 227, 169, 153, 34, 7, 8, 299,
210, 135, 135, 18, 12, 163, 120, 85, 91, 87, 17, 3, 167, 130, 86, 87, 88,
11, 5, 128, 103, 86, 10154, 79, 13, 4, 113, 92, 82, 85, 59, 16, 9, 101, 100,
76, 68, 83, 7, 5, 110, 98, 60, 37, 91, 6, 1, 53, 87, 115, 93, 83, 8, 4, 126,
96, 84, 73, 67, 6, 3, 143, 88, 90, 92, 109, 8, 2, 150, 130, 91, 93, 101, 3,
0, 157, 119, 106, 104, 84, 6, 1, 140, 100, 90, 51, 92, 9, 2, 111, 103, 95,
89, 46, 16, 3, 1, 78, 77, 78, 59, 4, 2, 69, 70, 61, 53, 47, 5, 0, 68, 75,
55, 49, 42, 2, 1, 45, 44, 38, 32, 39, 6, 0, 62, 53, 39, 42, 39, 3, 2, 45,
44, 30, 1, 1, 6, 2, 32, 33, 13, 16, 34, 6, 0, 39, 30, 26, 21, 21, 4, 1, 29,
34, 20, 25, 21, 3, 3, 44, 1];
print len(calculateC2(inp)[0]), len(inp)
print calculateC2(inp)[0]
|
from django import forms
class EmailNotificationForm(forms.Form):
enabled = forms.BooleanField(required=False)
channel_id = forms.EmailField(required=True, label="Send emails to")
notification_id = forms.CharField(widget=forms.HiddenInput(), required=True)
class TelegramNotificationForm(forms.Form):
enabled = forms.BooleanField(required=False)
notification_id = forms.CharField(widget=forms.HiddenInput(), required=True)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils import translation
from ...utils import setup
class I18nFiltersTests(SimpleTestCase):
libraries = {
'custom': 'template_tests.templatetags.custom',
'i18n': 'django.templatetags.i18n',
}
@setup({'i18n32': '{% load i18n %}{{ "hu"|language_name }} '
'{{ "hu"|language_name_local }} {{ "hu"|language_bidi }} '
'{{ "hu"|language_name_translated }}'})
def test_i18n32(self):
output = self.engine.render_to_string('i18n32')
self.assertEqual(output, 'Hungarian Magyar False Hungarian')
with translation.override('cs'):
output = self.engine.render_to_string('i18n32')
self.assertEqual(output, 'Hungarian Magyar False maďarsky')
@setup({'i18n33': '{% load i18n %}'
'{{ langcode|language_name }} {{ langcode|language_name_local }} '
'{{ langcode|language_bidi }} {{ langcode|language_name_translated }}'})
def test_i18n33(self):
output = self.engine.render_to_string('i18n33', {'langcode': 'nl'})
self.assertEqual(output, 'Dutch Nederlands False Dutch')
with translation.override('cs'):
output = self.engine.render_to_string('i18n33', {'langcode': 'nl'})
self.assertEqual(output, 'Dutch Nederlands False nizozemsky')
@setup({'i18n38_2': '{% load i18n custom %}'
'{% get_language_info_list for langcodes|noop:"x y" as langs %}'
'{% for l in langs %}{{ l.code }}: {{ l.name }}/'
'{{ l.name_local }}/{{ l.name_translated }} '
'bidi={{ l.bidi }}; {% endfor %}'})
def test_i18n38_2(self):
with translation.override('cs'):
output = self.engine.render_to_string('i18n38_2', {'langcodes': ['it', 'fr']})
self.assertEqual(
output,
'it: Italian/italiano/italsky bidi=False; '
'fr: French/français/francouzsky bidi=False; '
)
|
from validation import vworkspace
with vworkspace() as w:
w.activate('must_regions')
w.props.constant_path_effects = False
w.all_functions.display('PRINT_CODE_REGIONS')
w.all_functions.display('PRINT_CODE_IN_REGIONS')
w.all_functions.display('PRINT_CODE_OUT_REGIONS')
|
from ._additive_attention import AdditiveAttention
__all__ = ["AdditiveAttention"]
|
def are_anagrams(s1, s2):
if len(s1) != len(s2):
return False
return sorted(s1) == sorted(s2)
s1 = "nameless"
s2 = "salesman"
str1 = "name"
str2 = "mane"
|
import os
def loadCredentialsFile(filePath):
data = None
with open(filePath, "r") as f:
data = f.read().splitlines()
if not data:
raise FileNotFoundError
credentials_list = []
for line in data:
username, password = line.split(" ", 1)
new_login = {}
new_login["username"] = username
new_login["password"] = password
credentials_list.append(new_login)
return credentials_list
# Testing above code
# username = "123"
# password = "456"
# login_credentials = loadCredentialsFile("credentials.txt")
# for credential in login_credentials:
# if credential["username"] == username and credential["password"] == password:
# print("Correct!")
# else:
# print("Nope D:")
|
from JumpScale import j
descr = """
Sets network for LXC machine
"""
name = "vfs_setnetwork"
category = "vfw"
organization = "jumpscale"
author = "zains@incubaid.com"
license = "bsd"
version = "1.0"
roles = []
def action(name, vxlanid, pubips, dmzips):
import JumpScale.lib.lxc
import JumpScale.baselib.remote
bridge = j.application.config.get('lxc.bridge.public')
gateway = j.application.config.get('lxc.bridge.public.gw')
j.system.platform.lxc.networkSetPublic(name, netname="pub0", bridge=bridge, pubips=pubips, gateway=gateway)
j.system.platform.lxc.networkSetPrivateOnBridge(name, netname="dmz0", bridge=bridge, ipaddresses=dmzips)
# TODO: call networkSetPrivateVXLan with parameters
|
import pytest
@pytest.fixture()
def source_simple():
source_simple = "\n".join(["begin\npy{{a=1}}e{{a;b}}end"])
return source_simple
def test_render_inline_code(jupyter, source_simple):
splitter = jupyter.parser.split(source_simple)
next(splitter)
cell = next(splitter)
assert cell.context["code"] == "a=1"
next(splitter)
cell = next(splitter)
assert cell.context["code"] == "a;b"
|
from WordVecDict import WordVecDict
import numpy as np
def readTestDataFile(filename, prefix):
data = []
for line in open(filename, 'r').readlines():
if len(line) == 0:
continue
sentence = []
for w in line.split():
sentence.append(prefix + w)
data.append(sentence)
return data
def readTestData():
zhData = readTestDataFile("data/test.zh_parsed", "1")
jaData = readTestDataFile("data/test.ja_parsed", "2")
return zhData, jaData
def calculateAcc(vecFile):
wordVecDict = WordVecDict("output/vocab.cj_001", vecFile)
zhData, jaData = readTestData()
n = len(zhData)
zhVec = []
jaVec = []
for i in xrange(n):
zhVec.append(wordVecDict.sentenceVec(zhData[i]))
jaVec.append(wordVecDict.sentenceVec(jaData[i]))
matrix = []
for i in xrange(n):
line = []
for j in xrange(n):
line.append(np.dot(zhVec[i], jaVec[j]))
matrix.append(line)
dic = {}
acc = 0
for i in xrange(n):
order = [(matrix[i][j], j) for j in xrange(n)]
order.sort()
for j in xrange(n):
if order[j][1] == i:
#print "Zh", i, "matches Ja", i, "at position", n - j
                k = n - j # order is from small to big, so k == 1 is the best match
dic[k] = dic.get(k, 0) + 1
acc += 1.0 - (k - 1.0) / n
break
acc = acc * 1.0 / n
return acc
def batch():
for s in xrange(1, 6):
for c in xrange(1, 6):
inputFile = "output/batch_02/exp_s_0.0%i_c_0.0%i.vec.txt" % (s, c)
print calculateAcc(inputFile),
print ""
if __name__ == "__main__":
#print calculateAcc("output/batch_02/ctrl_s_0.03.vec.txt")
batch()
|
#!/usr/bin/python3
# By NOMO
from netmiko import Netmiko
from getpass import getpass
from pprint import pprint
import re
import sys
'''
All of this functions take at least the host_dict parameter.
This is expected to be a dict just like this:
host1 = {
'host': '10.20.20.100',
'username': 'cisco',
'password': 'cisco',
'device_type': 'cisco_ios',
}
'''
# Returns formatted ARP table. Takes vrf="" as arg:
# LIST of DICTS
def get_arp_ios(host_dict, vrf=""):
hostname = host_dict['host']
#print("Trying connection to " + hostname)
try:
# Establish connection
connection = Netmiko(**host_dict)
except:
print("get_arp_ios - Could not establish ssh connection to host" + hostname)
return -1
# Run command with textfsm - this should return structured data
#print("Running command")
if vrf == "":
output = connection.send_command("sh ip arp", use_textfsm = True)
else:
output = connection.send_command("sh ip arp vrf " + vrf, use_textfsm = True)
# Return structured data
#print("Returning output")
return output
# Returns formatted sh ip int brie table:
# LIST of DICTS
def get_ip_int_bri_ios(host_dict):
hostname = host_dict['host']
#print("Trying connection to " + hostname)
try:
# Establish connection
connection = Netmiko(**host_dict)
except:
print("get_ip_int_bri_ios - Could not establish ssh connection to host" + hostname)
return -1
# Run command with textfsm - this should return structured data
#print("Running command")
output = connection.send_command("sh ip int brie", use_textfsm = True)
# Return structured data
#print("Returning output")
return output
# Returns formatted sh int description
# LIST of DICTS
# !ATTENTION! - Uses a TEXTFSM template that is not built in. To use this you must download the template to
# your template repo and add it to the ntc-templates index file.
def get_int_desc_ios(host_dict):
hostname = host_dict['host']
#print("Trying connection to " + hostname)
try:
# Establish connection
connection = Netmiko(**host_dict)
except:
print("get_int_desc_ios - Could not establish ssh connection to host" + hostname)
return -1
# Run command with textfsm - this should return structured data
print("Running command")
output = connection.send_command("sh int desc", use_textfsm = True)
# Return structured data
#print("Returning output")
return output
# Returns formatted sh cdp neig description
# LIST of DICTS
def get_cdp_neig_ios(host_dict):
hostname = host_dict['host']
#print("Trying connection to " + hostname)
try:
# Establish connection
connection = Netmiko(**host_dict)
except:
print("get_cdp_neig_ios - Could not establish ssh connection to host" + hostname)
return -1
# Run command with textfsm - this should return structured data
#print("Running command")
output = connection.send_command("sh cdp neig", use_textfsm = True)
# Return structured data
#print("Returning output")
return output
# Returns formatted sh ver
# LIST of 1 DICT element
def get_ver_ios(host_dict):
hostname = host_dict['host']
#print("Trying connection to " + hostname)
try:
# Establish connection
connection = Netmiko(**host_dict)
except:
print("get_sh_ver_ios - Could not establish ssh connection to host" + hostname)
return -1
# Run command with textfsm - this should return structured data
#print("Running command")
output = connection.send_command("sh ver", use_textfsm = True)
# Return structured data
#print("Returning output")
return output
# Returns formatted sh mac addr (MAC address table)
# LIST of DICTs
def get_mac_address_table(host_dict):
hostname = host_dict['host']
#print("Trying connection to " + hostname)
try:
# Establish connection
connection = Netmiko(**host_dict)
except:
print("get_mac_address_table - Could not establish ssh connection to host" + hostname)
return -1
# Run command with textfsm - this should return structured data
#print("Running command")
output = connection.send_command("sh mac addr", use_textfsm = True)
# Return structured data
#print("Returning output")
return output
# Takes host and MAC as arguments
# Returns a formatted list with a single dict for the CAM entry matching the MAC
def get_one_mac_address(host_dict, mac_addr):
hostname = host_dict['host']
#print("Trying connection to " + hostname)
try:
# Establish connection
connection = Netmiko(**host_dict)
except:
print("get_one_mac_address - Could not establish ssh connection to host" + hostname)
return -1
# Run command with textfsm - this should return structured data
#print("Running command")
output = connection.send_command("sh mac addr add %s" %(mac_addr), use_textfsm = True)
# Return structured data
#print("Returning output")
return output
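# Hedged usage sketch (not part of the original module): the host dict below is
# a placeholder matching the format documented at the top of this file.
if __name__ == '__main__':
    example_host = {
        'host': '10.20.20.100',
        'username': 'cisco',
        'password': 'cisco',
        'device_type': 'cisco_ios',
    }
    # Each helper returns textfsm-parsed structured data (a list of dicts),
    # or -1 if the SSH connection could not be established.
    arp_table = get_arp_ios(example_host)
    version_info = get_ver_ios(example_host)
    pprint(arp_table)
    pprint(version_info)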
|
import pandas as pd
import glob
import os
# df = pd.read_csv('/home/ec2-user/data/raw/OKEX-BTC-THIS-WEEK-TRADES.csv.2018-09-07.gz',
# compression='gzip',
# )
# print(df.head())
# 1536278411140 0 1411803432779778 BID 6484.75 6.0
# 0 1536278417993 0 1411803879014401 ASK 6482.30 28.0
# 1 1536278425862 0 1411804380364826 BID 6483.06 10.0
# 2 1536278425862 0 1411804394520587 ASK 6482.30 156.0
# 3 1536278426345 0 1411804436398086 ASK 6480.83 16.0
# 4 1536278426345 0 1411804436398088 ASK 6480.82 18.0
# df = pd.read_csv('/home/ec2-user/data/raw/OKEX-BTC-THIS-WEEK-TRADES.csv.2019-02-08.gz',
# compression='gzip',
# )
# print(df.head())
# 1549584036205 0 2283800873500675 BID 3361.99 16.0
# 0 1549584057687 0 2283802292944917 ASK 3361.98 4.0
# 1 1549584057687 0 2283802292944919 ASK 3361.98 4.0
# 2 1549584057688 0 2283802292944921 ASK 3361.88 2.0
# 3 1549584057688 0 2283802292944923 ASK 3361.88 2.0
# 4 1549584057688 0 2283802292944925 ASK 3361.87 4.0
# df = pd.read_csv('/home/ec2-user/data/raw/OKEX-BTC-THIS-WEEK-TRADES.csv.2019-05-19.gz',
# compression='gzip',
# )
# print(df.head())
# 1558224001087 0 2850029620264967 BID 7274.08 5.0
# 0 1558224001087 0 2850029620264969 BID 7274.08 10.0
# 1 1558224001087 0 2850029620264971 BID 7274.08 5.0
# 2 1558224001379 0 2850029631078410 BID 7274.08 10.0
# 3 1558224001379 0 2850029631078412 BID 7274.08 20.0
# 4 1558224001379 0 2850029645234183 ASK 7274.07 45.0
# df = pd.read_csv('/home/ec2-user/data/raw/OKEX-BTC-THIS-WEEK-TRADES.csv.2019-11-16.gz',
# compression='gzip',
# )
# print(df.head())
# 1573862401104 0 363196 BID 8478.53 0.22409545050851973
# 0 1573862401689 0 363197 BID 8478.54 0.672286
# 1 1573862401708 0 363198 BID 8478.54 1.167654
# 2 1573862401708 0 363199 BID 8478.54 0.837408
# 3 1573862402225 0 363200 ASK 8478.79 0.106147
# 4 1573862402260 0 363201 ASK 8478.79 0.070765
|
# Adapted from score written by wkentaro
# https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py
import numpy as np
class runningScore(object):
def __init__(self, n_classes):
self.n_classes = n_classes
self.confusion_matrix = np.zeros((n_classes, n_classes))
def _fast_hist(self, label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(
n_class * label_true[mask].astype(int) + label_pred[mask], minlength=n_class ** 2
).reshape(n_class, n_class)
return hist
def update(self, label_trues, label_preds):
for lt, lp in zip(label_trues, label_preds):
self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes)
def get_scores(self):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = self.confusion_matrix
acc = np.diag(hist).sum() / hist.sum()
acc_cls = np.diag(hist) / hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
mean_iu = np.nanmean(iu)
freq = hist.sum(axis=1) / hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
cls_iu = dict(zip(range(self.n_classes), iu))
return (
{
"Overall Acc: \t": acc,
"Mean Acc : \t": acc_cls,
"FreqW Acc : \t": fwavacc,
"Mean IoU : \t": mean_iu,
},
cls_iu,
)
def reset(self):
self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
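# Hedged usage sketch (not in the original file): accumulating a confusion
# matrix over predictions and reading back the metrics listed in get_scores().
#   score = runningScore(n_classes=3)
#   score.update(label_trues=gt_batch, label_preds=pred_batch)  # numpy label arrays
#   metrics, per_class_iou = score.get_scores()
#   score.reset()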
class averageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class heightacc(object):
'''
compute acc
'''
def __init__(self):
self.reset()
def reset(self):
        #self.r2 = 0  # hard to accumulate incrementally; derived in getacc() instead
self.mse = 0
self.se = 0
self.mae = 0
#self.mape = 0
self.count = 0
self.yrefmean = 0
self.ypref2 = 0
def update(self, ypred, yref, num):
self.se += np.mean(ypred-yref)*num
self.mae += np.mean(np.abs(ypred-yref))*num
self.mse += np.mean((ypred-yref)**2)*num
#self.mape += np.mean(np.abs((ypred-yref)/(1e-8+yref)))*num
self.yrefmean += np.mean(yref)*num
self.ypref2 += np.mean(yref**2)*num
self.count += num
def getacc(self):
se = self.se/self.count
mae = self.mae/self.count
mse = self.mse/self.count
#mape = self.mape/self.count
rmse = np.sqrt(mse)
yrefmean = self.yrefmean/self.count
yref2 = self.ypref2/self.count
r2 = 1 - mse/(yref2 -yrefmean**2)
return r2, rmse, mae, se
|
# --------------------------------------------------------
# FPN
# Copyright (c) 2017 BUPT-PRIV
# Licensed under The MIT License [see LICENSE for details]
# Written by Soeaver Yang
# --------------------------------------------------------
import caffe
import numpy as np
import yaml
from fast_rcnn.config import cfg
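# Note (added): assign_pyramid maps an RoI to an FPN level using the heuristic
# k = k0 + log2(sqrt(w*h) / 224) from the FPN paper; this implementation keeps
# the original np.ceil rounding, and k is clamped to the configured pyramid
# levels inside AssignROISLayer.forward below.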
def assign_pyramid(roi, k0=4, size=224):
roi_width = roi[3] - roi[1]
roi_height = roi[4] - roi[2]
return np.ceil(np.log2(np.sqrt(float(roi_width*roi_height))/float(size)) + k0)
class AssignROISLayer(caffe.Layer):
"""
Outputs object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def setup(self, bottom, top):
# parse the layer parameter string, which must be valid YAML
layer_params = yaml.load(self.param_str)
self._pyramid_number = layer_params.get('pyramid_number', ((2, 3, 4, 5)))
self._base_size = layer_params.get('base_scale', 4)
self._pre_training_size = layer_params.get('pre_training_size', 224) # 299 for inception
assert len(top) == len(self._pyramid_number)
for i in xrange(len(top)):
top[i].reshape(1, 5)
def forward(self, bottom, top):
all_rois = bottom[0].data
min_pyramid = min(self._pyramid_number)
max_pyramid = max(self._pyramid_number)
assigned_rois = [[] for _ in xrange(len(self._pyramid_number))] # 2, 3, 4, 5
for _ in all_rois:
k = assign_pyramid(_, k0=self._base_size, size=self._pre_training_size)
k = min(max(min_pyramid, k), max_pyramid)
idx = self._pyramid_number.index(k)
assigned_rois[idx].append(_)
for i in xrange(len(self._pyramid_number)):
rois_blob = np.asarray(assigned_rois[i])
top[i].reshape(*(rois_blob.shape))
top[i].data[...] = rois_blob
# print top[0].data[...].shape
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
|
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# lon_0, lat_0 are the center point of the projection.
# resolution = 'l' means use low resolution coastlines.
m = Basemap(projection='ortho',lon_0=-105,lat_0=40,resolution='l')
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,420.,60.))
m.drawmapboundary(fill_color='aqua')
plt.title("Full Disk Orthographic Projection")
plt.show()
|
from liquer import Q
class B:
bar = 'Hello World!'
baz = 1
class A:
foo = B()
a = A()
q = Q(foo__bar='Hello World!') | Q(foo__bar__istartswith='hello',
foo__baz__gt=1)
assert q(a)
a.foo.bar = 'Hello 2013!'
assert not q(a)
a.foo.baz = 2
assert q(a)
|
from setuptools import setup
setup(
name='nimiq-api-python',
version='0.0.1',
description='A python client for the Nimiq JSON-RPC API',
url='http://github.com/jgraef/nimiq-api-python',
author='Janosch Gräf',
author_email='janosch.graef@cispa.saarland',
license='MIT',
packages=['nimiqrpc'],
zip_safe=True,
install_requires=[
'requests'
],
)
|
import torch
torch.cuda.current_device()
import torch.nn as nn
import torchvision
import random
import pytorch_batch_sinkhorn as spc
# Decide which device we want to run on
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class PixelLoss(nn.Module):
def __init__(self, p=1):
super(PixelLoss, self).__init__()
self.p = p
def forward(self, canvas, gt, ignore_color=False):
if ignore_color:
canvas = torch.mean(canvas, dim=1)
gt = torch.mean(gt, dim=1)
loss = torch.mean(torch.abs(canvas-gt)**self.p)
return loss
class VGGPerceptualLoss(torch.nn.Module):
def __init__(self, resize=True):
super(VGGPerceptualLoss, self).__init__()
vgg = torchvision.models.vgg16(pretrained=True).to(device)
blocks = []
blocks.append(vgg.features[:4].eval())
blocks.append(vgg.features[4:9].eval())
blocks.append(vgg.features[9:16].eval())
blocks.append(vgg.features[16:23].eval())
for bl in blocks:
for p in bl:
p.requires_grad = False
self.blocks = torch.nn.ModuleList(blocks)
self.transform = torch.nn.functional.interpolate
self.mean = torch.tensor([0.485, 0.456, 0.406]).view(1,3,1,1)
self.std = torch.tensor([0.229, 0.224, 0.225]).view(1,3,1,1)
self.resize = resize
def forward(self, input, target, ignore_color=False):
self.mean = self.mean.type_as(input)
self.std = self.std.type_as(input)
if ignore_color:
input = torch.mean(input, dim=1, keepdim=True)
target = torch.mean(target, dim=1, keepdim=True)
if input.shape[1] != 3:
input = input.repeat(1, 3, 1, 1)
target = target.repeat(1, 3, 1, 1)
input = (input-self.mean) / self.std
target = (target-self.mean) / self.std
if self.resize:
input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
loss = 0.0
x = input
y = target
for block in self.blocks:
x = block(x)
y = block(y)
loss += torch.nn.functional.l1_loss(x, y)
return loss
class VGGStyleLoss(torch.nn.Module):
def __init__(self, transfer_mode, resize=True):
super(VGGStyleLoss, self).__init__()
vgg = torchvision.models.vgg16(pretrained=True).to(device)
for i, layer in enumerate(vgg.features):
if isinstance(layer, torch.nn.MaxPool2d):
vgg.features[i] = torch.nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
blocks = []
if transfer_mode == 0: # transfer color only
blocks.append(vgg.features[:4].eval())
blocks.append(vgg.features[4:9].eval())
else: # transfer both color and texture
blocks.append(vgg.features[:4].eval())
blocks.append(vgg.features[4:9].eval())
blocks.append(vgg.features[9:16].eval())
blocks.append(vgg.features[16:23].eval())
for bl in blocks:
for p in bl:
p.requires_grad = False
self.blocks = torch.nn.ModuleList(blocks)
self.transform = torch.nn.functional.interpolate
self.mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
self.std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
self.resize = resize
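    # Note (added): gram_matrix computes G = F @ F^T / (C*H*W) per batch element,
    # where F is the (C x H*W) feature map; the style loss in forward() is the
    # squared Frobenius distance between the Gram matrices of input and target.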
def gram_matrix(self, y):
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * w * h)
return gram
def forward(self, input, target):
if input.shape[1] != 3:
input = input.repeat(1, 3, 1, 1)
target = target.repeat(1, 3, 1, 1)
input = (input - self.mean) / self.std
target = (target - self.mean) / self.std
if self.resize:
input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
loss = 0.0
x = input
y = target
for block in self.blocks:
x = block(x)
y = block(y)
gm_x = self.gram_matrix(x)
gm_y = self.gram_matrix(y)
loss += torch.sum((gm_x-gm_y)**2)
return loss
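# Note (added): SinkhornLoss below compares canvas and ground truth as discrete
# measures: images are downsampled to at most 24x24, the pixel intensities of
# one randomly chosen channel act as point masses on a normalized (y, x) grid,
# and the entropy-regularized transport cost is computed by pytorch_batch_sinkhorn.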
class SinkhornLoss(nn.Module):
def __init__(self, epsilon=0.01, niter=5, normalize=False):
super(SinkhornLoss, self).__init__()
self.epsilon = epsilon
self.niter = niter
self.normalize = normalize
def _mesh_grids(self, batch_size, h, w):
a = torch.linspace(0.0, h - 1.0, h).to(device)
b = torch.linspace(0.0, w - 1.0, w).to(device)
y_grid = a.view(-1, 1).repeat(batch_size, 1, w) / h
x_grid = b.view(1, -1).repeat(batch_size, h, 1) / w
grids = torch.cat([y_grid.view(batch_size, -1, 1), x_grid.view(batch_size, -1, 1)], dim=-1)
return grids
def forward(self, canvas, gt):
batch_size, c, h, w = gt.shape
if h > 24:
canvas = nn.functional.interpolate(canvas, [24, 24], mode='area')
gt = nn.functional.interpolate(gt, [24, 24], mode='area')
batch_size, c, h, w = gt.shape
canvas_grids = self._mesh_grids(batch_size, h, w)
gt_grids = torch.clone(canvas_grids)
        # randomly select a color channel to speed up computation and reduce memory use
i = random.randint(0, 2)
img_1 = canvas[:, [i], :, :]
img_2 = gt[:, [i], :, :]
mass_x = img_1.reshape(batch_size, -1)
mass_y = img_2.reshape(batch_size, -1)
if self.normalize:
loss = spc.sinkhorn_normalized(
canvas_grids, gt_grids, epsilon=self.epsilon, niter=self.niter,
mass_x=mass_x, mass_y=mass_y)
else:
loss = spc.sinkhorn_loss(
canvas_grids, gt_grids, epsilon=self.epsilon, niter=self.niter,
mass_x=mass_x, mass_y=mass_y)
return loss
|
import os
import requests
import time
#download method
def download_file(url, filename):
''' Downloads file from the url and save it as filename '''
# check if file already exists
if not os.path.isfile(filename):
print('Downloading File')
response = requests.get(url)
# Check if the response is ok (200)
if response.status_code == 200:
# Open file and write the content
with open(filename, 'wb') as file:
# A chunk of 128 bytes
for chunk in response:
file.write(chunk)
time.sleep(1)
else:
print('File exists')
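# Hedged example call (URL and filename are placeholders, not from the original):
# download_file('https://example.com/sample.csv', 'sample.csv')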
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__version__ = "0.1.0"
__author__ = "Angus Alves"
__credits__ = "Props to my peeps"
__copyright__ = "(C) 2016-2016 Angus Alves. MIT"
import sys, os, shutil
from datetime import date
class PythonWrastler:
def __init__(self,packagename=None,author=None,credits=None,created=None,crholder=None,license=None):
self.packagename = packagename
self.author = author
self.credits = credits
self.created = created
self.crholder = crholder
self.license = license
        self.year = date.today().strftime("%Y")
def getParams(self):
self.packagename = raw_input("[?] Enter a name for this package")
self.author = raw_input("[?] Enter the name of the author")
self.credits = raw_input("[?] Enter the credits")
self.created = raw_input("[?] Enter the year created(yyyy)")
self.crholder = raw_input("[?] Enter the name of copy right holder")
self.license = raw_input("[?] Enter the type of license")
def setupProject(self,getparams):
if getparams==True:
self.getParams()
else:
if type(self.packagename) is not str:
return "[-] Missing package name"
elif type(self.author) is not str:
return "[-] Missing author"
elif type(self.credits) is not str:
return "[-] Missing credits"
elif type(self.created) is not str:
return "[-] Missing year created"
elif type(self.crholder) is not str:
return "[-] Missing name of copy right holder"
elif type(self.license) is not str:
return "[-] Missing type of license"
shutil.copyfile("../templates/pythoned.py","./"+self.packagename+".py")
pyf = open("./"+self.packagename+".py","r+")
pyfcontents = pyf.read()
pyfcontents.replace("{!packagename}", self.packagename)
pyfcontents.replace("{!author}", self.author)
pyfcontents.replace("{!credits}", self.credits)
pyfcontents.replace("{!created}", self.created)
pyfcontents.replace("{!crholder}", self.crholder)
pyfcontents.replace("{!license}", self.license)
pyfcontents.replace("{!year}", self.year)
def main():
if len(sys.argv) == 7:
pw = PythonWrastler(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],sys.argv[5],sys.argv[6])
pw.setupProject(False)
else:
pw = PythonWrastler()
pw.setupProject(True)
if __name__ == "__main__":
main()
|
#! /usr/bin/python
import sys
import string
import psyco
import gc
#my_data=[line.split for line in file('data.txt')]
from psyco.classes import *
psyco.full()
fileName="data.txt"
thresNum=5
thresProb=0.5
Gcnt=0
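# Note (added): this script implements Apriori-style frequent itemset mining
# using vertical tid-lists: each Item keeps the list of transaction indices in
# which its itemlst occurs, candidate (k+1)-itemsets are built by merging
# k-itemsets that share a common prefix (see prefixEqu/merge), and support is
# counted by intersecting the two index lists before filtering in select_freq.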
class Rule:
def __init__(self,cond=(),rslt=(),prob=0.0,cosn=0.0,lift=1.0):
self.cond=cond
self.rslt=rslt
self.prob=prob
self.cosn=cosn
self.lift=lift
class Item:
def __init__(self,itemlst=[],indexlst=[]):
self.itemlst=itemlst
self.indexlst=indexlst
def dispItem(item):
print "itemlst: ",;print item.itemlst
print "indexlst:",;print item.indexlst
def dispItemSetLst(itemsetlst):
for item in itemsetlst:
print "itemlst: ",;print item.itemlst,
print "indexlst:",;print len(item.indexlst)
#print "indexlst:",;print item.indexlst
def generate_one_itemset(filename):
my_data=[line.split() for line in file(filename)]
oneSet={}
i=-1
for trans in my_data:
i=i+1
for item in trans:
item=int(item)
if oneSet.has_key(item):
oneSet[item].append(i)
else:
oneSet[item]=list([i,])
onelst=[]
for k,v in oneSet.iteritems():
onelst.append(Item([int(k)],v))
onelst=select_freq(onelst)
return onelst
def select_freq(itemlst):
return filter(lambda item:len(item.indexlst)>=thresNum,itemlst)
def generate_next_cand_itemset(freqLst):
cand_next=[]
cnt=0
for i in range(len(freqLst)):
item1=freqLst[i]
gc.collect()
for j in range(i+1,len(freqLst)):
cnt=cnt+1
if cnt%100000==0:
print '-',
sys.__stdout__.flush()
item2=freqLst[j]
if prefixEqu(item1,item2):
cand_next.append(merge(item1,item2))
else:
break
print ''
return cand_next
def prefixEqu(item1,item2):
return item1.itemlst[:-1]==item2.itemlst[:-1]
def merge(item1,item2):
if item1.itemlst[-1]<item2.itemlst[-1]:
itemlst=item1.itemlst+item2.itemlst[-1:]
else:
itemlst=item2.itemlst+item1.itemlst[-1:]
idxlst=set(item1.indexlst)&set(item2.indexlst)
return Item(itemlst,idxlst)
def loop(oneSet):
allSet=[oneSet,]
candSet=oneSet
print "onesSet length %d" % len(candSet)
cnt=1
while 1:
cnt=cnt+1
print "loop",;print cnt
print "generate %d cand itemset..." % cnt
next=generate_next_cand_itemset(candSet)
print "select freq set..."
freqSet=select_freq(next)
print len(freqSet)
#dispItemSetLst(freqSet)
if len(freqSet)==0:
break
candSet=freqSet
allSet.append(freqSet)
gc.collect()
if cnt==6:
sys.exit()
return allSet
if __name__=="__main__":
print "begin..."
print "generate one itemlst"
onelst=generate_one_itemset(fileName)
dispItemSetLst(onelst)
print "loop..."
loop(onelst)
print "DONE!"
|
from setuptools import setup, find_packages
import re
VERSIONFILE = "Solos/_version.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
setup(name='solos',
version=verstr,
description='Python implementation of Solos: A Dataset for Audio-Visual Music Source Separation and Localization',
url='https://juanfmontesinos.github.io/Solos/',
author='Juan Montesinos',
author_email='juanfelipe.montesinos@upf.edu',
packages=find_packages(),
install_requires=['Fire', 'youtube_dl', 'googledrivedownloader'],
      package_data={'Solos': ['json_files/solos_ids.json']},
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3", ],
zip_safe=False)
|
"""Package for retrieving and uploading product quantities to Opencart."""
import copy
import json
import logging
import requests
from errors import Error, NotFoundError, MultipleResultsError, CommunicationError
_LIST_PRODUCTS_ENDPOINT = 'module/store_sync/listlocalproducts'
# TODO(nmcapule): Endpoint
_UPDATE_PRODUCT_QUANTITY_ENDPOINT = 'module/store_sync/setlocalquantity'
class OpencartProduct(object):
"""Describes a Opencart uploaded product."""
def __init__(self, model, quantity=0):
self.model = model
self.quantity = quantity
self._modified = False
@property
def stocks(self):
"""Getter for (available) stocks. Alias for quantity."""
return self.quantity
@stocks.setter
def stocks(self, value):
"""Setter for (available) stocks."""
self._modified = True
self.quantity = value
@property
def modified(self):
"""Flag for when a Opencart product's attribute has been modified."""
return self._modified
class OpencartRequestResult:
"""Describes a request result from querying Opencart."""
def __init__(
self, attachment=None, endpoint='', payload='', result=None, error_code=0,
error_description=''):
self.attachment = attachment
self.endpoint = endpoint
self.payload = payload
self.result = result
self.error_code = error_code
self.error_description = error_description
class OpencartClient:
"""Implements a Opencart Client."""
def __init__(self, domain, username, password):
self._domain = domain
self._username = username
self._password = password
self._products = []
self.Refresh()
def _Request(self, endpoint, payload='', content_parser=None):
"""Creates and sends a request to the given Opencart endpoint.
Args:
endpoint: str, The endpoint to send the request.
payload: any, Parameter arguments.
Returns:
OpencartRequestResult, The formatted response.
Raises:
CommunicationError: Cannot communicate properly with Opencart.
"""
if payload:
payload = '&' + payload
params = {
'username': self._username,
'password': self._password,
'redirect': '{0}{1}{2}'.format(self._domain, endpoint, payload)
}
session = requests.Session()
r = session.post(self._domain + "common/login", data=params)
error_code = 0
error_description = 'SUCCESS'
result = OpencartRequestResult(
endpoint=endpoint, payload=payload, error_code=error_code,
error_description=error_description)
if not content_parser:
result.result = r.content
else:
result.result = content_parser(r.content)
return result
def Refresh(self):
"""Refreshes product records from Opencart.
Raises:
CommunicationError: Cannot communicate properly with Opencart.
"""
items = []
        def content_parser(content):
            json_stub = json.loads(content)
for json_product in json_stub:
item = OpencartProduct(
model=json_product['model'],
quantity=int(json_product['quantity']))
items.append(item)
result = self._Request(
_LIST_PRODUCTS_ENDPOINT, content_parser=content_parser)
if not items:
raise CommunicationError(
'Somehow, zero items retrieved from Opencart!')
self._products = items
return self
def GetProduct(self, model):
"""Returns a copy of a product detail.
Args:
model: string, The sku / model of the product being retrieved.
Raises:
NotFoundError: The sku / model of the product is not in Opencart.
MultipleResultsError: The sku / model is not unique in Opencart.
"""
results = [p for p in self._products if p.model == model]
if not results:
raise NotFoundError('Not found in Opencart: %s' % model)
if len(results) > 1:
logging.error('Multiple results in Opencart: %s' % model)
# raise MultipleResultsError('Multiple results in Opencart: %s' % model)
return copy.deepcopy(results[0])
def ListProducts(self):
"""Returns a copy of internal dictionary."""
return copy.deepcopy(self._products)
def UpdateProductStocks(self, model, stocks):
"""Updates a single products stock.
Args:
model: str, The sku / model of the product to be updated.
stocks: int, The new number of stocks of the product.
Raises:
NotFoundError: The sku / model of the product is not in Opencart.
MultipleResultsError: The sku / model is not unique in Opencart.
CommunicationError: Cannot communicate properly with Opencart.
"""
product = self.GetProduct(model)
product.stocks = stocks
return self.UpdateProducts([product])[0]
def UpdateProducts(self, products):
"""Updates Opencart records from the given list of products.
Args:
products: list<OpencartProduct>, The products with quantity changes to
upload.
Raises:
CommunicationError: Cannot communicate properly with Opencart
"""
def _CreateUpdateProductPayload(model, quantity):
return 'model=%s&quantity=%s' % (model, quantity,)
results = []
for p in products:
if not p.modified:
continue
payload = _CreateUpdateProductPayload(p.model, p.quantity)
result = self._Request(_UPDATE_PRODUCT_QUANTITY_ENDPOINT, payload)
result.attachment = p
results.append(result)
return results
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
domain = 'https://circuit.rocks/admin/index.php?route='
username = ''
password = ''
client = OpencartClient(domain, username, password)
p = client.GetProduct('WHC0011RF')
logging.info('%s %d %d' % (p.model, p.quantity, p.stocks,))
|
from datetime import datetime
from django.contrib.auth.models import User
from rest_framework import routers
from rest_framework import serializers
from rest_framework import viewsets
from rest_framework import status
from rest_framework import permissions
from rest_framework.decorators import detail_route
from rest_framework.decorators import api_view
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from oysterapp.oyster.models import Task
from oysterapp.oyster.models import Wish
from oysterapp.oyster.models import create_wish_from_url
from oysterapp.oyster.models import BillableItem
from oysterapp.oyster.models import TaskRule
from amazonproduct import API
# Serializers define the API representation.
class UserSerializer(serializers.ModelSerializer):
bank = serializers.FloatField(source='userprofile.piggy_bank')
small_amount = serializers.FloatField(source='userprofile.small_amount')
mid_amount = serializers.FloatField(source='userprofile.mid_amount')
large_amount = serializers.FloatField(source='userprofile.large_amount')
last_seen = serializers.DateTimeField(source='userprofile.last_seen')
class Meta:
model = User
fields = ('url', 'username', 'id', 'bank', 'small_amount',
'mid_amount', 'large_amount', 'last_seen')
depth = 1
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
This viewset automatically provides `list` and `detail` actions.
"""
queryset = User.objects.all()
serializer_class = UserSerializer
def update(self, request, pk):
updated = super(UserViewSet, self).update(request)
instance = self.get_object()
userprofile = instance.userprofile
userprofile.last_seen = datetime.now()
userprofile.save()
return updated
@api_view(['GET'])
def current_user(request):
serializer = UserSerializer(request.user, context={'request': request})
response = Response(serializer.data)
userprofile = request.user.userprofile
userprofile.last_seen = datetime.now()
userprofile.save()
return response
class WishSerializer(serializers.ModelSerializer):
class Meta:
model = Wish
class WishViewSet(viewsets.ModelViewSet):
serializer_class = WishSerializer
def get_queryset(self):
return Wish.objects.filter(user=self.request.user, completed=False)
def create(self, request, *args, **kwargs):
data = request.DATA
if data.get('amazon_link'):
wish = create_wish_from_url(request.user, data.get('amazon_link'))
else:
wish = Wish(
url=data.get('url'),
image_url=data.get('image_url'),
amount=data.get('amount'),
title=data.get('title'),
user=request.user
)
wish.save()
serializer = WishSerializer(wish)
return Response(serializer.data)
def update(self, request, pk):
instance = self.get_object()
userprofile = instance.user.userprofile
userprofile.last_seen = datetime.now()
userprofile.save()
return super(WishViewSet, self).update(request)
class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = Task
class BillableItemSerializer(serializers.ModelSerializer):
class Meta:
model = BillableItem
class HistoryViewSet(viewsets.ModelViewSet):
serializer_class = BillableItemSerializer
def get_queryset(self):
return BillableItem.objects.filter(user=self.request.user, completed=True).order_by('-updated')
def update(self, request, pk):
instance = self.get_object()
userprofile = instance.user.userprofile
userprofile.last_seen = datetime.now()
userprofile.save()
return super(HistoryViewSet, self).update(request)
class IncompleteTaskViewSet(viewsets.ModelViewSet):
serializer_class = TaskSerializer
def get_queryset(self):
return Task.objects.filter(user=self.request.user, completed=False).order_by('-created')
def create(self, request, *args, **kwargs):
data = request.DATA
data['user'] = request.user
task = Task.objects.create(**data)
serialized_data = data
serialized_data['user'] = request.user.id
serialized_data['id'] = task.id
serialized_data['created'] = task.created
serialized_data['updated'] = task.updated
return Response(serialized_data,
status=status.HTTP_201_CREATED)
def update(self, request, pk):
updated = super(IncompleteTaskViewSet, self).update(request)
instance = self.get_object()
userprofile = instance.user.userprofile
userprofile.last_seen = datetime.now()
userprofile.save()
return updated
class TaskRuleSerializer(serializers.ModelSerializer):
class Meta:
model = TaskRule
class TaskRuleViewSet(viewsets.ModelViewSet):
serializer_class = TaskRuleSerializer
lookup_field = 'uuid'
lookup_value_regex = '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
def get_queryset(self):
return TaskRule.objects.filter(user=self.request.user)
def create(self, request, *args, **kwargs):
data = request.DATA
task_rule = None
if (data.get('frequency') and data.get('scale')) or data.get('regenerate_on_completion'):
task_rule = TaskRule.objects.create(
user=request.user,
amount=float(data.get('amount')),
title=data.get('title'),
frequency=data.get('frequency'),
scale=data.get('scale'),
uuid=data.get('temp_guid'),
completable_by=data.get('completable_by')
)
task = Task.objects.create(
user=request.user,
amount=float(data.get('amount')),
title=data.get('title'),
task_rule=task_rule,
doable=bool(data.get('completable_by') == 'Oyster')
)
return Response(data, status=status.HTTP_201_CREATED)
def update(self, request, pk):
instance = self.get_object()
userprofile = instance.user.userprofile
userprofile.last_seen = datetime.now()
userprofile.save()
return super(TaskRuleViewSet, self).update(request)
@detail_route(methods=['get'])
def completed(self, request, uuid=None):
task_rule = TaskRule.objects.get(uuid=uuid)
task = task_rule.get_first_open_task()
if task:
task.completed = True
task.save()
return Response({}, status=status.HTTP_201_CREATED)
|
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
try:
driver.get("https://the-internet.herokuapp.com/javascript_alerts")
button1 = driver.find_element(By.CSS_SELECTOR, '#content > div > ul > li:nth-child(1) > button')
button1.click()
driver.switch_to.alert.accept()
button2 = driver.find_element(By.CSS_SELECTOR, '#content > div > ul > li:nth-child(2) > button')
button2.click()
driver.switch_to.alert.accept()
button2 = driver.find_element(By.CSS_SELECTOR, '#content > div > ul > li:nth-child(2) > button')
button2.click()
driver.switch_to.alert.dismiss()
button3 = driver.find_element(By.CSS_SELECTOR, '#content > div > ul > li:nth-child(3) > button')
button3.click()
driver.switch_to.alert.send_keys('ewfqwedgerbqerg')
driver.switch_to.alert.accept()
time.sleep(3)
finally:
driver.quit()
|
__author__ = 'Antony Cherepanov'
import pickle
import json
import os
def save_dict_to_file_via_pickle():
print("\nsave_dict_to_file_via_pickle()")
simple_dict = {"key1": 224, "kkl": "strong"}
print("Our dict: " + str(simple_dict))
print("Let's serialise it and save to file")
test_file = open("datafile.pkl", "wb")
pickle.dump(simple_dict, test_file)
test_file.close()
print("Let's see what inside: " + str(open("datafile.pkl", "rb").read()))
print("And now recreate it from file!")
reopened_test_file = open("datafile.pkl", "rb")
recreated_dict = pickle.load(reopened_test_file)
reopened_test_file.close()
print("Recreated dict: " + str(recreated_dict))
print("Are they the same: " + str(simple_dict == recreated_dict))
os.remove("datafile.pkl")
def save_dict_as_json():
    print("\nsave_dict_as_json()")
    simple_dict = {"key1": 224, "kkl": "strong"}
    print("Our dict: " + str(simple_dict))
    print("Let's serialise it and save to a json file")
    # Context managers ensure the file is flushed and closed before re-reading it.
    with open("testjson.txt", "w") as json_file:
        json.dump(simple_dict, json_file)
    with open("testjson.txt") as json_file:
        print("Let's see what's inside: " + json_file.read())
    with open("testjson.txt") as json_file:
        recreated_dict = json.load(json_file)
    print("Recreated dict: " + str(recreated_dict))
    os.remove("testjson.txt")
save_dict_to_file_via_pickle()
save_dict_as_json()
|
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.db.models import Count, Q
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import View, ListView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy, reverse
from django.template.defaultfilters import slugify
from django.core.mail import send_mail
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from PIL import Image
import uuid
from .forms import CommentForm, PostForm
from .models import Post, Author, PostView, Comment, Activity
from marketing.forms import EmailSignupForm
from marketing.models import Signup
form = EmailSignupForm()
def get_author(user):
qs = Author.objects.filter(user=user)
if qs.exists():
return qs[0]
return None
def get_category_count():
queryset = Post \
.objects \
.values('categories__title') \
.annotate(Count('categories__title'))
return queryset
def get_tags_count():
queryset = Post \
.objects \
.values('tags__title') \
.annotate(Count('tags__title'))
print(queryset)
return queryset
class SearchView(View):
def get(self, request, *args, **kwargs):
queryset = Post.objects.all()
query = request.GET.get('q')
if query:
queryset = queryset.filter(
Q(title__icontains=query) |
Q(overview__icontains=query)
).distinct()
context = {
'queryset': queryset
}
return render(request, 'search_results.html', context)
def blog_category(request, category):
category_count = get_category_count()
most_recent = Post.objects.order_by('-timestamp')[:3]
post_list = Post.objects.filter(
categories__title=category
)
tags = get_tags_count()
paginator = Paginator(post_list, 4)
page_request_var = 'page'
page = request.GET.get(page_request_var)
try:
paginated_queryset = paginator.page(page)
except PageNotAnInteger:
paginated_queryset = paginator.page(1)
except EmptyPage:
paginated_queryset = paginator.page(paginator.num_pages)
context = {
'queryset': paginated_queryset,
'most_recent': most_recent,
'page_request_var': page_request_var,
'category_count': category_count,
'tags': tags,
'form': form
}
return render(request, 'blog.html', context)
def ContactView(request):
message_email = ""
if request.method == 'POST':
message_email = request.POST['email']
message_subject = request.POST['subject']
message = request.POST['message']
        # send an email
send_mail(message_subject, message, settings.EMAIL_HOST_USER, [message_email])
return render(request, "contacts.html", {'msg': message_email})
def blog_tags(request, tags):
category_count = get_category_count()
most_recent = Post.objects.order_by('-timestamp')[:3]
post_list = Post.objects.filter(
tags__title=tags
)
tags = get_tags_count()
paginator = Paginator(post_list, 4)
page_request_var = 'page'
page = request.GET.get(page_request_var)
try:
paginated_queryset = paginator.page(page)
except PageNotAnInteger:
paginated_queryset = paginator.page(1)
except EmptyPage:
paginated_queryset = paginator.page(paginator.num_pages)
context = {
'queryset': paginated_queryset,
'most_recent': most_recent,
'page_request_var': page_request_var,
'category_count': category_count,
'tags': tags,
'form': form
}
return render(request, 'blog.html', context)
class IndexView(View):
form = EmailSignupForm()
def get(self, request, *args, **kwargs):
category_count = get_category_count()
most_recent = Post.objects.filter(featured=True).order_by('-timestamp')[:3]
post_list = Post.objects.filter(featured=True).all()
tags = get_tags_count()
paginator = Paginator(post_list, 2)
page_request_var = 'page'
page = request.GET.get(page_request_var)
try:
paginated_queryset = paginator.page(page)
except PageNotAnInteger:
paginated_queryset = paginator.page(1)
except EmptyPage:
paginated_queryset = paginator.page(paginator.num_pages)
context = {
'object_list': paginated_queryset,
'most_recent': most_recent,
'page_request_var': page_request_var,
'category_count': category_count,
'tags': tags,
'form': self.form
}
return render(request, 'index.html', context)
def post(self, request, *args, **kwargs):
email = request.POST.get("email")
new_signup = Signup()
new_signup.email = email
new_signup.save()
messages.info(request, "Successfully subscribed")
return redirect("home")
class PostListView(ListView):
form = EmailSignupForm()
model = Post
template_name = 'blog.html'
context_object_name = 'queryset'
    paginate_by = 2
def get_context_data(self, **kwargs):
category_count = get_category_count()
most_recent = Post.objects.filter(featured=True).order_by('-timestamp')[:3]
tags = get_tags_count()
context = super().get_context_data(**kwargs)
context['most_related'] = most_recent
context['page_request_var'] = "page"
context['category_count'] = category_count
context['tags'] = tags
context['form'] = self.form
return context
class PostDetailView(DetailView):
model = Post
template_name = 'post.html'
context_object_name = 'post'
form = CommentForm()
def get_object(self):
obj = super().get_object()
if self.request.user.is_authenticated:
PostView.objects.get_or_create(
user=self.request.user,
post=obj
)
return obj
def get_context_data(self, **kwargs):
category_count = get_category_count()
        post = get_object_or_404(Post, id=self.kwargs['pk'])
rhs_tags = get_tags_count()
most_relative = Post.objects.order_by('-timestamp')[:3]
liked = False
        if post.votes.filter(id=self.request.user.id).exists():
liked = True
current_author = get_author(self.request.user)
userpostcount = post_count(current_author)
author_votecount = 0
for n in Post.objects.filter(author=current_author):
author_votecount += n.votes.count()
context = super().get_context_data(**kwargs)
context['most_relative'] = most_relative
context['page_request_var'] = "page"
context['category_count'] = category_count
context['liked'] = liked
context['vote_count'] = post.vote_count
context['tags'] = post.get_tags[0],
context['rhs_tags'] = rhs_tags[0]
context['author_details'] = post.get_authordetails
context['authorpostcount'] = userpostcount
context['authorvotecount'] = author_votecount
context['form'] = self.form
return context
def post(self, request, *args, **kwargs):
form = CommentForm(request.POST)
if form.is_valid():
post = self.get_object()
form.instance.user = request.user
form.instance.post = post
form.save()
return redirect(reverse("post-detail", kwargs={
'pk': post.pk
}))
class PostCreateView(CreateView):
model = Post
template_name = 'post_create.html'
form_class = PostForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = 'Create'
return context
def form_valid(self, form):
if self.request.user.is_authenticated:
form.instance.author = get_author(self.request.user)
form.save()
return redirect(reverse("post-detail", kwargs={
'pk': form.instance.pk,
'slug': slugify(form.instance.title)
}))
else:
return redirect("/accounts/login")
def upload(request):
    try:
        file = request.FILES['docf']
        updatedImgName = str(uuid.uuid4())
        imgExt = file.name.split('.')[-1]  # last segment, so names containing extra dots still work
        img = Image.open(file)
        img.thumbnail((500, 500), Image.ANTIALIAS)
        try:
            img.save(settings.MEDIA_ROOT + "/blog_img/" + updatedImgName + "." + imgExt)
            print("img save pass")
        except Exception as exc:
            print("img.save error: " + str(exc))
        path = "/media/blog_img/" + updatedImgName + "." + imgExt
        return JsonResponse({"imgpath": path}, status=200)
    except Exception:
        return HttpResponse("error")
class PostUpdateView(UpdateView):
model = Post
template_name = 'post_create.html'
form_class = PostForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = 'Update'
return context
def form_valid(self, form):
form.instance.author = get_author(self.request.user)
form.save()
return redirect(reverse("post-detail", kwargs={
'pk': form.instance.pk,
'slug': slugify(form.instance.title)
}))
def VoteView(request):
    liked = False
    if request.method == 'POST':
        post = get_object_or_404(Post, id=request.POST.get('post_id'))
        if post.votes.filter(id=request.user.id).exists():
            post.votes.remove(request.user)
            liked = False
        else:
            post.votes.add(request.user)
            liked = True
    return JsonResponse({"data": liked}, status=200)
class PostDeleteView(DeleteView):
model = Post
success_url = '/blog'
template_name = 'post_confirm_delete.html'
class user_dashboard(View):
    def get(self, request):
        vote_count = 0
        com_count = 0
        author = get_author(request.user)
        my_post_count = post_count(author)
        my_published_post = Post.objects.filter(author=author)
        activity_list = Activity.objects.filter(user=request.user)
        for x in my_published_post:
            com_count += Comment.objects.filter(post=x).count()
        for n in my_published_post:
            vote_count += n.votes.count()
        context = {
            'activity_list': activity_list,
            'author': author,
            'total_votes': vote_count,
            'post_count': my_post_count,
            'my_published_post': my_published_post,
            'total_comments': com_count,
        }
        return render(request, 'account/profile_temp.html', context)
class Activity_view(View):
    def get(self, request):
        activity_list = Activity.objects.filter(user=request.user)
        context = {
            'activity_list': activity_list
        }
        return render(request, 'account/profile_temp.html', context)
def post_count(user_id):
my_post=Post.objects.filter(author=user_id).count()
return my_post
def post_delete(request, id):
post = get_object_or_404(Post, id=id)
post.delete()
return redirect(reverse("post-list"))
def PracticeView(request):
    # Renders the practice page for any request method.
    return render(request, "practice.html")
|
#coding:utf-8
#
# id: bugs.core_5970
# title: Built-in cryptographic functions
# description:
# Issues found during implementing this test - see CORE-6185, CORE-6186.
# This test checks only ability to call ENCRYPT()/DECRYPT() functions with different parameters.
# Also, it checks that <source> -> encrypt(<source>) -> decrypt(encrypted_source) gives the same <source>.
#
# Checked on:
# 4.0.0.1646 SS: 3.657s.
# 4.0.0.1637 SC: 3.271s.
# 4.0.0.1633 CS: 4.191s.
#
# tracker_id: CORE-5970
# min_versions: ['4.0.0']
# versions: 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 4.0
# resources: None
substitutions_1 = [('[ \t]+', ' ')]
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set blob all;
set list on;
create or alter procedure sp_block_test(a_alg varchar(30)) as begin end;
create or alter procedure sp_stream_test(a_alg varchar(30)) as begin end;
commit;
recreate table test( crypto_alg varchar(30), source_text blob, crypto_key varchar(128), crypto_iv varchar(128) );
recreate global temporary table gtt_tmp(
source_text blob
,encrypted_text blob
) on commit delete rows;
commit;
recreate table secure_table(secret_field varchar(1000), init_vector varchar(16) );
insert into secure_table(secret_field, init_vector) values( lpad('',1000, 'A'), '1234567890123456');
commit;
--set echo on;
-- Should NOT cause any errors when call encrypt() decrypt() for these params:
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'AES', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',16, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'ANUBIS', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',16, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'BLOWFISH', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'KHAZAD', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'RC5', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'RC6', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',16, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( '"SAFER+"', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',16, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'TWOFISH', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',16, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'XTEA', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'CHACHA20', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'RC4', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), null );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'SOBER128', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
-- Should cause FAILS: invalid length of keys:
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'AES', lpad('', 65535, gen_uuid()), lpad('',11, uuid_to_char( gen_uuid() )), lpad('',16, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'ANUBIS', lpad('', 65535, gen_uuid()), lpad('',12, uuid_to_char( gen_uuid() )), lpad('',16, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'BLOWFISH', lpad('', 65535, gen_uuid()), lpad('',13, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'KHAZAD', lpad('', 65535, gen_uuid()), lpad('',14, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'RC5', lpad('', 65535, gen_uuid()), lpad('',15, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'RC6', lpad('', 65535, gen_uuid()), lpad('',17, uuid_to_char( gen_uuid() )), lpad('',16, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( '"SAFER+"', lpad('', 65535, gen_uuid()), lpad('',18, uuid_to_char( gen_uuid() )), lpad('',16, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'TWOFISH', lpad('', 65535, gen_uuid()), lpad('',19, uuid_to_char( gen_uuid() )), lpad('',16, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'XTEA', lpad('', 65535, gen_uuid()), lpad('',20, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'CHACHA20', lpad('', 65535, gen_uuid()), lpad('',21, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'RC4', lpad('', 65535, gen_uuid()), lpad('',22, uuid_to_char( gen_uuid() )), null );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'SOBER128', lpad('', 65535, gen_uuid()), lpad('',23, uuid_to_char( gen_uuid() )), lpad('', 8, uuid_to_char( gen_uuid() )) );
-- Should cause FAILS: invalid length of IVs:
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'AES', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',11, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'ANUBIS', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',13, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'BLOWFISH', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',15, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'KHAZAD', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',17, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'RC5', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',19, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'RC6', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',21, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( '"SAFER+"', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',23, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'TWOFISH', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',25, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'XTEA', lpad('', 65535, gen_uuid()), lpad('',26, uuid_to_char( gen_uuid() )), lpad('',27, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'CHACHA20', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',29, uuid_to_char( gen_uuid() )) );
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'RC4', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',31, uuid_to_char( gen_uuid() )) ); -- IV not needed for this alg
insert into test( crypto_alg, source_text, crypto_key, crypto_iv) values( 'SOBER128', lpad('', 65535, gen_uuid()), lpad('',16, uuid_to_char( gen_uuid() )), lpad('',33, uuid_to_char( gen_uuid() )) );
commit;
set term ^;
create or alter procedure sp_block_test(a_alg varchar(30))
returns(
encryption_algorithm varchar(30)
,encryption_mode varchar(10)
,enc_key_octet_length int
,enc_init_vector_octet_length int
,encrypted_equals_to_decrypted boolean
,encryption_finish_gdscode int
) as
declare v_encrypted blob;
declare v_encrypt_sttm blob;
declare v_decrypt_sttm blob;
declare s_source_text blob;
declare s_decrypted_text blob;
begin
delete from gtt_tmp;
for
select
t.source_text
,t.crypto_alg
,t.crypto_key
,t.crypto_iv
from test t
where upper( t.crypto_alg ) = upper( :a_alg )
as cursor c
do begin
encryption_algorithm = c.crypto_alg;
enc_key_octet_length = octet_length( c.crypto_key );
enc_init_vector_octet_length = octet_length( c.crypto_iv );
-- block_cipher ::= { AES | ANUBIS | BLOWFISH | KHAZAD | RC5 | RC6 | SAFER+ | TWOFISH | XTEA }
-- mode ::= { CBC | CFB | CTR | ECB | OFB }
for
select 'CBC' as mode from rdb$database union all
select 'CFB' from rdb$database union all -- AES
select 'CTR' from rdb$database union all -- AES
select 'ECB' from rdb$database union all
select 'OFB' from rdb$database -- AES
as cursor cm
do begin
encryption_mode = cm.mode;
encrypted_equals_to_decrypted = null;
encryption_finish_gdscode = null;
begin
-- Mode should be specified for block ciphers.
-- Initialization vector (IV) should be specified for block ciphers in all modes except ECB and all stream ciphers except RC4.
insert into gtt_tmp(source_text) values(c.source_text);
s_source_text = c.source_text;
-- This caused crash when length of string was 65535; sent letter to Alex et al, 11.11.2019:
-- v_encrypt_sttm = 'select encrypt( q''{' || c.source_text || '}'' using ' || c.crypto_alg || ' mode ofb key q''{' || c.crypto_key || '}'' iv q''{' || c.crypto_iv || '}'' ) from rdb$database';
v_encrypt_sttm = 'select encrypt( t.source_text using ' || c.crypto_alg || ' mode ' || cm.mode || ' key q''{' || c.crypto_key || '}'' iv q''{' || c.crypto_iv || '}'' ) from gtt_tmp t';
execute statement v_encrypt_sttm into v_encrypted;
update gtt_tmp t set t.encrypted_text = :v_encrypted;
v_decrypt_sttm = 'select decrypt( t.encrypted_text using ' || c.crypto_alg || ' mode ' || cm.mode || ' key q''{' || c.crypto_key || '}'' iv q''{' || c.crypto_iv || '}'' ) from gtt_tmp t';
execute statement v_decrypt_sttm into s_decrypted_text;
encrypted_equals_to_decrypted = false;
if ( hash(s_source_text) = hash(s_decrypted_text) ) then
if (s_source_text = s_decrypted_text) then
encrypted_equals_to_decrypted = true;
when any do
begin
-- 335545230 : TomCrypt library error: Invalid argument provided.
-- 335545234 : Encrypting in CBC mode
-- 335545224 : Initialization vector (IV) makes no sense for chosen cipher and/or mode
encryption_finish_gdscode = gdscode;
end
end
suspend;
delete from gtt_tmp;
end
end
end
^
create or alter procedure sp_stream_test(a_alg varchar(30))
returns(
encryption_algorithm varchar(30)
,enc_key_octet_length int
,enc_init_vector_octet_length int
,encrypted_equals_to_decrypted boolean
,encryption_finish_gdscode int
) as
declare v_encrypted blob;
declare v_encrypt_sttm blob;
declare v_decrypt_sttm blob;
declare s_source_text blob;
declare s_decrypted_text blob;
declare iv_suffix blob;
begin
delete from gtt_tmp;
for
select
t.source_text
,t.crypto_alg
,t.crypto_key
,t.crypto_iv
from test t
where upper( t.crypto_alg ) = upper( :a_alg )
as cursor c
do begin
-- stream_cipher ::= { CHACHA20 | RC4 | SOBER128 }
encryption_algorithm = c.crypto_alg;
enc_key_octet_length = octet_length( c.crypto_key );
encryption_finish_gdscode = null;
begin
-- Mode should be specified for block ciphers.
-- Initialization vector (IV) should be specified for block ciphers in all modes except ECB and all stream ciphers except RC4.
insert into gtt_tmp(source_text) values(c.source_text);
s_source_text = c.source_text;
enc_init_vector_octet_length = 0;
if ( upper( :a_alg ) = upper('RC4') ) then
iv_suffix= '';
else
begin
iv_suffix= ' iv q''{' || c.crypto_iv || '}'' ';
enc_init_vector_octet_length = octet_length(c.crypto_iv);
end
v_encrypt_sttm = 'select encrypt( t.source_text using ' || c.crypto_alg || ' key q''{' || c.crypto_key || '}'' ' || iv_suffix || ') from gtt_tmp t';
execute statement v_encrypt_sttm into v_encrypted;
update gtt_tmp t set t.encrypted_text = :v_encrypted;
v_decrypt_sttm = 'select decrypt( t.encrypted_text using ' || c.crypto_alg || ' key q''{' || c.crypto_key || '}'' ' || iv_suffix || ') from gtt_tmp t';
execute statement v_decrypt_sttm into s_decrypted_text;
encrypted_equals_to_decrypted = false;
if ( hash(s_source_text) = hash(s_decrypted_text) ) then
if (s_source_text = s_decrypted_text) then
encrypted_equals_to_decrypted = true;
when any do
begin
encryption_finish_gdscode = gdscode;
end
end
suspend;
delete from gtt_tmp;
end
end
^
set term ;^
commit;
---------------------------------------
set bail off;
-- 1. Main checks:
-- ###############
-- 1.1 Block cipher:
select * from sp_block_test('aes');
select * from sp_block_test('anubis');
select * from sp_block_test('blowfish');
select * from sp_block_test('khazad');
select * from sp_block_test('rc5');
select * from sp_block_test('rc6');
select * from sp_block_test('"safer+"');
select * from sp_block_test('twofish');
select * from sp_block_test('xtea');
-- 1.2 Stream cipher:
select * from sp_stream_test('chacha20');
select * from sp_stream_test('rc4');
select * from sp_stream_test('sober128');
-- 2. Auxiliary checks:
-- ####################
-- 2.1. "Counter length (CTR_LENGTH, bytes) may be specified only in CTR mode, default is the size of IV."
select encrypt( 'fooriobar' using AES mode CTR key q'{A8586F1E-DB13-4D}' iv q'{D2FF255D-EDE3-44}' CTR_LENGTH -123 ) as ctr_clause_case_1 from rdb$database;
select encrypt( 'fooriobar' using AES mode CTR key q'{A8586F1E-DB13-4D}' iv q'{D2FF255D-EDE3-44}' CTR_LENGTH 0 ) as ctr_clause_case_2 from rdb$database;
select encrypt( 'fooriobar' using AES mode CTR key q'{A8586F1E-DB13-4D}' iv q'{D2FF255D-EDE3-44}' CTR_LENGTH 16 ) as ctr_clause_case_3 from rdb$database;
select encrypt( 'fooriobar' using AES mode CTR key q'{A8586F1E-DB13-4D}' iv q'{D2FF255D-EDE3-44}' CTR_LENGTH 123 ) as ctr_clause_case_4 from rdb$database;
select encrypt( 'fooriobar' using AES mode OFB key q'{A8586F1E-DB13-4D}' iv q'{D2FF255D-EDE3-44}' CTR_LENGTH 16 ) as ctr_clause_case_5 from rdb$database;
-- 2.2. "Initial counter value (COUNTER) may be specified only for CHACHA20 cipher, default is 0."
select encrypt( 'fooriobar' using CHACHA20 key q'{1110FB89-AD32-4E}' iv q'{114E811E}' counter 0 ) from rdb$database;
-- lead to crash, letter 11.11.2019 15:35 --> select encrypt( 'fooriobar' using CHACHA20 key q'{1110FB89-AD32-4E}' iv q'{114E811E}' counter cast(null as bigint) ) from rdb$database;
select encrypt( 'fooriobar' using CHACHA20 key q'{1110FB89-AD32-4E}' iv q'{114E811E}' counter 1 ) from rdb$database;
select encrypt( 'fooriobar' using CHACHA20 key q'{1110FB89-AD32-4E}' iv q'{114E811E}' counter -9223372036854775808 ) from rdb$database;
select encrypt( 'fooriobar' using CHACHA20 key q'{1110FB89-AD32-4E}' iv q'{114E811E}' counter 9223372036854775807 ) from rdb$database;
-- 2.3. Following query led to crash, see letter to Alex, 30.12.2018 00:15
-- Expected STDERR:
-- Statement failed, SQLSTATE = 22023
-- Invalid key length 9, need 16 or 32
select encrypt('QweRtYUioP' using chacha20 key '192837465' iv '777555333') as invalid_params from rdb$database;
-- 4. "Functions return BLOB when first argument is blob and varbinary for all text types."
set sqlda_display on;
with
d as (
select
cast('Functions return BLOB when first argument is blob and varbinary for all text types.' as blob) as d_blob
,cast('Functions return BLOB when first argument is blob and varbinary for all text types.' as varchar(255) ) as d_char
,x'0154090759DF' as e_bin
from rdb$database
)
select
encrypt( d.d_blob using AES mode CTR key q'{A8586F1E-DB13-4D}' iv q'{D2FF255D-EDE3-44}' ) as e_blob
,encrypt( d.d_char using AES mode CTR key q'{A8586F1E-DB13-4D}' iv q'{D2FF255D-EDE3-44}' ) as e_char
,decrypt( d.e_bin using sober128 key 'AbcdAbcdAbcdAbcd' iv '01234567') as d_bin
from d
rows 0;
set sqlda_display off;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 11
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 11
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 11
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 11
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 11
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 11
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 11
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 11
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 11
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM AES
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 11
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 12
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 12
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 12
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 12
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 12
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 13
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 13
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 13
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 13
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM ANUBIS
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 13
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 13
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 13
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 13
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 13
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 13
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 15
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 15
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 15
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 15
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM BLOWFISH
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 15
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 14
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 14
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 14
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 14
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 14
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 17
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 17
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 17
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 17
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM KHAZAD
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 17
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 15
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 15
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 15
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 15
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 15
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 19
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 19
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 19
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 19
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM RC5
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 19
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 17
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 17
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 17
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 17
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 17
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 21
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 21
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 21
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 21
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM RC6
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 21
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 18
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 18
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 18
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 18
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 18
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 23
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 23
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 23
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 23
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM "SAFER+"
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 23
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 19
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 19
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 19
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 19
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 19
ENC_INIT_VECTOR_OCTET_LENGTH 16
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 25
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 25
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 25
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 25
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM TWOFISH
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 25
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 20
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 20
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 20
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 20
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 20
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE CBC
ENC_KEY_OCTET_LENGTH 26
ENC_INIT_VECTOR_OCTET_LENGTH 27
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE CFB
ENC_KEY_OCTET_LENGTH 26
ENC_INIT_VECTOR_OCTET_LENGTH 27
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE CTR
ENC_KEY_OCTET_LENGTH 26
ENC_INIT_VECTOR_OCTET_LENGTH 27
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE ECB
ENC_KEY_OCTET_LENGTH 26
ENC_INIT_VECTOR_OCTET_LENGTH 27
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545224
ENCRYPTION_ALGORITHM XTEA
ENCRYPTION_MODE OFB
ENC_KEY_OCTET_LENGTH 26
ENC_INIT_VECTOR_OCTET_LENGTH 27
ENCRYPTED_EQUALS_TO_DECRYPTED <null>
ENCRYPTION_FINISH_GDSCODE 335545229
ENCRYPTION_ALGORITHM CHACHA20
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM CHACHA20
ENC_KEY_OCTET_LENGTH 21
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE 335545250
ENCRYPTION_ALGORITHM CHACHA20
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 29
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE 335545240
ENCRYPTION_ALGORITHM RC4
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 0
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC4
ENC_KEY_OCTET_LENGTH 22
ENC_INIT_VECTOR_OCTET_LENGTH 0
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM RC4
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 0
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM SOBER128
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE <null>
ENCRYPTION_ALGORITHM SOBER128
ENC_KEY_OCTET_LENGTH 23
ENC_INIT_VECTOR_OCTET_LENGTH 8
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE 335545230
ENCRYPTION_ALGORITHM SOBER128
ENC_KEY_OCTET_LENGTH 16
ENC_INIT_VECTOR_OCTET_LENGTH 33
ENCRYPTED_EQUALS_TO_DECRYPTED <true>
ENCRYPTION_FINISH_GDSCODE 335545230
CTR_CLAUSE_CASE_2 E813A50C069FC418AA
CTR_CLAUSE_CASE_3 E813A50C069FC418AA
ENCRYPT 8E709DDA89912F172C
ENCRYPT BC3604C147B53D3BDD
ENCRYPT C8051FB1A2581EA9A1
ENCRYPT 2E2298CF4C2B81AD54
INPUT message field count: 0
OUTPUT message field count: 3
01: sqltype: 520 BLOB scale: 0 subtype: 0 len: 8
: name: ENCRYPT alias: E_BLOB
: table: owner:
02: sqltype: 448 VARYING scale: 0 subtype: 0 len: 255 charset: 1 OCTETS
: name: ENCRYPT alias: E_CHAR
: table: owner:
03: sqltype: 448 VARYING scale: 0 subtype: 0 len: 6 charset: 1 OCTETS
: name: DECRYPT alias: D_BIN
: table: owner:
"""
expected_stderr_1 = """
Statement failed, SQLSTATE = 22023
Too big counter value -123, maximum 16 can be used
Statement failed, SQLSTATE = 22023
Too big counter value 123, maximum 16 can be used
Statement failed, SQLSTATE = 22023
Counter length/value parameter is not used with mode OFB
Statement failed, SQLSTATE = 22023
Invalid key length 9, need 16 or 32
"""
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.expected_stderr = expected_stderr_1
act_1.execute()
assert act_1.clean_stderr == act_1.clean_expected_stderr
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
class DynamicArray(object):
    def __init__(self, capacity=0):
        # Backed by a Python list; capacity only mimics the classic
        # "double when full" bookkeeping of a dynamic array.
        self.capacity = capacity if capacity > 0 else 1
        self.arr = []
        self.len = 0
    def size(self):
        return self.len
    def isEmpty(self):
        return self.len == 0
    def get(self, index):
        if index >= self.len or index < 0:
            raise ValueError("Index out of range in get")
        return self.arr[index]
    def set(self, index, elem):
        if index >= self.len or index < 0:
            raise ValueError("Index out of range in set")
        self.arr[index] = elem
    def clear(self):
        self.arr = []
        self.len = 0
    def add(self, elem):
        # Grow the tracked capacity when full, then append and count the element.
        if self.len + 1 >= self.capacity:
            self.capacity *= 2
        self.arr.append(elem)
        self.len += 1
    def removeAt(self, rm_index):
        if rm_index >= self.len or rm_index < 0:
            raise ValueError("Remove index out of range in removeAt")
        data = self.arr.pop(rm_index)
        self.len -= 1
        return data
    def remove(self, value):
        self.arr.remove(value)
        self.len -= 1
    def indexOf(self, value):
        for i in range(self.len):
            if self.arr[i] == value:
                return i
        return -1
    def contains(self, value):
        return self.indexOf(value) != -1
    def __str__(self):
        # Render as "[a, b, ...]"; the empty case is handled explicitly.
        if self.len == 0:
            return "[]"
        s = "["
        for i in range(self.len - 1):
            s += str(self.arr[i])
            s += ", "
        s += str(self.arr[self.len - 1])
        s += "]"
        return s
|
# Time: O(1)
# Space: O(1)
class Solution(object):
def totalMoney(self, n):
"""
:type n: int
:rtype: int
"""
        def arithmetic_sequence_sum(a, d, n):
            # Sum of n terms of an arithmetic sequence with first term a and
            # common difference d: n * (2*a + (n-1)*d) / 2.
            return (2*a + (n-1)*d) * n // 2
cost, day = 1, 7
first_week_cost = arithmetic_sequence_sum(cost, cost, day)
week, remain_day = divmod(n, day)
return arithmetic_sequence_sum(first_week_cost, cost*day, week) + \
arithmetic_sequence_sum(cost*(week+1), cost, remain_day)
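# Illustrative sanity check (not part of the original solution), using
# hand-checked values: 10 days deposit 1+2+...+7 plus 2+3+4 = 37, and 20 days
# total 96, both of which the closed-form computation above reproduces.
if __name__ == '__main__':
    assert Solution().totalMoney(10) == 37
    assert Solution().totalMoney(20) == 96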
|
from arch.univariate.mean import ConstantMean
from arrays import ArrayDateIndex
from api_wrap import ApiWrapper
from garch import MyGARCH
from stock import Stock
from utils import Utils
import datetime
import unittest
class MyTestGarch(unittest.TestCase):
def setUp(self):
# S&P 500
self.company = ApiWrapper.load_prices_json('%5EGSPC')
self.stock = ApiWrapper.get_company_info('%5EGSPC')
self.response = ApiWrapper.execute_request('market/get-movers')
self.garch = MyGARCH(self.stock.rets, self.stock.get_indices())
def test_company_type(self):
self.assertEqual(type(self.company), ArrayDateIndex)
def test_index_type(self):
self.assertEqual(type(self.company.get_indices()[0]), datetime.date)
def test_indexing_in_ADT(self):
self.assertRaises(AssertionError, self.company.__getitem__, -1)
def test_check_names(self):
self.assertRaises(KeyError, Utils.check_empty, self.response, 'wrong_key')
def test_api_loading(self):
self.assertEqual(type(self.response), dict)
def test_info_getting(self):
self.assertEqual(self.stock.mean, Stock(self.company).mean)
def test_volatility_calculations(self):
expected = [0.10749645873578462,
0.4926106590964299,
1.706453379810017]
actual = [self.stock.calculate_volatility(),
self.stock.calculate_volatility(21),
self.stock.calculate_volatility(252)]
self.assertEqual(actual, expected)
def test_indices_equality(self):
self.assertEqual(self.company.get_indices()[1:], self.stock.get_indices())
def test_sharpe_ratio(self):
self.assertAlmostEqual(round(self.stock.get_sharpe_ratio(), 4), 0.1826)
def test_arch_model_type(self):
self.assertEqual(type(self.garch.get_garch_model()), ConstantMean)
def test_garch_fit(self):
self.garch.fit_garch(self.garch.get_garch_model())
self.assertIsNotNone(self.garch.fit)
def test_forecast_horizon(self):
horizon = 7
self.garch.fit_garch(self.garch.get_garch_model())
forecast = self.garch.get_forecast(horizon)
self.assertEqual(horizon, len(forecast))
if __name__ == '__main__':
unittest.main()
|
import json
import os
import time
import numpy as np
import pyUSRP as u
from flask_socketio import emit
from flask_login import current_user
from app import socketio, check_connection, measure_manager, job_manager, app
from diagnostic_text import *
from models import filename_to_abs_path, check_db_measure
from multiprocessing import Lock
import pprint
from .analysis_helper import *
class Analysis_Config(object):
'''
Class for managing analysis configurations.
    Acts as a bridge between the pyUSRP checks and the websockets.
'''
def __init__(self, file_list):
self.config = {
'multi':{
'diagnostic':{
'available':0,
'requested':0,
'reason':'Diagnostic is implemented only as a plotting mode.',
'files_noise':[],
'files_vna':[],
'paths_noise':[],
'paths_vna':[],
'override':[],
'parameters':{}
}
},
'single':{
'vna_simple':{
'available':0,
'requested':0,
'reason':'no files matching criteria',
'files':[],
'paths':[],
'override':[],
'parameters':{}
},
'vna_dynamic':{
'available':0,
'requested':0,
'reason':'no files matching criteria',
'files':[],
'paths':[],
'override':[],
'parameters':{}
},
'fitting':{
'available':0,
'requested':0,
'reason':'no files matching criteria',
'files':[],
'paths':[],
'override':[],
'parameters':{}
},
'psd':{
'available':0,
'requested':0,
'reason':'no files matching criteria',
'files':[],
'paths':[],
'override':[],
'parameters':{}
},
'qf':{
'available':0,
'requested':0,
'reason':'Implemented only as a plotting mode',
'files':[],
'paths':[],
'override':[],
'parameters':{}
},
'qf_psd':{
'available':0,
'requested':0,
'reason':'no files matching criteria',
'files':[],
'paths':[],
'override':[],
'parameters':{}
},
'calibrated_psd':{
'available':0,
'requested':0,
'reason':'Calibration is in development',
'files':[],
'paths':[],
'override':[],
'parameters':{}
},
'pair_subtraction':{
'available':0,
'requested':0,
'reason':'Pair subtraction is in development',
'files':[],
'paths':[],
'override':[],
'parameters':{}
},
'calibration':{
'available':0,
'requested':0,
'reason':'Calibration is in development',
'files':[],
'paths':[],
'override':[],
'parameters':{}
},
'delay':{
'available':0,
'requested':0,
'reason':'no files matching criteria',
'files':[],
'paths':[],
'override':[],
'parameters':{}
}
},
'excluded_files':[],
'excluded_paths':[],
'exclusion_reason':[]
}
self.file_list = file_list
self.path_list = []
self.err_list = [] # errors relative to a file
self.convert_filenames()
self.invalid_meas_type = "Invalid measure type defined inside the raw data group: "
self.invalid_meas_db = "Cannot find the measure in the database"
        # Allows thread-safe operations on self.config.
        # Useful when operating on a large number of files, since the checks can be parallelized.
self.lock = Lock()
self.joblist = []
def convert_filenames(self):
'''
        Take a list of filenames and correlate them with absolute paths.
'''
for f in self.file_list:
err, path = filename_to_abs_path(f)
self.path_list.append(path)
self.err_list.append(err)
def check_if_previously_excluded(self, filename):
'''
        If a file was previously excluded but is now included, remove it from the exclusion lists.
'''
        if filename in self.config['excluded_files']:
            # The three exclusion lists are kept parallel, so delete by index
            # (the original removed the exclusion reason from the paths list by mistake).
            index = self.config['excluded_files'].index(filename)
            del self.config['excluded_files'][index]
            del self.config['exclusion_reason'][index]
            del self.config['excluded_paths'][index]
            return True
else:
return False
def check_if_already_included(self, filename):
'''
        Check whether the file is already included in any analysis category of this configuration.
'''
ret = False
ret = ret or (filename in self.config['multi']['diagnostic']['files_noise'])
ret = ret or (filename in self.config['multi']['diagnostic']['files_vna'])
ret = ret or (filename in self.config['single']['vna_simple']['files'])
ret = ret or (filename in self.config['single']['vna_dynamic']['files'])
ret = ret or (filename in self.config['single']['psd']['files'])
ret = ret or (filename in self.config['single']['qf']['files'])
ret = ret or (filename in self.config['single']['qf_psd']['files'])
ret = ret or (filename in self.config['single']['calibrated_psd']['files'])
ret = ret or (filename in self.config['single']['pair_subtraction']['files'])
ret = ret or (filename in self.config['single']['calibration']['files'])
return ret
def analysis_check_single_file(self, path, err):
'''
        Check which analyses can be performed on a single file.
'''
        f_type = u.get_meas_type(path)[0]  # assuming a single USRP device
filename = os.path.basename(path)
dir_name = os.path.dirname(os.path.relpath(path, os.path.commonprefix([app.config["GLOBAL_MEASURES_PATH"],path])))
self.lock.acquire()
if check_db_measure(os.path.join(dir_name,filename)) and not err:
            # Older files store the measure type as bytes: decode before comparing.
            # (The original set the flags to True whenever decode() succeeded,
            # regardless of the actual type.)
            try:
                VNA_old_version_flag = f_type.decode('utf-8') == 'VNA'
            except AttributeError:
                VNA_old_version_flag = False
            try:
                Noise_old_version_flag = f_type.decode('utf-8') == 'Noise'
            except AttributeError:
                Noise_old_version_flag = False
            try:
                delay_old_version_flag = f_type.decode('utf-8') == 'delay'
            except AttributeError:
                delay_old_version_flag = False
# VNA type
if (f_type=='VNA') or VNA_old_version_flag:
self.check_if_previously_excluded(filename)
self.config['single']['vna_simple']['available'] = 1
self.config['single']['vna_simple']['reason'] = ''
self.config['single']['vna_simple']['files'].append(filename)
self.config['single']['vna_simple']['paths'].append(dir_name)
self.config['single']['vna_simple']['override'].append(int(u.is_VNA_analyzed(path)))
self.config['single']['fitting']['available'] = 1
self.config['single']['fitting']['reason'] = ''
self.config['single']['fitting']['files'].append(filename)
self.config['single']['fitting']['paths'].append(dir_name)
self.config['single']['fitting']['override'].append(int(u.has_fit_data(path)))
if u.get_VNA_iterations(path, usrp_number = 0)>1:
# sub-analysis: depend on the VNA group
self.config['single']['vna_dynamic']['available'] = 1
self.config['single']['vna_dynamic']['reason'] = ''
self.config['single']['vna_dynamic']['files'].append(filename)
self.config['single']['vna_dynamic']['paths'].append(dir_name)
self.config['single']['vna_dynamic']['override'].append(int(u.is_VNA_dynamic_analyzed(path)))
# Noise type
elif (f_type=='Noise') or Noise_old_version_flag:
self.check_if_previously_excluded(filename)
self.config['single']['psd']['available'] = 1
self.config['single']['psd']['reason'] = ''
self.config['single']['psd']['files'].append(filename)
self.config['single']['psd']['paths'].append(dir_name)
self.config['single']['psd']['override'].append(int(u.has_noise_group(path)))
if u.has_fit_data(path):
self.check_if_previously_excluded(filename)
#self.config['single']['qf']['available'] = 1
#self.config['single']['qf']['reason'] = ''
#self.config['single']['qf']['files'].append(filename)
#self.config['single']['qf']['override'].append(?)
self.config['single']['qf_psd']['available'] = 1
self.config['single']['qf_psd']['reason'] = ''
self.config['single']['qf_psd']['files'].append(filename)
self.config['single']['qf_psd']['paths'].append(dir_name)
                    self.config['single']['qf_psd']['override'].append(int(u.has_NEF_group(path)))
elif (f_type=='delay') or delay_old_version_flag:
self.config['single']['delay']['available'] = 1
self.config['single']['delay']['reason'] = ''
self.config['single']['delay']['files'].append(filename)
self.config['single']['delay']['paths'].append(dir_name)
self.config['single']['delay']['override'].append(int(u.has_delay_goup(path)))
# TODO add spectrum files
# TODO add raw files
else:
if not self.check_if_already_included(filename):
self.config['excluded_files'].append(filename)
self.config['exclusion_reason'].append(self.invalid_meas_type + f_type)
self.config['excluded_paths'].append(dir_name)
else:
self.config['excluded_files'].append(filename)
self.config['exclusion_reason'].append(self.invalid_meas_db )
self.config['excluded_paths'].append('db query null')
self.lock.release()
def check_diagnostic_association(self, noise_file, vna_file):
'''
        Check if two files are compatible for diagnostic plots. Will be updated when the noise H5 files carry the VNA name.
'''
try:
procede_flag = False
if (u.get_meas_type(noise_file)[0] == "Noise") and (u.get_meas_type(vna_file)[0] == "VNA"): procede_flag = True
try:
if (u.get_meas_type(noise_file)[0].decode('utf-8') == "Noise") and (u.get_meas_type(vna_file)[0].decode('utf-8') == "VNA"):procede_flag = True
except AttributeError:
pass
if procede_flag:
                # Heuristic: compare a few points of the first resonator's fitted S21 to match the two files.
reso_n = list(u.bound_open(noise_file)['Resonators']['reso_0']['fitted_S21'][:20:3])
reso_v = list(u.bound_open(vna_file)['Resonators']['reso_0']['fitted_S21'][:20:3])
ret = (reso_n == reso_v)
else:
ret = False
except KeyError:
ret = False
except OSError:
print_warning("file not found in check_diagnostic_association")
ret = False
except ValueError:
ret = False
return ret
def check_file_list(self):
'''
        Wrapper that runs analysis_check_single_file on every file in the list.
'''
for i in range(len(self.path_list)):
self.analysis_check_single_file(self.path_list[i], self.err_list[i])
        '''# Diagnostic is implemented only as a plotting mode
self.lock.acquire()
for j in range(len(self.path_list)):
if self.check_diagnostic_association(self.path_list[i], self.path_list[j]):
self.check_if_previously_excluded(self.path_list[i])
self.check_if_previously_excluded(self.path_list[j])
self.config['multi']['diagnostic']['available'] = 1
self.config['multi']['diagnostic']['reason'] = ''
self.config['multi']['diagnostic']['files_noise'].append(self.file_list[i])
self.config['multi']['diagnostic']['files_vna'].append(self.file_list[j])
self.config['multi']['diagnostic']['noise_paths'].append(dir_name)!!!
self.lock.release()
'''
# at this point the config is done
def pprint(self):
'''
        Diagnostic for the analysis config dictionary: just print it nicely.
'''
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(self.config)
def update_configuration(self, config):
'''
        For now this is just a variable update. TODO: add cross-checks on the parameters to avoid invalid configurations.
'''
self.config = config
def find_dependencies(self, filename):
'''
        Find whether the file already has analysis jobs assigned and return the name
        of the most recent one as a dependency.
        '''
        # Run backward through the list so the most recent job is found first.
rev_list = self.joblist[::-1]
depends = None
for i in range(len(rev_list)):
if rev_list[i]['filename'] == filename:
depends = rev_list[i]['name']
break
return depends
def build_job_queue(self):
'''
        Build the job queue as a list of dicts.
'''
for key in self.config['single'].keys():
if int(self.config['single'][key]['requested']):
for i in range(len(self.config['single'][key]['files'])):
self.joblist.append({
'type':key,
# The absolute path for the redis worker
'file':os.path.join(app.config["GLOBAL_MEASURES_PATH"],os.path.join(self.config['single'][key]['paths'][i],self.config['single'][key]['files'][i])),
'arguments':self.config['single'][key]['parameters'],
                        # will be taken care of in future development
'depends':self.find_dependencies(self.config['single'][key]['files'][i]),
'filename':self.config['single'][key]['files'][i],
                        # note that the _on_ token is used to discriminate read-mode file access and to manage dependencies
'name': "%s_on_%s" % (key, self.config['single'][key]['files'][i])
})
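        # Example of a single resulting joblist entry (illustrative values only,
        # not taken from a real measurement set):
        # {'type': 'vna_simple',
        #  'file': '/measures/run1/vna_001.h5',
        #  'arguments': {},
        #  'depends': None,
        #  'filename': 'vna_001.h5',
        #  'name': 'vna_simple_on_vna_001.h5'}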
def sort_job_queue(self):
'''
        Define the order of the job queue.
        Consecutive (dependent) measures are not developed yet, so for now this
        function only pretty-prints the queue.
'''
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(self.joblist)
def enqueue_jobs(self):
'''
Enqueue each analysis job
'''
for i in range(len(self.joblist)):
submit_job_wrapper(self.joblist[i])
#job_manager.submit_job(init_dry_run, arguments = arguments, name = name, depends = None)
|
"Cloud service related functionality."
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import concurrent.futures
import requests
import json
OK = 0
ERROR = 1
CONNECT_ERROR = 2
AUTH_ERROR = 3
UPLOAD_ERROR = 4
class Logger(object):
def register_tuner(self, tuner_state):
"""Informs the logger that a new search is starting."""
raise NotImplementedError
def register_trial(self, trial_id, trial_state):
"""Informs the logger that a new Trial is starting."""
raise NotImplementedError
def report_trial_state(self, trial_id, trial_state):
"""Gives the logger information about trial status."""
raise NotImplementedError
def exit(self):
raise NotImplementedError
def url_join(*parts):
return '/'.join(map(lambda fragment: fragment.rstrip('/'), parts))
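# A quick illustration of url_join (the URL below is a placeholder, not a real endpoint):
# url_join('https://example.test/api/', 'register_trial/') == 'https://example.test/api/register_trial'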
def send_to_backend(url, data, key):
response = requests.post(
url,
headers={'X-AUTH': key},
json=data)
if not response.ok:
try:
response_json = response.json()
except json.decoder.JSONDecodeError:
print('Cloud service down -- data not uploaded: %s' % response.text)
return CONNECT_ERROR
if response_json['status'] == 'Unauthorized':
print('Invalid backend API key.')
return AUTH_ERROR
else:
print('Warning! Cloud service upload failed: %s' % response.text)
return UPLOAD_ERROR
return ERROR
else:
return OK
class CloudLogger(Logger):
def __init__(self, api_key):
self.api_key = api_key
self.log_interval = 5
self._base_url = (
'https://us-central1-kerastuner-prod.cloudfunctions.net/api/')
self._last_update = -1
self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)
self._search_id = None
self._async = True
def register_tuner(self, tuner_state):
data = {
'tuner_state': tuner_state,
}
self._send_to_backend('register_tuner', data)
def register_trial(self, trial_id, trial_state):
"""Informs the logger that a new Trial is starting."""
data = {
'trial_id': trial_id,
'trial_state': trial_state,
}
self._send_to_backend('register_trial', data)
def report_trial_state(self, trial_id, trial_state):
"""Gives the logger information about trial status."""
data = {
'trial_id': trial_id,
'trial_state': trial_state,
}
self._send_to_backend('report_trial_state', data)
def exit(self):
"""Makes sure that all cloud requests have been sent."""
self._executor.shutdown(wait=True)
        # In case the user wants to run multiple hypertuning sessions,
        # we open another thread pool.
self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)
def _send_to_backend(self, route, data):
url = url_join(self._base_url, route)
if self._async:
self._executor.submit(send_to_backend,
url,
data,
self.api_key)
else:
send_to_backend(url, data, self.api_key)
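# A minimal usage sketch (the API key and state dictionaries below are placeholders;
# in practice the tuner supplies its own serializable state):
# logger = CloudLogger(api_key='YOUR_API_KEY')
# logger.register_tuner({'objective': 'val_loss'})
# logger.register_trial('trial_0', {'status': 'RUNNING'})
# logger.report_trial_state('trial_0', {'status': 'COMPLETED'})
# logger.exit()  # waits for all queued uploads before returning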
|