Dataset schema (column type and value range; nullable columns are marked):

| column | type / stats |
|---|---|
| hexsha | string, length 40 |
| size | int64, 4 to 1.02M |
| ext | string, 8 classes |
| lang | string, 1 class |
| max_stars_repo_path | string, length 4 to 209 |
| max_stars_repo_name | string, length 5 to 121 |
| max_stars_repo_head_hexsha | string, length 40 |
| max_stars_repo_licenses | list, length 1 to 10 |
| max_stars_count | int64, 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string, length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string, length 24, nullable |
| max_issues_repo_path | string, length 4 to 209 |
| max_issues_repo_name | string, length 5 to 121 |
| max_issues_repo_head_hexsha | string, length 40 |
| max_issues_repo_licenses | list, length 1 to 10 |
| max_issues_count | int64, 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string, length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string, length 24, nullable |
| max_forks_repo_path | string, length 4 to 209 |
| max_forks_repo_name | string, length 5 to 121 |
| max_forks_repo_head_hexsha | string, length 40 |
| max_forks_repo_licenses | list, length 1 to 10 |
| max_forks_count | int64, 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string, length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string, length 24, nullable |
| content | string, length 4 to 1.02M |
| avg_line_length | float64, 1.07 to 66.1k |
| max_line_length | int64, 4 to 266k |
| alphanum_fraction | float64, 0.01 to 1 |
hexsha: d76ce23e6329b8ee23b0d81f29ac3bdab0a9a8f3 | size: 10,755 | ext: py | lang: Python
max_stars: chives/wallet/wallet_coin_store.py in zcomputerwiz/chives-blockchain @ 73d268bf76f50ff6133c868b58891e75739a2708, licenses ["Apache-2.0"], count 75, event datetimes 2021-06-27T03:30:59.000Z to 2022-03-20T12:32:55.000Z
max_issues: same path/repo/head/licenses, count 39, event datetimes 2021-07-02T07:11:24.000Z to 2022-03-20T15:00:07.000Z
max_forks: same path/repo/head/licenses, count 41, event datetimes 2021-06-24T11:24:43.000Z to 2022-03-14T16:11:38.000Z
content:
from typing import Dict, List, Optional, Set
import aiosqlite
import sqlite3
from chives.types.blockchain_format.coin import Coin
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.util.db_wrapper import DBWrapper
from chives.util.ints import uint32, uint64
from chives.wallet.util.wallet_types import WalletType
from chives.wallet.wallet_coin_record import WalletCoinRecord
class WalletCoinStore:
"""
This object handles CoinRecords in DB used by wallet.
"""
db_connection: aiosqlite.Connection
# coin_record_cache keeps ALL coin records in memory. [record_name: record]
coin_record_cache: Dict[bytes32, WalletCoinRecord]
# unspent_coin_wallet_cache keeps ALL unspent coin records for wallet in memory [wallet_id: [record_name: record]]
unspent_coin_wallet_cache: Dict[int, Dict[bytes32, WalletCoinRecord]]
db_wrapper: DBWrapper
@classmethod
async def create(cls, wrapper: DBWrapper):
self = cls()
self.db_connection = wrapper.db
self.db_wrapper = wrapper
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS coin_record("
"coin_name text PRIMARY KEY,"
" confirmed_height bigint,"
" spent_height bigint,"
" spent int,"
" coinbase int,"
" puzzle_hash text,"
" coin_parent text,"
" amount blob,"
" wallet_type int,"
" wallet_id int)"
)
)
# Useful for reorg lookups
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS coin_confirmed_height on coin_record(confirmed_height)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS coin_spent_height on coin_record(spent_height)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS coin_spent on coin_record(spent)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS coin_puzzlehash on coin_record(puzzle_hash)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_type on coin_record(wallet_type)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_id on coin_record(wallet_id)")
await self.db_connection.commit()
self.coin_record_cache = {}
self.unspent_coin_wallet_cache = {}
await self.rebuild_wallet_cache()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM coin_record")
await cursor.close()
await self.db_connection.commit()
async def rebuild_wallet_cache(self):
# First update all coins that were reorged, then re-add coin_records
all_coins = await self.get_all_coins()
self.unspent_coin_wallet_cache = {}
self.coin_record_cache = {}
for coin_record in all_coins:
name = coin_record.name()
self.coin_record_cache[name] = coin_record
if coin_record.spent is False:
if coin_record.wallet_id not in self.unspent_coin_wallet_cache:
self.unspent_coin_wallet_cache[coin_record.wallet_id] = {}
self.unspent_coin_wallet_cache[coin_record.wallet_id][name] = coin_record
# Store CoinRecord in DB and ram cache
async def add_coin_record(self, record: WalletCoinRecord) -> None:
# update wallet cache
name = record.name()
self.coin_record_cache[name] = record
if record.wallet_id in self.unspent_coin_wallet_cache:
if record.spent and name in self.unspent_coin_wallet_cache[record.wallet_id]:
self.unspent_coin_wallet_cache[record.wallet_id].pop(name)
if not record.spent:
self.unspent_coin_wallet_cache[record.wallet_id][name] = record
else:
if not record.spent:
self.unspent_coin_wallet_cache[record.wallet_id] = {}
self.unspent_coin_wallet_cache[record.wallet_id][name] = record
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
name.hex(),
record.confirmed_block_height,
record.spent_block_height,
int(record.spent),
int(record.coinbase),
str(record.coin.puzzle_hash.hex()),
str(record.coin.parent_coin_info.hex()),
bytes(record.coin.amount),
record.wallet_type,
record.wallet_id,
),
)
await cursor.close()
# Update coin_record to be spent in DB
async def set_spent(self, coin_name: bytes32, height: uint32) -> WalletCoinRecord:
current: Optional[WalletCoinRecord] = await self.get_coin_record(coin_name)
assert current is not None
# assert current.spent is False
spent: WalletCoinRecord = WalletCoinRecord(
current.coin,
current.confirmed_block_height,
height,
True,
current.coinbase,
current.wallet_type,
current.wallet_id,
)
await self.add_coin_record(spent)
return spent
def coin_record_from_row(self, row: sqlite3.Row) -> WalletCoinRecord:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
return WalletCoinRecord(
coin, uint32(row[1]), uint32(row[2]), bool(row[3]), bool(row[4]), WalletType(row[8]), row[9]
)
async def get_coin_record(self, coin_name: bytes32) -> Optional[WalletCoinRecord]:
"""Returns CoinRecord with specified coin id."""
if coin_name in self.coin_record_cache:
return self.coin_record_cache[coin_name]
cursor = await self.db_connection.execute("SELECT * from coin_record WHERE coin_name=?", (coin_name.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return self.coin_record_from_row(row)
async def get_first_coin_height(self) -> Optional[uint32]:
"""Returns height of first confirmed coin"""
cursor = await self.db_connection.execute("SELECT MIN(confirmed_height) FROM coin_record;")
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return uint32(row[0])
return None
async def get_unspent_coins_at_height(self, height: Optional[uint32] = None) -> Set[WalletCoinRecord]:
"""
Returns set of CoinRecords that have not been spent yet. If a height is specified,
we can also return coins that were unspent at this height (but maybe spent later).
Finally, the coins must be confirmed at the height or less.
"""
if height is None:
all_unspent = set()
for name, coin_record in self.coin_record_cache.items():
if coin_record.spent is False:
all_unspent.add(coin_record)
return all_unspent
else:
all_unspent = set()
for name, coin_record in self.coin_record_cache.items():
if (
coin_record.spent is False
or coin_record.spent_block_height > height >= coin_record.confirmed_block_height
):
all_unspent.add(coin_record)
return all_unspent
async def get_unspent_coins_for_wallet(self, wallet_id: int) -> Set[WalletCoinRecord]:
"""Returns set of CoinRecords that have not been spent yet for a wallet."""
if wallet_id in self.unspent_coin_wallet_cache:
wallet_coins: Dict[bytes32, WalletCoinRecord] = self.unspent_coin_wallet_cache[wallet_id]
return set(wallet_coins.values())
else:
return set()
async def get_all_coins(self) -> Set[WalletCoinRecord]:
"""Returns set of all CoinRecords."""
cursor = await self.db_connection.execute("SELECT * from coin_record")
rows = await cursor.fetchall()
await cursor.close()
return set(self.coin_record_from_row(row) for row in rows)
# Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them
async def get_coin_records_by_puzzle_hash(self, puzzle_hash: bytes32) -> List[WalletCoinRecord]:
"""Returns a list of all coin records with the given puzzle hash"""
cursor = await self.db_connection.execute("SELECT * from coin_record WHERE puzzle_hash=?", (puzzle_hash.hex(),))
rows = await cursor.fetchall()
await cursor.close()
return [self.coin_record_from_row(row) for row in rows]
async def rollback_to_block(self, height: int):
"""
Rolls back the blockchain to block_index. All blocks confirmed after this point
are removed from the LCA. All coins confirmed after this point are removed.
All coins spent after this point are set to unspent. Can be -1 (rollback all)
"""
# Delete from storage
delete_queue: List[WalletCoinRecord] = []
for coin_name, coin_record in self.coin_record_cache.items():
if coin_record.spent_block_height > height:
new_record = WalletCoinRecord(
coin_record.coin,
coin_record.confirmed_block_height,
uint32(0),
False,
coin_record.coinbase,
coin_record.wallet_type,
coin_record.wallet_id,
)
self.coin_record_cache[coin_record.coin.name()] = new_record
self.unspent_coin_wallet_cache[coin_record.wallet_id][coin_record.coin.name()] = new_record
if coin_record.confirmed_block_height > height:
delete_queue.append(coin_record)
for coin_record in delete_queue:
self.coin_record_cache.pop(coin_record.coin.name())
if coin_record.wallet_id in self.unspent_coin_wallet_cache:
coin_cache = self.unspent_coin_wallet_cache[coin_record.wallet_id]
if coin_record.coin.name() in coin_cache:
coin_cache.pop(coin_record.coin.name())
c1 = await self.db_connection.execute("DELETE FROM coin_record WHERE confirmed_height>?", (height,))
await c1.close()
c2 = await self.db_connection.execute(
"UPDATE coin_record SET spent_height = 0, spent = 0 WHERE spent_height>?",
(height,),
)
await c2.close()
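The store above keeps every coin record in SQLite and mirrors it in two in-memory caches (all records keyed by coin name, plus per-wallet unspent records). A minimal usage sketch follows; it is illustrative only and assumes the chives package is importable and that DBWrapper can be constructed directly around an aiosqlite connection, which is not shown in the record itself.

```python
# Hypothetical usage sketch (not part of the dataset row); assumes chives is installed
# and that DBWrapper wraps an aiosqlite connection as its first constructor argument.
import asyncio
import aiosqlite

from chives.util.db_wrapper import DBWrapper
from chives.wallet.wallet_coin_store import WalletCoinStore


async def main() -> None:
    connection = await aiosqlite.connect(":memory:")
    store = await WalletCoinStore.create(DBWrapper(connection))
    # create() builds the coin_record table and warms both caches via rebuild_wallet_cache(),
    # so cache-only reads such as this one work immediately (empty set for a fresh database).
    unspent = await store.get_unspent_coins_for_wallet(wallet_id=1)
    print(len(unspent))
    await connection.close()


asyncio.run(main())
```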
avg_line_length: 43.02 | max_line_length: 120 | alphanum_fraction: 0.637378

hexsha: 624b7df31768d4ca00cf3a9b3dea7802d6077b70 | size: 7,213 | ext: py | lang: Python
max_stars: object_detection/configs/_base_/models/cascade_mask_rcnn_convnext_fpn.py in ParikhKadam/ConvNeXt @ d1fa8f6fef0a165b27399986cc2bdacc92777e40, licenses ["MIT"], count 3,453, event datetimes 2022-01-11T01:49:27.000Z to 2022-03-31T12:35:56.000Z
max_issues: same path/repo/head/licenses, count 80, event datetimes 2022-01-11T10:03:13.000Z to 2022-03-31T05:22:48.000Z
max_forks: same path/repo/head/licenses, count 398, event datetimes 2022-01-11T02:42:28.000Z to 2022-03-31T06:30:47.000Z
content:
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# model settings
model = dict(
type='CascadeRCNN',
pretrained=None,
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.2,
layer_scale_init_value=1e-6,
out_indices=[0, 1, 2, 3],
),
neck=dict(
type='FPN',
in_channels=[128, 256, 512, 1024],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='CascadeRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
avg_line_length: 34.511962 | max_line_length: 79 | alphanum_fraction: 0.45931

hexsha: 5a96852e2bc3a9229a0254883aaa5ed0f985114c | size: 709 | ext: py | lang: Python
max_stars: env/Lib/site-packages/plotly/validators/scattermapbox/_legendgrouptitle.py in andresgreen-byte/Laboratorio-1--Inversion-de-Capital @ 8a4707301d19c3826c31026c4077930bcd6a8182, licenses ["MIT"], count 11,750, event datetimes 2015-10-12T07:03:39.000Z to 2022-03-31T20:43:15.000Z
max_issues: venv/Lib/site-packages/plotly/validators/scattermapbox/_legendgrouptitle.py in wakisalvador/constructed-misdirection @ 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e, licenses ["Unlicense"], count 2,951, event datetimes 2015-10-12T00:41:25.000Z to 2022-03-31T22:19:26.000Z
max_forks: same path/repo/head/licenses as max_issues, count 2,623, event datetimes 2015-10-15T14:40:27.000Z to 2022-03-28T16:05:50.000Z
content:
import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="legendgrouptitle", parent_name="scattermapbox", **kwargs
):
super(LegendgrouptitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Legendgrouptitle"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this legend group's title font.
text
Sets the title of the legend group.
""",
),
**kwargs
)
avg_line_length: 30.826087 | max_line_length: 83 | alphanum_fraction: 0.586742

hexsha: d7a421a7fc783b47d6036469c88d8d5af88e98bc | size: 8,437 | ext: py | lang: Python
max_stars: joystick/pubsub_servo.py in quintest/Cloud-IoT-Core-Kit-Examples @ ff9726f31ce5c69f03f59b43a66efef9b04fbb85, licenses ["Apache-2.0"], count 66, event datetimes 2017-10-16T09:33:05.000Z to 2022-02-22T18:29:30.000Z
max_issues: same path/repo/head/licenses, count 5, event datetimes 2017-10-23T01:39:02.000Z to 2018-06-14T17:41:26.000Z
max_forks: same path/repo/head/licenses, count 30, event datetimes 2017-09-27T13:55:25.000Z to 2022-03-11T12:41:18.000Z
content:
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import json
import time
import ssl
import jwt
import paho.mqtt.client as mqtt
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
from subprocess import call
# Software SPI configuration:
CLK = 12
MISO = 23
MOSI = 24
CS = 25
mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)
servoMin = 50
servoMax = 250
servoSteps = servoMax - servoMin
stickSensitivity = 5 # the lower the number the more sensitive we are to stick changes that transmit a message
stickToServoPositionRatio = 1024/servoSteps # assume 10bit ADC
#Servo settings
pwmGPIO = "18"
pwmClock = "192"
pwmRange = "2000"
# Update and publish readings at a rate of SENSOR_POLL per second.
SENSOR_POLL=2
def create_jwt(project_id, private_key_file, algorithm):
"""Create a JWT (https://jwt.io) to establish an MQTT connection."""
token = {
'iat': datetime.datetime.utcnow(),
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
'aud': project_id
}
with open(private_key_file, 'r') as f:
private_key = f.read()
print 'Creating JWT using {} from private key file {}'.format(
algorithm, private_key_file)
return jwt.encode(token, private_key, algorithm=algorithm)
def error_str(rc):
"""Convert a Paho error to a human readable string."""
return '{}: {}'.format(rc, mqtt.error_string(rc))
class Device(object):
"""Represents the state of a single device."""
def __init__(self):
#self.leftright = 512
#self.updown = 512
self.servoStep = 150
self.connected = False
def update_sensor_data(self):
#self.leftright = mcp.read_adc(0)
#self.updown = mcp.read_adc(1)
leftRightServoStep = mcp.read_adc(0)/stickToServoPositionRatio
leftRightServoStep = (leftRightServoStep/stickSensitivity)*stickSensitivity
leftRightServoStep = leftRightServoStep + servoMin
#print 'leftRightServoStep', leftRightServoStep
#poll until the stick moves
# while leftRightServoStep == self.servoStep:
# leftRightServoStep = mcp.read_adc(0)/stickToServoPositionRatio
# leftRightServoStep = (leftRightServoStep/stickSensitivity)*stickSensitivity
# leftRightServoStep = leftRightServoStep + servoMin
#print 'leftRightServoStep', leftRightServoStep
self.servoStep = leftRightServoStep
def wait_for_connection(self, timeout):
"""Wait for the device to become connected."""
total_time = 0
while not self.connected and total_time < timeout:
time.sleep(1)
total_time += 1
if not self.connected:
raise RuntimeError('Could not connect to MQTT bridge.')
def on_connect(self, unused_client, unused_userdata, unused_flags, rc):
"""Callback for when a device connects."""
print 'Connection Result:', error_str(rc)
self.connected = True
def on_disconnect(self, unused_client, unused_userdata, rc):
"""Callback for when a device disconnects."""
print 'Disconnected:', error_str(rc)
self.connected = False
def on_publish(self, unused_client, unused_userdata, unused_mid):
"""Callback when the device receives a PUBACK from the MQTT bridge."""
print 'Published message acked.'
def on_subscribe(self, unused_client, unused_userdata, unused_mid,
granted_qos):
"""Callback when the device receives a SUBACK from the MQTT bridge."""
print 'Subscribed: ', granted_qos
if granted_qos[0] == 128:
print 'Subscription failed.'
def on_message(self, unused_client, unused_userdata, message):
"""Callback when the device receives a message on a subscription."""
payload = message.payload
print "Received message '{}' on topic '{}' with Qos {}".format(
payload, message.topic, str(message.qos))
# The device will receive its latest config when it subscribes to the config
# topic. If there is no configuration for the device, the device will
# receive an config with an empty payload.
if not payload:
print 'no payload'
return
# The config is passed in the payload of the message. In this example, the
# server sends a serialized JSON string.
data = json.loads(payload)
if data['servoStep']:
# If we're changing the servo position, print a message and update our
# internal state.
self.servoStep = data['servoStep']
if self.servoStep:
print 'ServoStep', self.servoStep
# move the servo to new position and respond with new position
err = call(["gpio", "-g", "pwm", pwmGPIO, str(data['servoStep'])])
if err != 0:
print "Couldn't move servo, error:", err
def parse_command_line_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Example Google Cloud IoT MQTT device connection code.')
parser.add_argument(
'--project_id', required=True, help='GCP cloud project name')
parser.add_argument(
'--registry_id', required=True, help='Cloud IoT registry id')
parser.add_argument('--device_id', required=True, help='Cloud IoT device id')
parser.add_argument(
'--private_key_file', required=True, help='Path to private key file.')
parser.add_argument(
'--algorithm',
choices=('RS256', 'ES256'),
required=True,
help='Which encryption algorithm to use to generate the JWT.')
parser.add_argument(
'--cloud_region', default='us-central1', help='GCP cloud region')
parser.add_argument(
'--ca_certs',
default='roots.pem',
help='CA root certificate. Get from https://pki.google.com/roots.pem')
parser.add_argument(
'--num_messages',
type=int,
default=100,
help='Number of messages to publish.')
parser.add_argument(
'--mqtt_bridge_hostname',
default='mqtt.googleapis.com',
help='MQTT bridge hostname.')
parser.add_argument(
'--mqtt_bridge_port', default=8883, help='MQTT bridge port.')
return parser.parse_args()
def main():
args = parse_command_line_args()
#setup PWM for servo
err = call(["gpio", "-g", "mode", pwmGPIO, "pwm"])
err |= call(["gpio", "pwm-ms"])
err |= call(["gpio", "pwmc", pwmClock])
err |= call(["gpio", "pwmr", pwmRange])
if err != 0:
print "gpio setup error:", err
quit()
# Create our MQTT client and connect to Cloud IoT.
client = mqtt.Client(
client_id='projects/{}/locations/{}/registries/{}/devices/{}'.format(
args.project_id, args.cloud_region, args.registry_id, args.device_id))
client.username_pw_set(
username='unused',
password=create_jwt(args.project_id, args.private_key_file,
args.algorithm))
client.tls_set(ca_certs=args.ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2)
device = Device()
client.on_connect = device.on_connect
# client.on_publish = device.on_publish
client.on_disconnect = device.on_disconnect
client.on_subscribe = device.on_subscribe
client.on_message = device.on_message
client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)
client.loop_start()
# This is the topic that the device will publish telemetry events to.
mqtt_telemetry_topic = '/devices/{}/events'.format(args.device_id)
# This is the topic that the device will receive configuration updates on.
mqtt_config_topic = '/devices/{}/config'.format(args.device_id)
# Wait up to 5 seconds for the device to connect.
device.wait_for_connection(5)
# Subscribe to the config topic.
client.subscribe(mqtt_config_topic, qos=1)
# Update and publish stick position readings at a rate of one per SENSOR_POLL but poll the sensor for "stickSensitivity" changes.
while True:
pass
# time.sleep(SENSOR_POLL)
client.disconnect()
client.loop_stop()
print 'Finished loop successfully. Goodbye!'
if __name__ == '__main__':
main()
avg_line_length: 33.883534 | max_line_length: 131 | alphanum_fraction: 0.704516

hexsha: 444085a96450ab535a70361431cc21a071f0c81f | size: 11,132 | ext: py | lang: Python
max_stars: galpy/potential/MN3ExponentialDiskPotential.py in gusbeane/galpy @ d6db971285f163456c81775fc2fdc7d75189762c, licenses ["BSD-3-Clause"], count 147, event datetimes 2015-01-01T14:06:17.000Z to 2022-03-24T14:47:41.000Z
max_issues: same path/repo/head/licenses, count 269, event datetimes 2015-01-07T15:58:31.000Z to 2022-03-30T18:42:08.000Z
max_forks: same path/repo/head/licenses, count 110, event datetimes 2015-02-08T10:57:24.000Z to 2021-12-28T07:56:49.000Z
content:
###############################################################################
# MN3ExponentialDiskPotential.py: class that implements the three Miyamoto-
# Nagai approximation to a radially
# exponential disk potential of Smith et al.
# 2015
###############################################################################
import warnings
import numpy
from ..util import galpyWarning, conversion
from .Potential import Potential, kms_to_kpcGyrDecorator
from .MiyamotoNagaiPotential import MiyamotoNagaiPotential
class MN3ExponentialDiskPotential(Potential):
"""class that implements the three Miyamoto-Nagai approximation to a radially-exponential disk potential of `Smith et al. 2015 <http://adsabs.harvard.edu/abs/2015arXiv150200627S>`_
.. math::
\\rho(R,z) = \\mathrm{amp}\\,\\exp\\left(-R/h_R-|z|/h_z\\right)
or
.. math::
\\rho(R,z) = \\mathrm{amp}\\,\\exp\\left(-R/h_R\\right)\\mathrm{sech}^2\\left(-|z|/h_z\\right)
depending on whether sech=True or not. This density is approximated using three Miyamoto-Nagai disks
"""
def __init__(self,amp=1.,hr=1./3.,hz=1./16.,
sech=False,posdens=False,
normalize=False,
ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a 3MN approximation to an exponential disk potential
INPUT:
amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass density or Gxmass density
hr - disk scale-length (can be Quantity)
hz - scale-height (can be Quantity)
sech= (False) if True, hz is the scale height of a sech vertical profile (default is exponential vertical profile)
posdens= (False) if True, allow only positive density solutions (Table 2 in Smith et al. rather than Table 1)
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
MN3ExponentialDiskPotential object
HISTORY:
2015-02-07 - Written - Bovy (IAS)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units='density')
hr= conversion.parse_length(hr,ro=self._ro)
hz= conversion.parse_length(hz,ro=self._ro)
self._hr= hr
self._hz= hz
self._scale= self._hr
# Adjust amp for definition
self._amp*= 4.*numpy.pi*self._hr**2.*self._hz
# First determine b/rd
if sech:
self._brd= _b_sechhz(self._hz/self._hr)
else:
self._brd= _b_exphz(self._hz/self._hr)
if self._brd < 0.:
raise IOError("MN3ExponentialDiskPotential's b/Rd is negative for the given hz")
# Check range
if (not posdens and self._brd > 3.) \
or (posdens and self._brd > 1.35):
warnings.warn("MN3ExponentialDiskPotential's b/Rd = %g is outside of the interpolation range of Smith et al. (2015)" % self._brd,
galpyWarning)
self._b= self._brd*self._hr
# Now setup the various MN disks
if posdens:
self._mn3= [MiyamotoNagaiPotential(amp=_mass1_tab2(self._brd),
a=_a1_tab2(self._brd)*self._hr,
b=self._b),
MiyamotoNagaiPotential(amp=_mass2_tab2(self._brd),
a=_a2_tab2(self._brd)*self._hr,
b=self._b),
MiyamotoNagaiPotential(amp=_mass3_tab2(self._brd),
a=_a3_tab2(self._brd)*self._hr,
b=self._b)]
else:
self._mn3= [MiyamotoNagaiPotential(amp=_mass1_tab1(self._brd),
a=_a1_tab1(self._brd)*self._hr,
b=self._b),
MiyamotoNagaiPotential(amp=_mass2_tab1(self._brd),
a=_a2_tab1(self._brd)*self._hr,
b=self._b),
MiyamotoNagaiPotential(amp=_mass3_tab1(self._brd),
a=_a3_tab1(self._brd)*self._hr,
b=self._b)]
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)):
self.normalize(normalize)
self.hasC= True
self.hasC_dxdv= True
self._nemo_accname= 'MiyamotoNagai+MiyamotoNagai+MiyamotoNagai'
return None
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2015-02-07 - Written - Bovy (IAS)
"""
return self._mn3[0](R,z,phi=phi,t=t)\
+self._mn3[1](R,z,phi=phi,t=t)\
+self._mn3[2](R,z,phi=phi,t=t)
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2015-02-07 - Written - Bovy (IAS)
"""
return self._mn3[0].Rforce(R,z,phi=phi,t=t)\
+self._mn3[1].Rforce(R,z,phi=phi,t=t)\
+self._mn3[2].Rforce(R,z,phi=phi,t=t)
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2015-02-07 - Written - Bovy (IAS)
"""
return self._mn3[0].zforce(R,z,phi=phi,t=t)\
+self._mn3[1].zforce(R,z,phi=phi,t=t)\
+self._mn3[2].zforce(R,z,phi=phi,t=t)
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2015-02-07 - Written - Bovy (IAS)
"""
return self._mn3[0].dens(R,z,phi=phi,t=t)\
+self._mn3[1].dens(R,z,phi=phi,t=t)\
+self._mn3[2].dens(R,z,phi=phi,t=t)
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2015-02-07 - Written - Bovy (IAS)
"""
return self._mn3[0].R2deriv(R,z,phi=phi,t=t)\
+self._mn3[1].R2deriv(R,z,phi=phi,t=t)\
+self._mn3[2].R2deriv(R,z,phi=phi,t=t)
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second vertical derivative
HISTORY:
2015-02-07 - Written - Bovy (IAS)
"""
return self._mn3[0].z2deriv(R,z,phi=phi,t=t)\
+self._mn3[1].z2deriv(R,z,phi=phi,t=t)\
+self._mn3[2].z2deriv(R,z,phi=phi,t=t)
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2015-02-07 - Written - Bovy (IAS)
"""
return self._mn3[0].Rzderiv(R,z,phi=phi,t=t)\
+self._mn3[1].Rzderiv(R,z,phi=phi,t=t)\
+self._mn3[2].Rzderiv(R,z,phi=phi,t=t)
@kms_to_kpcGyrDecorator
def _nemo_accpars(self,vo,ro):
"""
NAME:
_nemo_accpars
PURPOSE:
return the accpars potential parameters for use of this potential with NEMO
INPUT:
vo - velocity unit in km/s
ro - length unit in kpc
OUTPUT:
accpars string
HISTORY:
2015-02-09 - Written - Bovy (IAS)
"""
out= ""
# Loop through the self._mn3 MN potentials
for ii in range(3):
if ii > 0: out+= '#'
ampl= self._amp*self._mn3[ii]._amp*vo**2.*ro
out+= "0,%s,%s,%s" % (ampl,self._mn3[ii]._a*ro,self._mn3[ii]._b*ro)
return out
# Equations from Table 1
def _mass1_tab1(brd):
return -0.0090*brd**4.+0.0640*brd**3.-0.1653*brd**2.+0.1164*brd+1.9487
def _mass2_tab1(brd):
return 0.0173*brd**4.-0.0903*brd**3.+0.0877*brd**2.+0.2029*brd-1.3077
def _mass3_tab1(brd):
return -0.0051*brd**4.+0.0287*brd**3.-0.0361*brd**2.-0.0544*brd+0.2242
def _a1_tab1(brd):
return -0.0358*brd**4.+0.2610*brd**3.-0.6987*brd**2.-0.1193*brd+2.0074
def _a2_tab1(brd):
return -0.0830*brd**4.+0.4992*brd**3.-0.7967*brd**2.-1.2966*brd+4.4441
def _a3_tab1(brd):
return -0.0247*brd**4.+0.1718*brd**3.-0.4124*brd**2.-0.5944*brd+0.7333
# Equations from Table 2
def _mass1_tab2(brd):
return 0.0036*brd**4.-0.0330*brd**3.+0.1117*brd**2.-0.1335*brd+0.1749
def _mass2_tab2(brd):
return -0.0131*brd**4.+0.1090*brd**3.-0.3035*brd**2.+0.2921*brd-5.7976
def _mass3_tab2(brd):
return -0.0048*brd**4.+0.0454*brd**3.-0.1425*brd**2.+0.1012*brd+6.7120
def _a1_tab2(brd):
return -0.0158*brd**4.+0.0993*brd**3.-0.2070*brd**2.-0.7089*brd+0.6445
def _a2_tab2(brd):
return -0.0319*brd**4.+0.1514*brd**3.-0.1279*brd**2.-0.9325*brd+2.6836
def _a3_tab2(brd):
return -0.0326*brd**4.+0.1816*brd**3.-0.2943*brd**2.-0.6329*brd+2.3193
# Equations to go from hz to b
def _b_exphz(hz):
return -0.269*hz**3.+1.080*hz**2.+1.092*hz
def _b_sechhz(hz):
return -0.033*hz**3.+0.262*hz**2.+0.659*hz
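The module-level helpers above encode the Smith et al. (2015) fits: the mapping from the scale-height ratio hz/hR to the Miyamoto-Nagai parameter b/Rd, and the per-component masses and scale lengths as quartic polynomials in b/Rd. A standalone sketch (re-implementing the exponential-profile fit copied from the record, not importing galpy) shows the mapping for the class's default hr=1/3, hz=1/16:

```python
# Standalone illustration of the hz -> b/Rd mapping used above (exponential vertical profile).
# Coefficients are copied from _b_exphz in the record; this does not import galpy.

def b_over_rd_exponential(hz_over_hr: float) -> float:
    x = hz_over_hr
    return -0.269 * x**3 + 1.080 * x**2 + 1.092 * x

ratio = (1.0 / 16.0) / (1.0 / 3.0)      # default hz / hr from __init__
brd = b_over_rd_exponential(ratio)
print(round(brd, 4))                    # 0.2409, safely below the b/Rd <= 3 warning threshold
```

For the sech vertical profile the constructor uses the analogous _b_sechhz coefficients instead.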
avg_line_length: 34.571429 | max_line_length: 184 | alphanum_fraction: 0.525063

hexsha: 5a91fc22ecd276ad5d72af8aa7197bd1184e99f6 | size: 2,373 | ext: py | lang: Python
max_stars: python/colorpicker.py in EnviralDesign/UberGui @ a589616beeb5d4646cb493edacc37606407fcf7f, licenses ["MIT"], count 43, event datetimes 2021-02-03T04:12:45.000Z to 2022-03-20T13:28:39.000Z
max_issues: same path/repo/head/licenses, count 39, event datetimes 2021-02-05T00:45:44.000Z to 2021-09-28T09:26:44.000Z
max_forks: same path/repo/head/licenses, count 2, event datetimes 2021-02-05T03:55:24.000Z to 2021-02-09T03:49:14.000Z
content:
class colorpicker:
def __init__(self, ownerComp):
# The component to which this extension is attached
self.ownerComp = ownerComp
self.hsvChop = op('null_hsv')
self.hsvDat = op('table_hsv')
self.h_comp = op('hue')
self.sv_comp = op('hue')
def Launch(self, OPS , PAR, LEFT, RIGHT, BOTTOM, TOP):
# Launch the color picker setting the operator, par, and position in the process.
parent.colorpicker.par.Ops = OPS
parent.colorpicker.par.Par = PAR
parent.colorpicker.par.w = RIGHT - LEFT
parent.colorpicker.par.h = parent.colorpicker.par.w + op('hue').height
parent.colorpicker.par.x = LEFT
parent.colorpicker.par.y = max( 0 , (parent.Widget.height - BOTTOM - parent.colorpicker.par.h) )
parent.colorpicker.par.display = 1
ipar.Widget.Ismodified = 0
self.hsvChop.cook(force=True)
h = self.hsvChop['h']
s = self.hsvChop['s']
v = self.hsvChop['v']
self.hsvDat['h',1] = h
self.hsvDat['s',1] = s
self.hsvDat['v',1] = v
parent.Widget.op('container_foreground_focus').par.display = 1
def Close(self):
# closes the color picker. will still trigger param change callback if value has changed.
parent.colorpicker.par.display = 0
# isModified = parent.colorpicker.fetch('isModified' , 0)
isModified = ipar.Widget.Ismodified.eval()
if isModified == 1:
OPS = parent.colorpicker.par.Ops.eval()
PAR = parent.colorpicker.par.Par.eval()
foundPar = getattr( op(OPS[0]).par , PAR , ":ERR:" )
initPars = []
if foundPar != ":ERR:":
initPars = [ foundPar ]
parent.Widget.ParamChange(pars=initPars)
ipar.Widget.Ismodified = 0
parent.Widget.op('container_foreground_focus').par.display = 0
def Set(self,r,g,b):
# given a set of r/g/b values, set the custom parameters on the target objects.
# this change will force the color picker graphics to update, avoiding cyclic depend stuff.
OPS = parent.colorpicker.par.Ops.eval()
PAR = parent.colorpicker.par.Par.eval()
vals = [ r,g,b ]
for each in OPS:
for i,parName in enumerate(op('null_src_par').rows()):
parameter = getattr( op(each).par , parName[0].val , ':PAR_ERR:' )
if parameter != ':PAR_ERR:':
parameter.val = vals[i]
# parent.colorpicker.store('isModified' , 1)
ipar.Widget.Ismodified = 1
# parent.Widget.op('container_foreground_focus').par.display = 0
avg_line_length: 29.6625 | max_line_length: 98 | alphanum_fraction: 0.672145

hexsha: 26b4cc404806e1ebf44af4c6fed10b4cd24f7a2f | size: 3,634 | ext: py | lang: Python
max_stars: tests/test_commands.py in kolonialno/zplgen @ 31ac3ac9361bea39151e07885fc50eb2dec6f851, licenses ["MIT"], count 5, event datetimes 2017-05-09T14:08:58.000Z to 2018-02-13T21:33:39.000Z
max_issues: same path/repo/head/licenses, count null, event datetimes null
max_forks: same path/repo/head/licenses, count null, event datetimes null
content:
# encoding: utf-8
from __future__ import unicode_literals
from unittest import TestCase
from zplgen import Command, Font
class CommandsTestCase(TestCase):
"""
Tests for the interesting command types.
"""
def test_field_encoding(self):
"""
Tests that Command.field handles unicode characters by encoding
in cp1252 and returning the command as the bytes type.
"""
data = 'håhå'
expected_bytes = (
'^FO0,0' '^FD' + data + '^FS'
).encode('cp1252')
encoded_field = Command.field(data)
self.assertIsInstance(encoded_field, bytes)
self.assertEqual(encoded_field, expected_bytes)
def test_field_invalid_char_handling(self):
"""
Tests that characters outside the allowed cp1252 charset are handled.
"""
data = '我爱你ćdata'
expected_bytes = (
'^FO0,0' '^FD' '????data' '^FS'
).encode('cp1252')
encoded_field = Command.field(data)
self.assertIsInstance(encoded_field, bytes)
self.assertEqual(encoded_field, expected_bytes)
def test_field_minimal(self):
"""
Tests the minimal set of arguments to Command.field.
"""
data = 'data'
expected_command = (
'^FO0,0' '^FD' + data + '^FS'
).encode('cp1252')
minimal_field = Command.field(data)
self.assertEqual(minimal_field, expected_command)
def test_field_maximal(self):
"""
Tests that Command.field generates the expected set of commands
when given the full set of arguments.
"""
data = '123'
x = 12
y = 13
font = Font('0', 10)
block = '^FB200,1'
barcode = '^BE3,1'
invert = '^FR'
expected_command = (
font +
'^FO{},{}'.format(x, y) +
block +
barcode +
invert +
'^FD{}'.format(data) +
'^FS'
).encode('cp1252')
maximal_field = Command.field(
data, x=x, y=y,
font=font, block=block,
barcode=barcode, invert=True,
)
self.assertEqual(maximal_field, expected_command)
def test_argument_trimming(self):
"""
Tests that insignificant argument trimming works as expected for
the Command.block method.
"""
# If given only the first arg, the trailing commas should not
# be included.
width = '100'
expected_trimmed_command = (
'^FB' + width
).encode('cp1252')
trimmed_command = Command.block(width)
self.assertEqual(trimmed_command, expected_trimmed_command)
# If given extra args, the commas should not be trimmed
extra_args = (2, 3, 'R', 2)
expected_untrimmed_command = (
'^FB{},{},{},{},{}'.format(width, *extra_args)
).encode('cp1252')
untrimmed_command = Command.block(width, *extra_args)
self.assertEqual(untrimmed_command, expected_untrimmed_command)
class FontTestCase(TestCase):
"""
Tests the essential functionality of the Font utility class.
"""
def setUp(self):
self.font = Font('0', 3, 4, orientation='N')
def test_str(self):
"Tests that the string representation uses the default height/width."
self.assertEqual(self.font, '^A0N,3,4')
self.assertEqual(str(self.font), '^A0N,3,4')
def test_call(self):
"Tests that calling the object scales it."
self.assertEqual(self.font(100, 200), '^A0N,100,200')
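Going only by the expectations asserted in these tests (no other zplgen documentation is assumed), a rough sketch of how the two helpers are called:

```python
# Illustrative calls, mirroring the behaviour the tests above assert; treat the expected
# byte strings in the comments as test expectations, not authoritative zplgen documentation.
from zplgen import Command, Font

font = Font('0', 3, 4, orientation='N')      # str(font) == '^A0N,3,4' per FontTestCase
field = Command.field('data')                # == b'^FO0,0^FDdata^FS' per test_field_minimal
block = Command.block('100')                 # == b'^FB100' per test_argument_trimming
print(font, field, block)
```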
avg_line_length: 25.77305 | max_line_length: 77 | alphanum_fraction: 0.5776

hexsha: fcc1890a1fc31559a23c12f7dc48988ffc0c8724 | size: 1,422 | ext: py | lang: Python
max_stars: mine.py in zyyhhxx/DiscordGenshinScheduleBot @ 8208e078ce252b9810f3ab61fd8389c1e212fb42, licenses ["Apache-2.0"], count null, event datetimes null
max_issues: same path/repo/head/licenses, count null, event datetimes null
max_forks: same path/repo/head/licenses, count null, event datetimes null
content:
import language
from datetime import datetime
SELF = "self"
DEFAULT_MINE_REFRESH_INTERVAL = 259200
DEFAULT_MINE_NOTIFY_INTERVAL = 1800
def get_char_repr(char_name: str, language_key: str):
char_repr = language.get_word("you", language_key)
if char_name != SELF:
char_repr = char_name
return char_repr
def get_time_repr(datetime_string: str, elapsed: bool, language_key: str,
mine_refresh_interval: int = DEFAULT_MINE_REFRESH_INTERVAL):
current_datetime = datetime.now()
start_datetime = datetime.strptime(
datetime_string, "%Y-%m-%d %H:%M:%S.%f")
delta_datetime = current_datetime - start_datetime
if elapsed:
total_seconds = delta_datetime.total_seconds()
else:
total_seconds = mine_refresh_interval - delta_datetime.total_seconds()
# If the time difference is negative, normalize it to 0
if total_seconds <= 0:
total_seconds = 0
mins = (total_seconds // 60) % 60
hours = (total_seconds // 3600) % 24
days = total_seconds // 86400
time_repr = "{}{}".format(
int(mins), language.get_word("minutes", language_key))
if hours > 0:
time_repr = "{}{}".format(int(hours), language.get_word(
"hours", language_key)) + time_repr
if days > 0:
time_repr = "{}{}".format(int(days), language.get_word(
"days", language_key)) + time_repr
return time_repr
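The remaining-time string built by get_time_repr is plain integer arithmetic on a seconds count; a worked example of the same breakdown (standalone, no Discord or language module needed):

```python
# Worked example of the days/hours/minutes breakdown used by get_time_repr above.
total_seconds = 200_000                       # e.g. seconds left until the mine refresh
mins = (total_seconds // 60) % 60             # 33
hours = (total_seconds // 3600) % 24          # 7
days = total_seconds // 86400                 # 2
print(f"{days}d {hours}h {mins}m")            # -> 2d 7h 33m (leftover seconds are dropped)
```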
avg_line_length: 33.857143 | max_line_length: 78 | alphanum_fraction: 0.668776

hexsha: e871018126cb4bc0e30b274f03d7e4e325505b59 | size: 8,390 | ext: py | lang: Python
max_stars: parcels/examples/example_peninsula.py in rabernat/parcels @ b1c3d097c8c8d8a8398b6ab1080c8c2d23350ee8, licenses ["MIT"], count 1, event datetimes 2019-01-12T15:33:58.000Z to 2019-01-12T15:33:58.000Z
max_issues: same path/repo/head/licenses, count null, event datetimes null
max_forks: same path/repo/head/licenses, count null, event datetimes null
content:
from parcels import FieldSet, ParticleSet, ScipyParticle, JITParticle, Variable
from parcels import AdvectionRK4, AdvectionEE, AdvectionRK45
from argparse import ArgumentParser
import numpy as np
import math # NOQA
import pytest
from datetime import timedelta as delta
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
method = {'RK4': AdvectionRK4, 'EE': AdvectionEE, 'RK45': AdvectionRK45}
def peninsula_fieldset(xdim, ydim, mesh='flat'):
"""Construct a fieldset encapsulating the flow field around an
idealised peninsula.
:param xdim: Horizontal dimension of the generated fieldset
:param ydim: Vertical dimension of the generated fieldset
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation:
1. spherical: Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat (default): No conversion, lat/lon are assumed to be in m.
The original test description can be found in Fig. 2.2.3 in:
North, E. W., Gallego, A., Petitgas, P. (Eds). 2009. Manual of
recommended practices for modelling physical - biological
interactions during fish early life.
ICES Cooperative Research Report No. 295. 111 pp.
http://archimer.ifremer.fr/doc/00157/26792/24888.pdf
To avoid accuracy problems with interpolation from A-grid
to C-grid, we return NetCDF files that are on an A-grid.
"""
# Set Parcels FieldSet variables
# Generate the original test setup on A-grid in m
domainsizeX, domainsizeY = (1.e5, 5.e4)
dx, dy = domainsizeX / xdim, domainsizeY / ydim
La = np.linspace(dx, 1.e5-dx, xdim, dtype=np.float32)
Wa = np.linspace(dy, 5.e4-dy, ydim, dtype=np.float32)
u0 = 1
x0 = domainsizeX / 2
R = 0.32 * domainsizeX / 2
# Create the fields
x, y = np.meshgrid(La, Wa, sparse=True, indexing='xy')
P = (u0*R**2*y/((x-x0)**2+y**2)-u0*y) / 1e3
U = u0-u0*R**2*((x-x0)**2-y**2)/(((x-x0)**2+y**2)**2)
V = -2*u0*R**2*((x-x0)*y)/(((x-x0)**2+y**2)**2)
# Set land points to NaN
landpoints = P >= 0.
P[landpoints] = np.nan
U[landpoints] = np.nan
V[landpoints] = np.nan
# Convert from m to lat/lon for spherical meshes
lon = La / 1852. / 60. if mesh is 'spherical' else La
lat = Wa / 1852. / 60. if mesh is 'spherical' else Wa
data = {'U': U, 'V': V, 'P': P}
dimensions = {'lon': lon, 'lat': lat}
return FieldSet.from_data(data, dimensions, mesh=mesh)
def UpdateP(particle, fieldset, time, dt):
particle.p = fieldset.P[time, particle.lon, particle.lat, particle.depth]
def pensinsula_example(fieldset, npart, mode='jit', degree=1,
verbose=False, output=True, method=AdvectionRK4):
"""Example configuration of particle flow around an idealised Peninsula
:arg filename: Basename of the input fieldset
:arg npart: Number of particles to initialise"""
# First, we define a custom Particle class to which we add a
# custom variable, the initial stream function value p.
# We determine the particle base class according to mode.
class MyParticle(ptype[mode]):
# JIT compilation requires a-priori knowledge of the particle
# data structure, so we define additional variables here.
p = Variable('p', dtype=np.float32, initial=0.)
p_start = Variable('p_start', dtype=np.float32, initial=fieldset.P)
# Initialise particles
if fieldset.U.grid.mesh == 'flat':
x = 3000 # 3 km offset from boundary
else:
x = 3. * (1. / 1.852 / 60) # 3 km offset from boundary
y = (fieldset.U.lat[0] + x, fieldset.U.lat[-1] - x) # latitude range, including offsets
pset = ParticleSet.from_line(fieldset, size=npart, pclass=MyParticle,
start=(x, y[0]), finish=(x, y[1]), time=0)
if verbose:
print("Initial particle positions:\n%s" % pset)
# Advect the particles for 24h
time = delta(hours=24)
dt = delta(minutes=5)
k_adv = pset.Kernel(method)
k_p = pset.Kernel(UpdateP)
out = pset.ParticleFile(name="MyParticle", outputdt=delta(hours=1)) if output else None
print("Peninsula: Advecting %d particles for %s" % (npart, str(time)))
pset.execute(k_adv + k_p, runtime=time, dt=dt, output_file=out)
if verbose:
print("Final particle positions:\n%s" % pset)
return pset
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('mesh', ['flat', 'spherical'])
def test_peninsula_fieldset(mode, mesh):
"""Execute peninsula test from fieldset generated in memory"""
fieldset = peninsula_fieldset(100, 50, mesh)
pset = pensinsula_example(fieldset, 5, mode=mode, degree=1)
# Test advection accuracy by comparing streamline values
err_adv = np.array([abs(p.p_start - p.p) for p in pset])
assert(err_adv <= 1.e-3).all()
# Test Field sampling accuracy by comparing kernel against Field sampling
err_smpl = np.array([abs(p.p - pset.fieldset.P[0., p.lon, p.lat, p.depth]) for p in pset])
assert(err_smpl <= 1.e-3).all()
@pytest.fixture(scope='module')
def fieldsetfile(mesh):
"""Generate fieldset files for peninsula test"""
filename = 'peninsula'
fieldset = peninsula_fieldset(100, 50, mesh=mesh)
fieldset.write(filename)
return filename
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('mesh', ['flat', 'spherical'])
def test_peninsula_file(mode, mesh):
"""Open fieldset files and execute"""
fieldset = FieldSet.from_parcels(fieldsetfile(mesh), extra_fields={'P': 'P'}, allow_time_extrapolation=True)
pset = pensinsula_example(fieldset, 5, mode=mode, degree=1)
# Test advection accuracy by comparing streamline values
err_adv = np.array([abs(p.p_start - p.p) for p in pset])
assert(err_adv <= 1.e-3).all()
# Test Field sampling accuracy by comparing kernel against Field sampling
err_smpl = np.array([abs(p.p - pset.fieldset.P[0., p.lon, p.lat, p.depth]) for p in pset])
assert(err_smpl <= 1.e-3).all()
if __name__ == "__main__":
p = ArgumentParser(description="""
Example of particle advection around an idealised peninsula""")
p.add_argument('mode', choices=('scipy', 'jit'), nargs='?', default='jit',
help='Execution mode for performing RK4 computation')
p.add_argument('-p', '--particles', type=int, default=20,
help='Number of particles to advect')
p.add_argument('-d', '--degree', type=int, default=1,
help='Degree of spatial interpolation')
p.add_argument('-v', '--verbose', action='store_true', default=False,
help='Print particle information before and after execution')
p.add_argument('-o', '--nooutput', action='store_true', default=False,
help='Suppress trajectory output')
p.add_argument('--profiling', action='store_true', default=False,
help='Print profiling information after run')
p.add_argument('-f', '--fieldset', type=int, nargs=2, default=None,
help='Generate fieldset file with given dimensions')
p.add_argument('-m', '--method', choices=('RK4', 'EE', 'RK45'), default='RK4',
help='Numerical method used for advection')
args = p.parse_args()
if args.fieldset is not None:
filename = 'peninsula'
fieldset = peninsula_fieldset(args.fieldset[0], args.fieldset[1], mesh='flat')
fieldset.write(filename)
# Open fieldset file set
fieldset = FieldSet.from_parcels('peninsula', extra_fields={'P': 'P'}, allow_time_extrapolation=True)
if args.profiling:
from cProfile import runctx
from pstats import Stats
runctx("pensinsula_example(fieldset, args.particles, mode=args.mode,\
degree=args.degree, verbose=args.verbose,\
output=not args.nooutput, method=method[args.method])",
globals(), locals(), "Profile.prof")
Stats("Profile.prof").strip_dirs().sort_stats("time").print_stats(10)
else:
pensinsula_example(fieldset, args.particles, mode=args.mode,
degree=args.degree, verbose=args.verbose,
output=not args.nooutput, method=method[args.method])
avg_line_length: 43.025641 | max_line_length: 112 | alphanum_fraction: 0.650775

hexsha: 43994b0ccf14f2fb9c756f42b8be98b81d648589 | size: 4,132 | ext: py | lang: Python
max_stars: benchmark/startQiskit_QC1986.py in UCLA-SEAL/QDiff @ d968cbc47fe926b7f88b4adf10490f1edd6f8819, licenses ["BSD-3-Clause"], count null, event datetimes null
max_issues: same path/repo/head/licenses, count null, event datetimes null
max_forks: same path/repo/head/licenses, count null, event datetimes null
content:
# qubit number=4
# total number=31
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=19
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.h(input_qubit[3]) # number=28
prog.cz(input_qubit[0],input_qubit[3]) # number=29
prog.h(input_qubit[3]) # number=30
prog.x(input_qubit[3]) # number=24
prog.cx(input_qubit[0],input_qubit[3]) # number=25
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.rx(-0.48380526865282825,input_qubit[3]) # number=26
prog.h(input_qubit[1]) # number=2
prog.y(input_qubit[3]) # number=18
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.x(input_qubit[1]) # number=27
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.x(input_qubit[2]) # number=22
prog.y(input_qubit[2]) # number=11
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[0]) # number=14
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1986.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
avg_line_length: 34.722689 | max_line_length: 165 | alphanum_fraction: 0.654647

hexsha: 34da1e34f084f70d690f08194cc9a7e3fabf1bbc | size: 362 | ext: py | lang: Python
max_stars: ValidAnagram.py in HalShaw/Leetcode @ 27c52aac5a8ecc5b5f02e54096a001920661b4bb, licenses ["MIT"], count 1, event datetimes 2016-12-22T04:09:25.000Z to 2016-12-22T04:09:25.000Z
max_issues: same path/repo/head/licenses, count null, event datetimes null
max_forks: same path/repo/head/licenses, count null, event datetimes null
content:
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""# 先排序,后比较
l1=sorted([x for x in s.lower()])
l2=sorted([y for y in t.lower()])
s1="".join(l1)
s2="".join(l2)
if s1==s2:
return True
else:
return False
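A quick illustration of the solution above; it lower-cases and sorts both strings, so case-insensitive rearrangements count as anagrams:

```python
# Example calls against the Solution class above.
solution = Solution()
print(solution.isAnagram("Listen", "Silent"))   # True: identical letters after lower-casing and sorting
print(solution.isAnagram("rat", "car"))         # False: sorted letters differ
```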
avg_line_length: 24.133333 | max_line_length: 41 | alphanum_fraction: 0.433702

hexsha: 8781006f5ad4896c65f6e56e8743cdb25d0c914f | size: 16,589 | ext: py | lang: Python
max_stars: optuna/storages/_rdb/models.py in captain-pool/optuna @ 2ae8c17afea54362460320870304c763e91c0596, licenses ["MIT"], count 4,950, event datetimes 2019-11-15T07:35:51.000Z to 2022-03-31T10:32:42.000Z
max_issues: optuna/storages/_rdb/models.py in SCUTJcfeng/optuna @ 9331374a2460da067a6922e4ea09dd4706f3d950, licenses ["MIT"], count 2,490, event datetimes 2019-11-15T07:06:20.000Z to 2022-03-31T23:52:45.000Z
max_forks: same path/repo/head/licenses as max_issues, count 621, event datetimes 2019-11-15T11:26:57.000Z to 2022-03-28T11:46:34.000Z
content:
from typing import Any
from typing import List
from typing import Optional
from sqlalchemy import asc
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import desc
from sqlalchemy import Enum
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import orm
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy import UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from optuna import distributions
from optuna.study._study_direction import StudyDirection
from optuna.trial import TrialState
# Don't modify this version number anymore.
# The schema management functionality has been moved to alembic.
SCHEMA_VERSION = 12
MAX_INDEXED_STRING_LENGTH = 512
MAX_VERSION_LENGTH = 256
NOT_FOUND_MSG = "Record does not exist."
BaseModel: Any = declarative_base()
class StudyModel(BaseModel):
__tablename__ = "studies"
study_id = Column(Integer, primary_key=True)
study_name = Column(String(MAX_INDEXED_STRING_LENGTH), index=True, unique=True, nullable=False)
@classmethod
def find_or_raise_by_id(
cls, study_id: int, session: orm.Session, for_update: bool = False
) -> "StudyModel":
query = session.query(cls).filter(cls.study_id == study_id)
if for_update:
query = query.with_for_update()
study = query.one_or_none()
if study is None:
raise KeyError(NOT_FOUND_MSG)
return study
@classmethod
def find_by_name(cls, study_name: str, session: orm.Session) -> Optional["StudyModel"]:
study = session.query(cls).filter(cls.study_name == study_name).one_or_none()
return study
@classmethod
def find_or_raise_by_name(cls, study_name: str, session: orm.Session) -> "StudyModel":
study = cls.find_by_name(study_name, session)
if study is None:
raise KeyError(NOT_FOUND_MSG)
return study
class StudyDirectionModel(BaseModel):
__tablename__ = "study_directions"
__table_args__: Any = (UniqueConstraint("study_id", "objective"),)
study_direction_id = Column(Integer, primary_key=True)
direction = Column(Enum(StudyDirection), nullable=False)
study_id = Column(Integer, ForeignKey("studies.study_id"), nullable=False)
objective = Column(Integer, nullable=False)
study = orm.relationship(
StudyModel, backref=orm.backref("directions", cascade="all, delete-orphan")
)
@classmethod
def find_by_study_and_objective(
cls, study: StudyModel, objective: int, session: orm.Session
) -> Optional["StudyDirectionModel"]:
study_direction = (
session.query(cls)
.filter(cls.study_id == study.study_id)
.filter(cls.objective == objective)
.one_or_none()
)
return study_direction
@classmethod
def where_study_id(cls, study_id: int, session: orm.Session) -> List["StudyDirectionModel"]:
return session.query(cls).filter(cls.study_id == study_id).all()
class StudyUserAttributeModel(BaseModel):
__tablename__ = "study_user_attributes"
__table_args__: Any = (UniqueConstraint("study_id", "key"),)
study_user_attribute_id = Column(Integer, primary_key=True)
study_id = Column(Integer, ForeignKey("studies.study_id"))
key = Column(String(MAX_INDEXED_STRING_LENGTH))
value_json = Column(Text())
study = orm.relationship(
StudyModel, backref=orm.backref("user_attributes", cascade="all, delete-orphan")
)
@classmethod
def find_by_study_and_key(
cls, study: StudyModel, key: str, session: orm.Session
) -> Optional["StudyUserAttributeModel"]:
attribute = (
session.query(cls)
.filter(cls.study_id == study.study_id)
.filter(cls.key == key)
.one_or_none()
)
return attribute
@classmethod
def where_study_id(
cls, study_id: int, session: orm.Session
) -> List["StudyUserAttributeModel"]:
return session.query(cls).filter(cls.study_id == study_id).all()
class StudySystemAttributeModel(BaseModel):
__tablename__ = "study_system_attributes"
__table_args__: Any = (UniqueConstraint("study_id", "key"),)
study_system_attribute_id = Column(Integer, primary_key=True)
study_id = Column(Integer, ForeignKey("studies.study_id"))
key = Column(String(MAX_INDEXED_STRING_LENGTH))
value_json = Column(Text())
study = orm.relationship(
StudyModel, backref=orm.backref("system_attributes", cascade="all, delete-orphan")
)
@classmethod
def find_by_study_and_key(
cls, study: StudyModel, key: str, session: orm.Session
) -> Optional["StudySystemAttributeModel"]:
attribute = (
session.query(cls)
.filter(cls.study_id == study.study_id)
.filter(cls.key == key)
.one_or_none()
)
return attribute
@classmethod
def where_study_id(
cls, study_id: int, session: orm.Session
) -> List["StudySystemAttributeModel"]:
return session.query(cls).filter(cls.study_id == study_id).all()
class TrialModel(BaseModel):
__tablename__ = "trials"
trial_id = Column(Integer, primary_key=True)
    # No `UniqueConstraint` is put on the `number` column, although in practice it is constrained
# to be unique. This is to reduce code complexity as table-level locking would be required
# otherwise. See https://github.com/optuna/optuna/pull/939#discussion_r387447632.
number = Column(Integer)
study_id = Column(Integer, ForeignKey("studies.study_id"))
state = Column(Enum(TrialState), nullable=False)
datetime_start = Column(DateTime)
datetime_complete = Column(DateTime)
study = orm.relationship(
StudyModel, backref=orm.backref("trials", cascade="all, delete-orphan")
)
@classmethod
def find_max_value_trial(
cls, study_id: int, objective: int, session: orm.Session
) -> "TrialModel":
trial = (
session.query(cls)
.filter(cls.study_id == study_id)
.filter(cls.state == TrialState.COMPLETE)
.join(TrialValueModel)
.filter(TrialValueModel.objective == objective)
.order_by(desc(TrialValueModel.value))
.limit(1)
.one_or_none()
)
if trial is None:
raise ValueError(NOT_FOUND_MSG)
return trial
@classmethod
def find_min_value_trial(
cls, study_id: int, objective: int, session: orm.Session
) -> "TrialModel":
trial = (
session.query(cls)
.filter(cls.study_id == study_id)
.filter(cls.state == TrialState.COMPLETE)
.join(TrialValueModel)
.filter(TrialValueModel.objective == objective)
.order_by(asc(TrialValueModel.value))
.limit(1)
.one_or_none()
)
if trial is None:
raise ValueError(NOT_FOUND_MSG)
return trial
@classmethod
def find_or_raise_by_id(
cls, trial_id: int, session: orm.Session, for_update: bool = False
) -> "TrialModel":
query = session.query(cls).filter(cls.trial_id == trial_id)
# "FOR UPDATE" clause is used for row-level locking.
# Please note that SQLite3 doesn't support this clause.
if for_update:
query = query.with_for_update()
trial = query.one_or_none()
if trial is None:
raise KeyError(NOT_FOUND_MSG)
return trial
@classmethod
def count(
cls,
session: orm.Session,
study: Optional[StudyModel] = None,
state: Optional[TrialState] = None,
) -> int:
trial_count = session.query(func.count(cls.trial_id))
if study is not None:
trial_count = trial_count.filter(cls.study_id == study.study_id)
if state is not None:
trial_count = trial_count.filter(cls.state == state)
return trial_count.scalar()
def count_past_trials(self, session: orm.Session) -> int:
trial_count = session.query(func.count(TrialModel.trial_id)).filter(
TrialModel.study_id == self.study_id, TrialModel.trial_id < self.trial_id
)
return trial_count.scalar()
class TrialUserAttributeModel(BaseModel):
__tablename__ = "trial_user_attributes"
__table_args__: Any = (UniqueConstraint("trial_id", "key"),)
trial_user_attribute_id = Column(Integer, primary_key=True)
trial_id = Column(Integer, ForeignKey("trials.trial_id"))
key = Column(String(MAX_INDEXED_STRING_LENGTH))
value_json = Column(Text())
trial = orm.relationship(
TrialModel, backref=orm.backref("user_attributes", cascade="all, delete-orphan")
)
@classmethod
def find_by_trial_and_key(
cls, trial: TrialModel, key: str, session: orm.Session
) -> Optional["TrialUserAttributeModel"]:
attribute = (
session.query(cls)
.filter(cls.trial_id == trial.trial_id)
.filter(cls.key == key)
.one_or_none()
)
return attribute
@classmethod
def where_trial_id(
cls, trial_id: int, session: orm.Session
) -> List["TrialUserAttributeModel"]:
return session.query(cls).filter(cls.trial_id == trial_id).all()
class TrialSystemAttributeModel(BaseModel):
__tablename__ = "trial_system_attributes"
__table_args__: Any = (UniqueConstraint("trial_id", "key"),)
trial_system_attribute_id = Column(Integer, primary_key=True)
trial_id = Column(Integer, ForeignKey("trials.trial_id"))
key = Column(String(MAX_INDEXED_STRING_LENGTH))
value_json = Column(Text())
trial = orm.relationship(
TrialModel, backref=orm.backref("system_attributes", cascade="all, delete-orphan")
)
@classmethod
def find_by_trial_and_key(
cls, trial: TrialModel, key: str, session: orm.Session
) -> Optional["TrialSystemAttributeModel"]:
attribute = (
session.query(cls)
.filter(cls.trial_id == trial.trial_id)
.filter(cls.key == key)
.one_or_none()
)
return attribute
@classmethod
def where_trial_id(
cls, trial_id: int, session: orm.Session
) -> List["TrialSystemAttributeModel"]:
return session.query(cls).filter(cls.trial_id == trial_id).all()
class TrialParamModel(BaseModel):
__tablename__ = "trial_params"
__table_args__: Any = (UniqueConstraint("trial_id", "param_name"),)
param_id = Column(Integer, primary_key=True)
trial_id = Column(Integer, ForeignKey("trials.trial_id"))
param_name = Column(String(MAX_INDEXED_STRING_LENGTH))
param_value = Column(Float)
distribution_json = Column(Text())
trial = orm.relationship(
TrialModel, backref=orm.backref("params", cascade="all, delete-orphan")
)
def check_and_add(self, session: orm.Session) -> None:
self._check_compatibility_with_previous_trial_param_distributions(session)
session.add(self)
def _check_compatibility_with_previous_trial_param_distributions(
self, session: orm.Session
) -> None:
trial = TrialModel.find_or_raise_by_id(self.trial_id, session)
previous_record = (
session.query(TrialParamModel)
.join(TrialModel)
.filter(TrialModel.study_id == trial.study_id)
.filter(TrialParamModel.param_name == self.param_name)
.first()
)
if previous_record is not None:
distributions.check_distribution_compatibility(
distributions.json_to_distribution(previous_record.distribution_json),
distributions.json_to_distribution(self.distribution_json),
)
@classmethod
def find_by_trial_and_param_name(
cls, trial: TrialModel, param_name: str, session: orm.Session
) -> Optional["TrialParamModel"]:
param_distribution = (
session.query(cls)
.filter(cls.trial_id == trial.trial_id)
.filter(cls.param_name == param_name)
.one_or_none()
)
return param_distribution
@classmethod
def find_or_raise_by_trial_and_param_name(
cls, trial: TrialModel, param_name: str, session: orm.Session
) -> "TrialParamModel":
param_distribution = cls.find_by_trial_and_param_name(trial, param_name, session)
if param_distribution is None:
raise KeyError(NOT_FOUND_MSG)
return param_distribution
@classmethod
def where_trial_id(cls, trial_id: int, session: orm.Session) -> List["TrialParamModel"]:
trial_params = session.query(cls).filter(cls.trial_id == trial_id).all()
return trial_params
class TrialValueModel(BaseModel):
__tablename__ = "trial_values"
__table_args__: Any = (UniqueConstraint("trial_id", "objective"),)
trial_value_id = Column(Integer, primary_key=True)
trial_id = Column(Integer, ForeignKey("trials.trial_id"), nullable=False)
objective = Column(Integer, nullable=False)
value = Column(Float, nullable=False)
trial = orm.relationship(
TrialModel, backref=orm.backref("values", cascade="all, delete-orphan")
)
@classmethod
def find_by_trial_and_objective(
cls, trial: TrialModel, objective: int, session: orm.Session
) -> Optional["TrialValueModel"]:
trial_value = (
session.query(cls)
.filter(cls.trial_id == trial.trial_id)
.filter(cls.objective == objective)
.one_or_none()
)
return trial_value
@classmethod
def where_trial_id(cls, trial_id: int, session: orm.Session) -> List["TrialValueModel"]:
trial_values = (
session.query(cls).filter(cls.trial_id == trial_id).order_by(asc(cls.objective)).all()
)
return trial_values
class TrialIntermediateValueModel(BaseModel):
__tablename__ = "trial_intermediate_values"
__table_args__: Any = (UniqueConstraint("trial_id", "step"),)
trial_intermediate_value_id = Column(Integer, primary_key=True)
trial_id = Column(Integer, ForeignKey("trials.trial_id"), nullable=False)
step = Column(Integer, nullable=False)
intermediate_value = Column(Float, nullable=False)
trial = orm.relationship(
TrialModel, backref=orm.backref("intermediate_values", cascade="all, delete-orphan")
)
@classmethod
def find_by_trial_and_step(
cls, trial: TrialModel, step: int, session: orm.Session
) -> Optional["TrialIntermediateValueModel"]:
trial_intermediate_value = (
session.query(cls)
.filter(cls.trial_id == trial.trial_id)
.filter(cls.step == step)
.one_or_none()
)
return trial_intermediate_value
@classmethod
def where_trial_id(
cls, trial_id: int, session: orm.Session
) -> List["TrialIntermediateValueModel"]:
trial_intermediate_values = session.query(cls).filter(cls.trial_id == trial_id).all()
return trial_intermediate_values
class TrialHeartbeatModel(BaseModel):
__tablename__ = "trial_heartbeats"
__table_args__: Any = (UniqueConstraint("trial_id"),)
trial_heartbeat_id = Column(Integer, primary_key=True)
trial_id = Column(Integer, ForeignKey("trials.trial_id"), nullable=False)
heartbeat = Column(DateTime, nullable=False, default=func.current_timestamp())
trial = orm.relationship(
TrialModel, backref=orm.backref("heartbeats", cascade="all, delete-orphan")
)
@classmethod
def where_trial_id(
cls, trial_id: int, session: orm.Session
) -> Optional["TrialHeartbeatModel"]:
return session.query(cls).filter(cls.trial_id == trial_id).one_or_none()
class VersionInfoModel(BaseModel):
__tablename__ = "version_info"
# setting check constraint to ensure the number of rows is at most 1
__table_args__: Any = (CheckConstraint("version_info_id=1"),)
version_info_id = Column(Integer, primary_key=True, autoincrement=False, default=1)
schema_version = Column(Integer)
library_version = Column(String(MAX_VERSION_LENGTH))
@classmethod
def find(cls, session: orm.Session) -> "VersionInfoModel":
version_info = session.query(cls).one_or_none()
return version_info
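# Hedged usage sketch (illustrative only): these helpers expect a SQLAlchemy session
# bound to an Optuna RDB; `session_factory` below is an assumption, not part of this module.
#
#     session = session_factory()
#     study = StudyModel.find_or_raise_by_name("demo-study", session)
#     n_complete = TrialModel.count(session, study=study, state=TrialState.COMPLETE)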
| 32.274319
| 99
| 0.669781
|
0941f7530f4d2f785a1618d6233ea0e40fa0379e
| 19,338
|
py
|
Python
|
objc4/openSources/xnu-4570.71.2/tools/lldbmacros/core/cvalue.py
|
honeyeeb/objc4-723
|
020a0dd480818207211835aefe04f44007c09575
|
[
"MIT"
] | null | null | null |
objc4/openSources/xnu-4570.71.2/tools/lldbmacros/core/cvalue.py
|
honeyeeb/objc4-723
|
020a0dd480818207211835aefe04f44007c09575
|
[
"MIT"
] | null | null | null |
objc4/openSources/xnu-4570.71.2/tools/lldbmacros/core/cvalue.py
|
honeyeeb/objc4-723
|
020a0dd480818207211835aefe04f44007c09575
|
[
"MIT"
] | null | null | null |
"""
Defines a class value which encapsulates the basic lldb Scripting Bridge APIs. This provides an easy
wrapper to extract information from C based constructs.
|------- core.value------------|
| |--lldb Scripting Bridge--| |
| | |--lldb core--| | |
| |-------------------------| |
|------------------------------|
Use the member function GetSBValue() to access the base Scripting Bridge value.
"""
import lldb
import re
from lazytarget import *
_cstring_rex = re.compile("((?:\s*|const\s+)\s*char(?:\s+\*|\s+[A-Za-z_0-9]*\s*\[|)\s*)",re.MULTILINE|re.DOTALL)
class value(object):
'''A class designed to wrap lldb.SBValue() objects so the resulting object
can be used as a variable would be in code. So if you have a Point structure
variable in your code in the current frame named "pt", you can initialize an instance
of this class with it:
pt = lldb.value(lldb.frame.FindVariable("pt"))
print pt
print pt.x
print pt.y
pt = lldb.value(lldb.frame.FindVariable("rectangle_array"))
print rectangle_array[12]
print rectangle_array[5].origin.x'''
def __init__(self, sbvalue):
#_sbval19k84obscure747 is specifically chosen to be obscure.
#This avoids conflicts when attributes could mean any field value in code
self._sbval19k84obscure747 = sbvalue
self._sbval19k84obscure747_type = sbvalue.GetType()
self._sbval19k84obscure747_is_ptr = sbvalue.GetType().IsPointerType()
self.sbvalue = sbvalue
def __nonzero__(self):
return ( self._sbval19k84obscure747.__nonzero__() and self._GetValueAsUnsigned() != 0 )
def __repr__(self):
return self._sbval19k84obscure747.__str__()
def __cmp__(self, other):
if type(other) is int or type(other) is long:
me = int(self)
if type(me) is long:
other = long(other)
return me.__cmp__(other)
if type(other) is value:
return int(self).__cmp__(int(other))
raise TypeError("Cannot compare value with type {}".format(type(other)))
def __str__(self):
global _cstring_rex
type_name = self._sbval19k84obscure747_type.GetName()
if len(_cstring_rex.findall(type_name)) > 0 :
return self._GetValueAsString()
summary = self._sbval19k84obscure747.GetSummary()
if summary:
return summary.strip('"')
return self._sbval19k84obscure747.__str__()
def __getitem__(self, key):
# Allow array access if this value has children...
if type(key) is slice:
_start = int(key.start)
_end = int(key.stop)
_step = 1
if key.step != None:
_step = int(key.step)
retval = []
while _start < _end:
retval.append(self[_start])
_start += _step
return retval
if type(key) in (int, long):
return value(self._sbval19k84obscure747.GetValueForExpressionPath("[%i]" % key))
if type(key) is value:
return value(self._sbval19k84obscure747.GetValueForExpressionPath("[%i]" % int(key)))
raise TypeError("Cannot fetch Array item for this type")
def __getattr__(self, name):
child_sbvalue = self._sbval19k84obscure747.GetChildMemberWithName (name)
if child_sbvalue:
return value(child_sbvalue)
raise AttributeError("No field by name: "+name )
def __add__(self, other):
return int(self) + int(other)
def __radd__(self, other):
return int(self) + int(other)
def __sub__(self, other):
return int(self) - int(other)
def __rsub__(self, other):
return int(other) - int(self)
def __mul__(self, other):
return int(self) * int(other)
def __rmul__(self, other):
return int(self) * int(other)
def __floordiv__(self, other):
return int(self) // int(other)
def __mod__(self, other):
return int(self) % int(other)
def __rmod__(self, other):
return int(other) % int(self)
    def __divmod__(self, other):
        return divmod(int(self), int(other))
    def __rdivmod__(self, other):
        return divmod(int(other), int(self))
def __pow__(self, other):
return int(self) ** int(other)
def __lshift__(self, other):
return int(self) << int(other)
def __rshift__(self, other):
return int(self) >> int(other)
def __and__(self, other):
return int(self) & int(other)
    def __rand__(self, other):
        return int(self) & int(other)
def __xor__(self, other):
return int(self) ^ int(other)
def __or__(self, other):
return int(self) | int(other)
def __div__(self, other):
return int(self) / int(other)
def __rdiv__(self, other):
return int(other)/int(self)
def __truediv__(self, other):
return int(self) / int(other)
def __iadd__(self, other):
result = self.__add__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __isub__(self, other):
result = self.__sub__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __imul__(self, other):
result = self.__mul__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __idiv__(self, other):
result = self.__div__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __itruediv__(self, other):
result = self.__truediv__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __ifloordiv__(self, other):
        result = self.__floordiv__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __imod__(self, other):
        result = self.__mod__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
    def __ipow__(self, other, modulo=None):
        # pow() accepts None as the third argument, so both x **= y and pow(x, y, m) work.
        result = pow(int(self), int(other), modulo)
        self._sbval19k84obscure747.SetValueFromCString (str(result))
        return result
def __ilshift__(self, other):
result = self.__lshift__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __irshift__(self, other):
result = self.__rshift__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __iand__(self, other):
        result = self.__and__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __ixor__(self, other):
        result = self.__xor__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __ior__(self, other):
        result = self.__or__(other)
self._sbval19k84obscure747.SetValueFromCString (str(result))
return result
def __neg__(self):
return -int(self)
def __pos__(self):
return +int(self)
def __abs__(self):
return abs(int(self))
def __invert__(self):
return ~int(self)
def __complex__(self):
return complex (int(self))
def __int__(self):
if self._sbval19k84obscure747_is_ptr:
return self._GetValueAsUnsigned()
tname= self._sbval19k84obscure747_type.GetName()
if tname.find('uint') >= 0 or tname.find('unsigned') >= 0:
return self._GetValueAsUnsigned()
retval = self._sbval19k84obscure747.GetValueAsSigned()
# <rdar://problem/12481949> lldb python: GetValueAsSigned does not return the correct value
if (retval & 0x80000000):
retval = retval - 0x100000000
return retval
def __long__(self):
return self._sbval19k84obscure747.GetValueAsSigned()
def __float__(self):
return float (self._sbval19k84obscure747.GetValueAsSigned())
def __oct__(self):
return '0%o' % self._GetValueAsUnsigned()
def __hex__(self):
return '0x%x' % self._GetValueAsUnsigned()
def __eq__(self, other):
self_err = lldb.SBError()
other_err = lldb.SBError()
self_val = self._sbval19k84obscure747.GetValueAsUnsigned(self_err)
if self_err.fail:
raise ValueError("unable to extract value of self")
if type(other) is value:
other_val = other._sbval19k84obscure747.GetValueAsUnsigned(other_err)
if other_err.fail:
raise ValueError("unable to extract value of other")
return self_val == other_val
if type(other) is int:
return int(self) == other
raise TypeError("Equality operation is not defined for this type.")
def __neq__(self, other):
return not self.__eq__(other)
def GetSBValue(self):
return self._sbval19k84obscure747
def __getstate__(self):
err = lldb.SBError()
if self._sbval19k84obscure747_is_ptr:
addr = self._sbval19k84obscure747.GetValueAsUnsigned()
size = self._sbval19k84obscure747_type.GetPointeeType().GetByteSize()
else:
addr = self._sbval19k84obscure747.AddressOf().GetValueAsUnsigned()
size = self._sbval19k84obscure747_type.GetByteSize()
content = LazyTarget.GetProcess().ReadMemory(addr, size, err)
if err.fail:
content = ''
return content
def _GetValueAsSigned(self):
serr = lldb.SBError()
retval = self._sbval19k84obscure747.GetValueAsSigned(serr)
if serr.success:
return retval
raise ValueError("Failed to read signed data. "+ str(self._sbval19k84obscure747) +"(type =" + str(self._sbval19k84obscure747_type) + ") Error description: " + serr.GetCString())
def _GetValueAsUnsigned(self):
serr = lldb.SBError()
retval = self._sbval19k84obscure747.GetValueAsUnsigned(serr)
if serr.success:
return retval
raise ValueError("Failed to read unsigned data. "+ str(self._sbval19k84obscure747) +"(type =" + str(self._sbval19k84obscure747_type) + ") Error description: " + serr.GetCString())
def _GetValueAsString(self, offset = 0, maxlen = 1024):
serr = lldb.SBError()
sbdata = None
if self._sbval19k84obscure747.TypeIsPointerType():
sbdata = self._sbval19k84obscure747.GetPointeeData(offset, maxlen)
else:
sbdata = self._sbval19k84obscure747.GetData()
retval = ''
bytesize = sbdata.GetByteSize()
if bytesize == 0 :
#raise ValueError('Unable to read value as string')
return ''
for i in range(0, bytesize) :
serr.Clear()
ch = chr(sbdata.GetUnsignedInt8(serr, i))
if serr.fail :
raise ValueError("Unable to read string data: " + serr.GetCString())
if ch == '\0':
break
retval += ch
return retval
def __format__(self, format_spec):
ret_format = "{0:"+format_spec+"}"
# typechar is last char. see http://www.python.org/dev/peps/pep-3101/
type_spec=format_spec.strip().lower()[-1]
if type_spec == 'x':
return ret_format.format(self._GetValueAsUnsigned())
if type_spec == 'd':
return ret_format.format(int(self))
if type_spec == 's':
return ret_format.format(str(self))
if type_spec == 'o':
return ret_format.format(int(oct(self)))
if type_spec == 'c':
return ret_format.format(int(self))
return "unknown format " + format_spec + str(self)
def unsigned(val):
""" Helper function to get unsigned value from core.value
params: val - value (see value class above) representation of an integer type
returns: int which is unsigned.
raises : ValueError if the type cannot be represented as unsigned int.
"""
if type(val) is value:
return val._GetValueAsUnsigned()
return int(val)
def sizeof(t):
""" Find the byte size of a type.
params: t - str : ex 'time_spec' returns equivalent of sizeof(time_spec) in C
t - value: ex a value object. returns size of the object
returns: int - byte size length
"""
if type(t) is value :
return t.GetSBValue().GetByteSize()
if type(t) is str:
return gettype(t).GetByteSize()
raise ValueError("Cannot get sizeof. Invalid argument")
def dereference(val):
""" Get a dereferenced obj for a pointer type obj
params: val - value object representing a pointer type C construct in lldb
returns: value - value
ex. val = dereference(ptr_obj) #python
is same as
obj_ptr = (int *)0x1234 #C
val = *obj_ptr #C
"""
if type(val) is value and val.GetSBValue().TypeIsPointerType():
return value(val.GetSBValue().Dereference())
raise TypeError('Cannot dereference this type.')
def addressof(val):
""" Get address of a core.value object.
params: val - value object representing a C construct in lldb
returns: value - value object referring to 'type(val) *' type
ex. addr = addressof(hello_obj) #python
is same as
uintptr_t addr = (uintptr_t)&hello_obj #C
"""
if type(val) is value:
return value(val.GetSBValue().AddressOf())
raise TypeError("Cannot do addressof for non-value type objects")
def cast(obj, target_type):
""" Type cast an object to another C type.
params:
obj - core.value object representing some C construct in lldb
target_type - str : ex 'char *'
- lldb.SBType :
"""
dest_type = target_type
if type(target_type) is str:
dest_type = gettype(target_type)
elif type(target_type) is value:
dest_type = target_type.GetSBValue().GetType()
if type(obj) is value :
return value(obj.GetSBValue().Cast(dest_type))
elif type(obj) is int:
print "ERROR: You cannot cast an 'int' to %s, please use kern.GetValueFromAddress() for such purposes." % str(target_type)
raise TypeError("object of type %s cannot be casted to %s" % (str(type(obj)), str(target_type)))
_value_types_cache={}
def gettype(target_type):
""" Returns lldb.SBType of the given target_type
params:
target_type - str, ex. 'char', 'uint32_t' etc
returns:
lldb.SBType - SBType corresponding to the given target_type
raises:
NameError - Incase the type is not identified
"""
global _value_types_cache
target_type = str(target_type).strip()
if target_type in _value_types_cache:
return _value_types_cache[target_type]
target_type = target_type.strip()
requested_type_is_struct = False
m = re.match(r'\s*struct\s*(.*)$', target_type)
if m:
requested_type_is_struct = True
target_type = m.group(1)
tmp_type = None
requested_type_is_pointer = False
if target_type.endswith('*') :
requested_type_is_pointer = True
# tmp_type = LazyTarget.GetTarget().FindFirstType(target_type.rstrip('*').strip())
search_type = target_type.rstrip('*').strip()
type_arr = [t for t in LazyTarget.GetTarget().FindTypes(search_type)]
if requested_type_is_struct:
type_arr = [t for t in type_arr if t.type == lldb.eTypeClassStruct]
    # After the sort, the struct type with more fields will be at index [0].
    # This heuristic helps select the struct type with more fields over ones with "opaque" members.
type_arr.sort(reverse=True, key=lambda x: x.GetNumberOfFields())
if len(type_arr) > 0:
tmp_type = type_arr[0]
else:
raise NameError('Unable to find type '+target_type)
if not tmp_type.IsValid():
raise NameError('Unable to Cast to type '+target_type)
if requested_type_is_pointer:
tmp_type = tmp_type.GetPointerType()
_value_types_cache[target_type] = tmp_type
return _value_types_cache[target_type]
def getfieldoffset(struct_type, field_name):
""" Returns the byte offset of a field inside a given struct
Understands anonymous unions and field names in sub-structs
params:
struct_type - str or lldb.SBType, ex. 'struct ipc_port *' or port.gettype()
field_name - str, name of the field inside the struct ex. 'ip_messages'
returns:
int - byte offset of the field_name inside the struct_type
raises:
TypeError - - In case the struct_type has no field with the name field_name
"""
if type(struct_type) == str:
struct_type = gettype(struct_type)
if '.' in field_name :
# Handle recursive fields in sub-structs
components = field_name.split('.', 1)
for field in struct_type.get_fields_array():
if str(field.GetName()) == components[0]:
return getfieldoffset(struct_type, components[0]) + getfieldoffset(field.GetType(), components[1])
raise TypeError('Field name "%s" not found in type "%s"' % (components[0], str(struct_type)))
offset = 0
for field in struct_type.get_fields_array():
if str(field.GetName()) == field_name:
return field.GetOffsetInBytes()
# Hack for anonymous unions - the compiler does this, so cvalue should too
if field.GetName() is None and field.GetType().GetTypeClass() == lldb.eTypeClassUnion :
for union_field in field.GetType().get_fields_array():
if str(union_field.GetName()) == field_name:
return union_field.GetOffsetInBytes() + field.GetOffsetInBytes()
raise TypeError('Field name "%s" not found in type "%s"' % (field_name, str(struct_type)))
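# Hedged usage sketch of the helpers above; it assumes a live lldb target, and the
# type/field names are taken from the docstrings above purely for illustration:
#
#     t = gettype('char *')                                    # lldb.SBType for char *
#     n = sizeof('time_spec')                                  # byte size of the struct
#     off = getfieldoffset('struct ipc_port', 'ip_messages')   # field offset in bytes
#     s = cast(some_value, 'char *')                           # some_value is a core.value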
def islong(x):
""" Returns True if a string represents a long integer, False otherwise
"""
try:
long(x,16)
except ValueError:
try:
long(x)
except ValueError:
return False
return True
def readmemory(val):
""" Returns a string of hex data that is referenced by the value.
params: val - a value object.
return: str - string of hex bytes.
raises: TypeError if val is not a valid type
"""
if not type(val) is value:
raise TypeError('%s is not of type value' % str(type(val)))
return val.__getstate__()
| 36.486792
| 187
| 0.616506
|
cb86833dc1005d5379413fe528910c25f4eba3fd
| 380
|
py
|
Python
|
lebanese_channels/services/teleliban.py
|
ChadiEM/Lebanese-Channels
|
8e26f6f4b021367c57eb4ee966e3d4c1511aa77a
|
[
"MIT"
] | 10
|
2017-05-10T04:23:43.000Z
|
2020-04-09T19:39:45.000Z
|
lebanese_channels/services/teleliban.py
|
ChadiEM/Kodi-Lebanese-Channels
|
8e26f6f4b021367c57eb4ee966e3d4c1511aa77a
|
[
"MIT"
] | null | null | null |
lebanese_channels/services/teleliban.py
|
ChadiEM/Kodi-Lebanese-Channels
|
8e26f6f4b021367c57eb4ee966e3d4c1511aa77a
|
[
"MIT"
] | 2
|
2019-05-06T23:01:37.000Z
|
2019-11-06T10:23:24.000Z
|
from lebanese_channels.channel import Channel
from lebanese_channels.utils import stream
class TeleLiban(Channel):
def get_name(self) -> str:
return 'Tele Liban'
def get_logo(self) -> str:
return 'http://www.teleliban.com.lb/images/telelogo.png'
def get_stream_url(self) -> str:
return stream.fetch_from('http://www.teleliban.com.lb/live')
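# Hedged usage sketch (illustrative only; resolving the stream URL performs a network
# request against the page above):
#
#     channel = TeleLiban()
#     print(channel.get_name(), channel.get_stream_url())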
| 27.142857
| 68
| 0.7
|
61be22e3551fc2a574748db217b99a43a4a4ad90
| 542
|
py
|
Python
|
tests/unit/core/auth/transaction/test_stdfee.py
|
bongtrop/jigu
|
448bce8ce693f3f7c530bea0f2f268e22100937a
|
[
"MIT"
] | 14
|
2020-03-03T06:46:39.000Z
|
2021-05-01T15:29:35.000Z
|
tests/unit/core/auth/transaction/test_stdfee.py
|
bongtrop/jigu
|
448bce8ce693f3f7c530bea0f2f268e22100937a
|
[
"MIT"
] | 9
|
2020-03-09T06:36:30.000Z
|
2021-02-15T14:40:48.000Z
|
tests/unit/core/auth/transaction/test_stdfee.py
|
bongtrop/jigu
|
448bce8ce693f3f7c530bea0f2f268e22100937a
|
[
"MIT"
] | 5
|
2020-05-30T22:38:34.000Z
|
2021-02-11T00:56:20.000Z
|
import json
import pytest
from hypothesis import HealthCheck, given, settings
from hypothesis import strategies as st
from hypothesis_jsonschema import from_schema
from jigu.core.auth.transaction import StdFee
class TestStdFeeSerdes:
@pytest.mark.slow
@settings(suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
@given(fee=from_schema(StdFee.__schema__))
def test_schema(self, fee):
x = StdFee.deserialize(fee)
y = StdFee.deserialize(json.loads(x.to_json()))
assert x == y
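    # Hedged note: the property above checks serde round-tripping only --
    # deserializing a schema-generated fee and re-deserializing its own JSON must agree.
    # A hand-written spot check might look like this (field values are assumptions):
    #
    #     fee = StdFee.deserialize({"gas": "200000", "amount": []})
    #     assert StdFee.deserialize(json.loads(fee.to_json())) == fee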
| 28.526316
| 88
| 0.756458
|
377b999d1702ea69618a11e8e9b43adca2c36277
| 19,670
|
py
|
Python
|
reposts/reposts.py
|
GitAcrown/RedAppsv2
|
a3a1fb5a5c659ce6e54e62503012a79a71763d47
|
[
"MIT"
] | 1
|
2022-03-07T01:54:10.000Z
|
2022-03-07T01:54:10.000Z
|
reposts/reposts.py
|
GitAcrown/RedAppsv2
|
a3a1fb5a5c659ce6e54e62503012a79a71763d47
|
[
"MIT"
] | null | null | null |
reposts/reposts.py
|
GitAcrown/RedAppsv2
|
a3a1fb5a5c659ce6e54e62503012a79a71763d47
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from copy import copy
from datetime import datetime, timedelta
import logging
import re
import discord
import typing
from discord.ext import tasks
from redbot.core import Config, commands, checks
from redbot.core.utils.chat_formatting import box
logger = logging.getLogger("red.RedAppsv2.reposts")
class RepostData:
def __init__(self, url: str, data: list):
self.url = url
self._raw = data
@property
def data(self):
return self.get_data()
def get_data(self):
RepostSeen = namedtuple('RepostSeen', ('message', 'jump_url', 'author', 'timestamp'))
formatted = []
for k in self._raw:
formatted.append(RepostSeen(**k))
return formatted
class Reposts(commands.Cog):
"""Détecteur de reposts"""
def __init__(self, bot):
super().__init__()
self.bot = bot
self.config = Config.get_conf(self, identifier=736144321857978388, force_registration=True)
default_guild = {'whitelist': {'users': [],
'channels': [],
'roles': [],
'links_greedy': [],
'links_lazy': []},
'autodelete': {'greedy': [],
'lazy': []},
'delete_after': False,
'cache': {},
'toggled': False}
self.config.register_guild(**default_guild)
self.repost_emoji = self.bot.get_emoji(812380539319091230)
self.reposts_cache_clear.start()
@tasks.loop(hours=12)
async def reposts_cache_clear(self):
await self.clear_reposts_cache()
@reposts_cache_clear.before_loop
async def before_reposts_loop(self):
logger.info('Starting reposts loop...')
await self.bot.wait_until_ready()
async def clear_reposts_cache(self):
all_guilds = await self.config.all_guilds()
for g in all_guilds:
cache = all_guilds[g]['cache']
new = copy(cache)
for url in cache:
for k in cache[url]:
if datetime.now().fromisoformat(k['timestamp']) < datetime.now() - timedelta(days=14):
new[url].remove(k)
if not cache[url]:
del new[url]
if cache != new:
await self.config.guild_from_id(g).cache.set(new)
async def is_whitelisted(self, message: discord.Message, link: str):
author, channel = message.author, message.channel
wl = await self.config.guild(message.guild).whitelist()
if author.id in wl["users"] or channel.id in wl["channels"]:
return True
elif [r for r in wl["roles"] if r in [n.id for n in author.roles]]:
return True
if link in wl["links_greedy"]:
return True
elif [l for l in wl["links_lazy"] if link.startswith(l)]:
return True
return False
def canon_link(self, link: str):
is_yt = re.compile(r'https://www\.youtube\.com/watch\?v=([\w\-]*)', re.DOTALL | re.IGNORECASE).findall(
link)
if is_yt:
return "https://youtu.be/{}".format(is_yt[0])
is_tw = re.compile(r'https://twitter\.com/(?:\w*)/status/(\d*)', re.DOTALL | re.IGNORECASE).findall(link)
if is_tw:
return "https://twitter.com/u/status/{}".format(is_tw[0])
return link
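        # Hedged examples of the canonicalisation above (derived from the regexes, not executed):
        #   https://www.youtube.com/watch?v=abc123   -> https://youtu.be/abc123
        #   https://twitter.com/someone/status/42    -> https://twitter.com/u/status/42
        #   any other link is returned unchanged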
async def get_repost_by_message(self, message: discord.Message):
guild = message.guild
reposts = await self.config.guild(guild).cache()
for url in reposts:
for k in reposts[url]:
if k['message'] == message.id:
return RepostData(url, reposts[url])
@commands.group(name="reposts")
@checks.admin_or_permissions(manage_messages=True)
@commands.guild_only()
async def _reposts(self, ctx):
"""Paramètres du détecteur de reposts"""
@_reposts.command()
async def toggle(self, ctx):
"""Active/désactive la détection de reposts de liens"""
guild = ctx.guild
if not await self.config.guild(guild).toggled():
await self.config.guild(guild).toggled.set(True)
await ctx.send("**Activé** • Le détecteur de reposts de liens est activé.")
else:
await self.config.guild(guild).toggled.set(False)
await ctx.send("**Désactivé** • Le détecteur de reposts de liens est désactivé.")
@_reposts.command(hidden=True, name="reset")
async def repost_reset(self, ctx):
"""Reset les données du cache"""
guild = ctx.guild
await self.config.guild(guild).clear_raw('cache')
await ctx.send("**Reset effectué avec succès**")
@_reposts.command(name="deleteafter")
async def delete_after(self, ctx, delay: int = -1):
"""Définir un délai après lequel les reposts sont supprimés
Mettre -1 désactive la suppression"""
guild = ctx.guild
if delay >= 0:
await self.config.guild(guild).delete_after.set(delay)
await ctx.send(f"**Délai de suppression configuré** • Les reposts détectés seront supprimés aprèsn {delay} secondes.")
else:
await self.config.guild(guild).delete_after.set(False)
await ctx.send(
f"**Délai de suppression retiré** • Les reposts détectés ne seront plus supprimés.")
@commands.command()
async def autodelete(self, ctx, lien: str = None):
"""Ajouter/retirer une URL à blacklister
Ne rien mettre affiche une liste
Mettre * à la fin de l'URL signifie que tous les URL commençant par votre texte seront supprimés automatiquement"""
guild = ctx.guild
links = await self.config.guild(guild).autodelete()
if lien:
if '*' in lien:
lien = lien.replace('*', '')
if lien not in links['lazy']:
links['lazy'].append(lien)
await ctx.send(f"**Lien ajouté** • Les liens commençant par `{lien}` seront automatiquement supprimés.")
else:
links['lazy'].remove(lien)
await ctx.send(f"**Lien retiré** • Les liens commençant par `{lien}` ne seront plus automatiquement supprimés.")
elif lien in links['greedy']:
links['greedy'].remove(lien)
await ctx.send(f"**Lien retiré** • Le lien `{lien}` ne sera plus supprimé automatiquement.")
elif lien not in links['greedy']:
links['greedy'].append(lien)
await ctx.send(f"**Lien ajouté** • Le lien `{lien}` sera désormais supprimé automatiquement.")
else:
await ctx.send(f"**Commande invalide** : réessayez.")
await self.config.guild(guild).autodelete.set(links)
else:
txt = ""
for l in links['greedy']:
txt += f"- `{l}`\n"
for m in links['lazy']:
txt += f"- `{m}*`\n"
txt = txt if txt else "Aucune autosuppression de lien n'a été configurée"
em = discord.Embed(title="Liens à supprimer automatiquement", description=txt)
await ctx.send(embed=em)
@_reposts.group(name="whitelist")
async def reposts_whitelist(self, ctx):
"""Paramètres concernant l'immunité au détecteur de reposts (whitelist)"""
@reposts_whitelist.command()
async def user(self, ctx, user: discord.Member):
"""Ajouter ou retirer une immunité pour un membre"""
guild = ctx.guild
wl = await self.config.guild(guild).whitelist()
if user.id not in wl['users']:
wl['users'].append(user.id)
await self.config.guild(guild).whitelist.set(wl)
await ctx.send(f"**Ajouté à la whitelist** • {user.name} est désormais immunisé au détecteur de reposts.")
else:
wl['users'].remove(user.id)
await self.config.guild(guild).whitelist.set(wl)
await ctx.send(
f"**Retiré de la whitelist** • {user.name} n'est désormais plus immunisé au détecteur de reposts.")
@reposts_whitelist.command()
async def channel(self, ctx, channel: discord.TextChannel):
"""Ajouter ou retirer une immunité pour un salon écrit"""
guild = ctx.guild
wl = await self.config.guild(guild).whitelist()
if channel.id not in wl['channels']:
wl['channels'].append(channel.id)
await self.config.guild(guild).whitelist.set(wl)
await ctx.send(f"**Ajouté à la whitelist** • Les reposts postés dans #{channel.name} ne seront plus signalés.")
else:
wl['channels'].remove(channel.id)
await self.config.guild(guild).whitelist.set(wl)
await ctx.send(f"**Retiré de la whitelist** • Les reposts postés dans #{channel.name} seront de nouveau signalés.")
@reposts_whitelist.command()
async def role(self, ctx, role: discord.Role):
"""Ajouter ou retirer une immunité pour un rôle (donc les membres possédant ce rôle)"""
guild = ctx.guild
wl = await self.config.guild(guild).whitelist()
if role.id not in wl['roles']:
wl['roles'].append(role.id)
await self.config.guild(guild).whitelist.set(wl)
await ctx.send(
f"**Ajouté à la whitelist** • Les membres ayant le rôle {role.name} sont désormais immunisés.")
else:
wl['roles'].remove(role.id)
await self.config.guild(guild).whitelist.set(wl)
await ctx.send(
f"**Retiré de la whitelist** • Les membres avec le rôle {role.name} ne sont plus immunisés.")
@reposts_whitelist.command()
async def link(self, ctx, lien: str):
"""Ajouter ou retirer l'immunité pour un lien, strictement ou non
Si vous ajoutez une étoile à la fin du lien, ce sera tous les liens commençant par ce qu'il y a avant l'étoile qui ne seront pas comptés comme reposts
__Exemples :__
`;repost immune link https://discord.me/qqchose` => immunise seulement le lien `https://discord.me/qqchose`
`;repost immune link https://discord.me/*` => immunise tous les liens commençant par `https://discord.me/`"""
guild = ctx.guild
wl = await self.config.guild(guild).whitelist()
if lien == "https://www.youtube.com/*":
lien = "https://youtu.be/*"
if lien.endswith("*"):
lien = lien[:-1]
if lien not in wl['links_lazy']:
wl['links_lazy'].append(lien)
await self.config.guild(guild).whitelist.set(wl)
await ctx.send(
f"**Whitelisté** • Les liens commençant par `{lien}` ne seront plus comptés comme des reposts.")
else:
wl['links_lazy'].remove(lien)
await self.config.guild(guild).whitelist.set(wl)
await ctx.send(
f"**Retiré de la whitelist** • Les liens commençant par `{lien}` ne sont plus immunisés.")
else:
if lien not in wl['links_greedy']:
wl['links_greedy'].append(lien)
await self.config.guild(guild).whitelist.set(wl)
await ctx.send(
f"**Whitelisté** • Le lien `{lien}` ne pourra plus figurer dans les reposts.")
else:
wl['links_greedy'].remove(lien)
await self.config.guild(guild).whitelist.set(wl)
await ctx.send(
f"**Retiré de la whitelist** • Le lien `{lien}` n'est plus immunisé aux reposts.")
@reposts_whitelist.command(name="list")
async def immune_list(self, ctx):
"""Liste les éléments immunisés contre le détecteur de reposts"""
guild = ctx.guild
em = discord.Embed(title="Elements immunisés contre le détecteur de reposts", color=await ctx.embed_color())
wl = await self.config.guild(guild).whitelist()
if wl['users']:
txt = ""
for u in wl['users']:
user = guild.get_member(u)
txt += f"- {user.mention}\n"
em.add_field(name="Membres", value=txt)
if wl['roles']:
txt = ""
for r in wl['roles']:
role = guild.get_role(r)
txt += f"- {role.mention}\n"
em.add_field(name="Rôles", value=txt)
if wl['channels']:
txt = ""
for c in wl['channels']:
channel = guild.get_channel(c)
txt += f"- {channel.mention}\n"
em.add_field(name="Salons écrits", value=txt)
links = ""
if wl['links_greedy']:
for l in wl['links_greedy']:
links += f"- `{l}`\n"
if wl['links_lazy']:
for l in wl['links_lazy']:
links += f"- `{l}*`\n"
if links:
em.add_field(name="Liens", value=links)
em.set_footer(text="* = Liens commençant par ...")
await ctx.send(embed=em)
@commands.command(name="links")
async def disp_links(self, ctx, nb: typing.Optional[int] = 10, *, contain: str = None):
"""Affiche les X derniers liens détectés (reposts ou non)
Il est possible de préciser un morceau de texte qui doit être contenu dans les liens recherchés"""
guild = ctx.guild
data = await self.config.guild(guild).cache()
links = {}
for url in data:
if contain:
if contain not in url.lower():
continue
if data[url]:
links[url] = datetime.now().fromisoformat(data[url][-1]['timestamp']).timestamp()
if links:
txt = ""
for u in sorted(links, key=links.get, reverse=True)[:nb]:
txt += f"• <{u}>\n"
if contain:
em = discord.Embed(title=f"{nb} Derniers liens postés contenant \"{contain}\"", description=txt,
color=await self.bot.get_embed_color(ctx.channel))
else:
em = discord.Embed(title=f"{nb} Derniers liens postés", description=txt,
color=await self.bot.get_embed_color(ctx.channel))
em.set_footer(text="Données des 14 derniers jours seulement")
await ctx.send(embed=em)
else:
await ctx.send(
f"**Liste vide** • Aucun lien conforme à votre recherche n'a été posté récemment.")
@commands.Cog.listener()
async def on_message(self, message):
if message.guild:
scan = None
guild = message.guild
if await self.config.guild(guild).toggled():
content = message.content
if "http" in content:
scan = re.compile(r'(https?://\S*\.\S*)', re.DOTALL | re.IGNORECASE).findall(content)
if scan:
url = self.canon_link(scan[0])
if await self.is_whitelisted(message, url):
return
if message.author == self.bot.user:
return
r = {'message': message.id, 'jump_url': message.jump_url, 'author': message.author.id,
'timestamp': datetime.now().isoformat()}
if url in await self.config.guild(guild).cache():
repost = await self.config.guild(guild).cache.get_raw(url)
repost.append(r)
await self.config.guild(guild).cache.set_raw(url, value=repost)
dafter = await self.config.guild(guild).delete_after()
if dafter:
try:
await message.delete(delay=dafter)
except:
raise discord.DiscordException(f"Impossible de supprimer le message {message.id}")
else:
try:
await message.add_reaction(self.repost_emoji)
except:
raise discord.DiscordException(f"Impossible d'ajouter un emoji au message {message.id}")
else:
await self.config.guild(guild).cache.set_raw(url, value=[r])
autodel = await self.config.guild(guild).autodelete()
if autodel['lazy'] or autodel['greedy']:
if not scan:
scan = re.compile(r'(https?://\S*\.\S*)', re.DOTALL | re.IGNORECASE).findall(content)
if scan:
for url in scan:
if url in autodel['greedy'] or [l for l in autodel['lazy'] if url.startswith(l)]:
try:
await message.delete()
except:
raise discord.DiscordException(f"Impossible de supprimer le message {message.id}")
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
channel = self.bot.get_channel(payload.channel_id)
emoji = payload.emoji
if hasattr(channel, "guild"):
guild = channel.guild
data = await self.config.guild(guild).all()
if data["toggled"] and emoji == self.repost_emoji:
message = await channel.fetch_message(payload.message_id)
user = guild.get_member(payload.user_id)
if user == self.bot.user:
return
rdata = await self.get_repost_by_message(message)
if rdata:
txt = ""
repost = rdata.data
em = discord.Embed(title=f"{self.repost_emoji} Liste des reposts",
description=box(rdata.url),
color=await self.bot.get_embed_color(message.channel))
em.set_footer(text="Données des 14 derniers jours")
chunk = repost[1:] if len(repost) <= 9 else repost[-9:]
r = repost[0]
ts = datetime.now().fromisoformat(r.timestamp).strftime('%d/%m/%Y %H:%M')
author = guild.get_member(r.author)
author = f"**{author.name}**#{author.discriminator}" if author else f"ID: {r.author}"
em.add_field(name="Premier post", value=f"[Le {ts}]({r.jump_url}) par {author}",
inline=False)
for s in chunk:
ts = datetime.now().fromisoformat(s.timestamp).strftime('%d/%m/%Y %H:%M')
author = guild.get_member(s.author)
author = f"**{author.name}**#{author.discriminator}" if author else f"ID: {s.author}"
txt += f"• [Le {ts}]({s.jump_url}) par {author}\n"
em.add_field(name="Re-posts", value=txt, inline=False)
try:
await user.send(embed=em)
except:
raise
else:
await message.remove_reaction(self.repost_emoji, user)
| 45.532407
| 158
| 0.542298
|
34ea8d41218c30f146cef49a40c239a44eec47ee
| 761
|
py
|
Python
|
tests/test_database/test_user_collection/test_is_user_exist.py
|
Gliger13/bdo_daily_bot
|
d569405fcae1978c2bb1ac34d1f75936040a3552
|
[
"MIT"
] | null | null | null |
tests/test_database/test_user_collection/test_is_user_exist.py
|
Gliger13/bdo_daily_bot
|
d569405fcae1978c2bb1ac34d1f75936040a3552
|
[
"MIT"
] | null | null | null |
tests/test_database/test_user_collection/test_is_user_exist.py
|
Gliger13/bdo_daily_bot
|
d569405fcae1978c2bb1ac34d1f75936040a3552
|
[
"MIT"
] | null | null | null |
"""Test that checking for an existing user is correct."""
import pytest
from core.database.user_collection import UserCollection
from test_framework.asserts.database_asserts.check_user_collection import check_is_user_exist
from test_framework.scripts.common.data_factory import get_test_data
@pytest.mark.asyncio
@pytest.mark.parametrize('test_data', get_test_data(__file__))
async def test_is_user_exist(user_collection: UserCollection, test_data: dict):
"""
Test that checking for an existing user is correct.
:param user_collection: Database user collection.
:type user_collection: UserCollection
:param test_data: User collection test data.
:type test_data: dict
"""
await check_is_user_exist(user_collection, test_data)
| 36.238095
| 93
| 0.798949
|
a25ba8ef36dbbb132bf88b1efd7b086c22128411
| 1,538
|
py
|
Python
|
conftest.py
|
rohankumardubey/structlog
|
cad14e066c5f181ecebea5d053bec28b8e215e4a
|
[
"Apache-2.0",
"MIT"
] | 1,751
|
2015-01-04T05:58:49.000Z
|
2022-03-31T02:17:29.000Z
|
conftest.py
|
rohankumardubey/structlog
|
cad14e066c5f181ecebea5d053bec28b8e215e4a
|
[
"Apache-2.0",
"MIT"
] | 336
|
2015-01-06T15:42:06.000Z
|
2022-03-16T11:31:00.000Z
|
conftest.py
|
rohankumardubey/structlog
|
cad14e066c5f181ecebea5d053bec28b8e215e4a
|
[
"Apache-2.0",
"MIT"
] | 181
|
2015-01-14T14:45:31.000Z
|
2022-03-21T23:17:38.000Z
|
# SPDX-License-Identifier: MIT OR Apache-2.0
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the MIT License. See the LICENSE file in the root of this
# repository for complete details.
import logging
import sys
from io import StringIO
import pytest
from structlog._log_levels import _NAME_TO_LEVEL
from structlog.testing import CapturingLogger
try:
import twisted
except ImportError:
twisted = None
LOGGER = logging.getLogger()
@pytest.fixture(autouse=True)
def _ensure_logging_framework_not_altered():
"""
Prevents 'ValueError: I/O operation on closed file.' errors.
"""
before_handlers = list(LOGGER.handlers)
yield
LOGGER.handlers = before_handlers
@pytest.fixture(name="sio")
def _sio():
"""
A new StringIO instance.
"""
return StringIO()
@pytest.fixture
def event_dict():
"""
An example event dictionary with multiple value types w/o the event itself.
"""
class A:
def __repr__(self):
return r"<A(\o/)>"
return {"a": A(), "b": [3, 4], "x": 7, "y": "test", "z": (1, 2)}
@pytest.fixture(
name="stdlib_log_method",
params=[m for m in _NAME_TO_LEVEL if m != "notset"],
)
def _stdlib_log_methods(request):
return request.param
@pytest.fixture(name="cl")
def _cl():
return CapturingLogger()
collect_ignore = []
if sys.version_info[:2] < (3, 7):
collect_ignore.append("tests/test_contextvars.py")
if twisted is None:
collect_ignore.append("tests/test_twisted.py")
| 20.236842
| 79
| 0.682055
|
60cf2fbc694e601aae4e7e98958d2ae396de9966
| 4,321
|
py
|
Python
|
sklearn_pandas/__init__.py
|
Gwillink/sklearn-pandas
|
7ecd25dcb03c5aadf3e3d80983830f580e6686a8
|
[
"BSD-2-Clause"
] | 1
|
2020-09-19T05:24:36.000Z
|
2020-09-19T05:24:36.000Z
|
sklearn_pandas/__init__.py
|
Gwillink/sklearn-pandas
|
7ecd25dcb03c5aadf3e3d80983830f580e6686a8
|
[
"BSD-2-Clause"
] | null | null | null |
sklearn_pandas/__init__.py
|
Gwillink/sklearn-pandas
|
7ecd25dcb03c5aadf3e3d80983830f580e6686a8
|
[
"BSD-2-Clause"
] | null | null | null |
__version__ = '0.0.8'
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn import cross_validation
from sklearn import grid_search
def cross_val_score(model, X, *args, **kwargs):
X = DataWrapper(X)
return cross_validation.cross_val_score(model, X, *args, **kwargs)
class GridSearchCV(grid_search.GridSearchCV):
def fit(self, X, *params, **kwparams):
super(GridSearchCV, self).fit(DataWrapper(X), *params, **kwparams)
    def predict(self, X, *params, **kwparams):
        return super(GridSearchCV, self).predict(DataWrapper(X), *params, **kwparams)
try:
class RandomizedSearchCV(grid_search.RandomizedSearchCV):
def fit(self, X, *params, **kwparams):
super(RandomizedSearchCV, self).fit(DataWrapper(X), *params, **kwparams)
        def predict(self, X, *params, **kwparams):
            return super(RandomizedSearchCV, self).predict(DataWrapper(X), *params, **kwparams)
except AttributeError:
pass
class DataWrapper(object):
def __init__(self, df):
self.df = df
def __len__(self):
return len(self.df)
def __getitem__(self, key):
return self.df.iloc[key]
class DataFrameMapper(BaseEstimator, TransformerMixin):
'''
Map Pandas data frame column subsets to their own
sklearn transformation.
'''
def __init__(self, features):
'''
Params:
features a list of pairs. The first element is the pandas column
selector. This can be a string (for one column) or a list
of strings. The second element is an object that supports
sklearn's transform interface.
'''
self.features = features
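    # Hedged example of a `features` list (column names below are assumptions; the
    # transformers are standard sklearn.preprocessing classes):
    #
    #     mapper = DataFrameMapper([
    #         ('age',      sklearn.preprocessing.StandardScaler()),
    #         (['pet'],    sklearn.preprocessing.LabelBinarizer()),
    #         ('children', None),   # selected but passed through untransformed
    #     ])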
def _get_col_subset(self, X, cols):
'''
Get a subset of columns from the given table X.
X a Pandas dataframe; the table to select columns from
cols a string or list of strings representing the columns
to select
Returns a numpy array with the data from the selected columns
'''
if isinstance(cols, basestring):
cols = [cols]
if isinstance(X, list):
X = [x[cols] for x in X]
X = pd.DataFrame(X)
elif isinstance(X, DataWrapper):
# if it's a datawrapper, unwrap it
X = X.df
if len(cols) == 1:
t = X[cols[0]]
else:
t = X.as_matrix(cols)
# there is an sklearn bug (#2374) which causes weird behavior
# when 'object' type arrays are passed to labelling functions.
# To get around this, in cases where all columns are strings
        # (represented as object by Pandas), we convert the dtype to
# numpy's string type
if np.all(X.dtypes[cols] == 'object'):
t = np.array(t, dtype='|S')
return t
def fit(self, X, y=None):
'''
Fit a transformation from the pipeline
X the data to fit
'''
for columns, transformer in self.features:
if transformer is not None:
transformer.fit(self._get_col_subset(X, columns))
return self
def transform(self, X):
'''
Transform the given data. Assumes that fit has already been called.
X the data to transform
'''
extracted = []
for columns, transformer in self.features:
# columns could be a string or list of
# strings; we don't care because pandas
# will handle either.
if transformer is not None:
fea = transformer.transform(self._get_col_subset(X, columns))
else:
fea = self._get_col_subset(X, columns)
if hasattr(fea, 'toarray'):
# sparse arrays should be converted to regular arrays
# for hstack.
fea = fea.toarray()
if len(fea.shape) == 1:
fea = np.array([fea]).T
extracted.append(fea)
# combine the feature outputs into one array.
# at this point we lose track of which features
# were created from which input columns, so it's
# assumed that that doesn't matter to the model.
return np.hstack(extracted)
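    # Hedged end-to-end sketch (the DataFrame and columns are assumptions for illustration):
    #
    #     df = pd.DataFrame({'age': [20, 30], 'children': [0, 2]})
    #     mapper = DataFrameMapper([('age', None), ('children', None)])
    #     X = mapper.fit(df).transform(df)   # fit returns self, so chaining works; X is 2x2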
| 30.64539
| 84
| 0.592224
|
15b87a301dd7282f1bea3f2e8362727d26eb6ef6
| 8,339
|
py
|
Python
|
seq2seq/lf_util/sql_parser.py
|
JuruoMP/gap-exp
|
2d7af8a1da2f0ff8f9d3a2c6e15cc6383c716c05
|
[
"Apache-2.0"
] | null | null | null |
seq2seq/lf_util/sql_parser.py
|
JuruoMP/gap-exp
|
2d7af8a1da2f0ff8f9d3a2c6e15cc6383c716c05
|
[
"Apache-2.0"
] | null | null | null |
seq2seq/lf_util/sql_parser.py
|
JuruoMP/gap-exp
|
2d7af8a1da2f0ff8f9d3a2c6e15cc6383c716c05
|
[
"Apache-2.0"
] | null | null | null |
import json
WHERE_OPS = (
"not",
"between",
"=",
">",
"<",
">=",
"<=",
"!=",
"in",
"like",
"is",
"exists",
)
UNIT_OPS = ("none", "-", "+", "*", "/")
AGG_OPS = ("none", "max", "min", "count", "sum", "avg")
TABLE_TYPE = {
"sql": "sql",
"table_unit": "table_unit",
}
COND_OPS = ("and", "or")
SQL_OPS = ("intersect", "union", "except")
ORDER_OPS = ("desc", "asc")
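# Hedged note on the Spider-style encoding these tables decode; the sample tuple below
# is an assumption for illustration only:
#
#     cond_unit = (False, 2, (0, (0, 3, False), None), "terminal", None)
#     # not_op=False, WHERE_OPS[2] == "=", UNIT_OPS[0] == "none",
#     # col_unit (agg "none", column id 3, not DISTINCT), value "terminal"
#     # -> rendered roughly as:  <column_3> = terminal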
class SqlParser:
def __init__(self, table_path):
tables = json.load(open(table_path))
self.tables = {x['db_id']: x for x in tables}
self.current_table = None
def unparse(self, db_id, sql_dict):
self.current_table = self.tables[db_id]
return self._unparse(sql_dict)
def _unparse(self, sql_dict):
select_clause = self._unparse_select(sql_dict['select'])
from_clause = self._unparse_from(sql_dict['from'])
where_clause = groupby_clause = having_clause = orderby_clause = limit_clause = ''
intersect_clause = except_clause = union_clause = ''
if sql_dict['where']:
where_clause = self._unparse_where(sql_dict['where'])
if sql_dict['groupBy']:
groupby_clause = self._unparse_groupby(sql_dict['groupBy'])
if sql_dict['having']:
having_clause = self._unparse_having(sql_dict['having'])
if sql_dict['orderBy']:
orderby_clause = self._unparse_orderby(sql_dict['orderBy'])
if sql_dict['limit']:
limit_clause = self._unparse_limit(sql_dict['limit'])
if sql_dict['intersect']:
intersect_clause = 'INTERSECT ' + self._unparse(sql_dict['intersect'])
if sql_dict['except']:
except_clause = 'EXCEPT ' + self._unparse(sql_dict['except'])
if sql_dict['union']:
union_clause = 'UNION ' + self._unparse(sql_dict['union'])
sql = ' '.join([x for x in [select_clause, from_clause, where_clause,
groupby_clause, having_clause, orderby_clause, limit_clause,
intersect_clause, except_clause, union_clause] if x != ''])
return sql
def _unparse_select(self, _sel):
is_distinct = _sel[0]
sel_list = []
for sel_item in _sel[1]:
agg_id, val_unit = sel_item
unit_op, col_unit1, col_unit2 = val_unit
sel_item_str = self._unparse_col_unit(col_unit1)
if unit_op != 0:
                print('Warning: calculations between columns are used')
sel_item2_str = self._unparse_col_unit(col_unit2)
sel_item_str = ' '.join([sel_item_str, UNIT_OPS[unit_op], sel_item2_str])
if agg_id > 0:
sel_item_str = f'{AGG_OPS[agg_id]}({sel_item_str})'
if is_distinct:
sel_item_str = 'DISTINCT ' + sel_item_str
sel_list.append(sel_item_str)
return 'SELECT ' + ', '.join(sel_list)
def _unparse_from(self, _from):
table_units = _from['table_units']
conds = _from['conds']
table_unit_str_list = []
for table_unit in table_units:
table_type, table_id_or_sql = table_unit
if table_type == 'table_unit':
table_unit_str_list.append(self.current_table['table_names_original'][table_id_or_sql])
else:
table_unit_str_list.append(self._unparse(table_id_or_sql))
cond_str_list = self._unparse_condition(conds, return_list=True)
assert all(x != 'or' for x in cond_str_list)
cond_str_list = [x for x in cond_str_list if x not in ('and', 'or')]
# assert len(table_unit_str_list) == len(cond_str_list) + 1 # assertion on number of join condition
str_segs = [table_unit_str_list[0]]
for i in range(1, len(table_unit_str_list)):
str_segs.append('JOIN')
str_segs.append(table_unit_str_list[i])
if cond_str_list:
str_segs.append('ON')
str_segs.append(cond_str_list[0])
for i in range(1, len(cond_str_list)):
str_segs.append(cond_str_list[i])
return 'FROM ' + ' '.join(str_segs)
def _unparse_where(self, _where):
clause = 'WHERE ' + self._unparse_condition(_where)
return clause
def _unparse_groupby(self, _groupby):
gb_str_list = []
for gb_item in _groupby:
gb_str = self._unparse_col_unit(gb_item)
gb_str_list.append(gb_str)
return 'GROUP BY ' + ', '.join(gb_str_list)
def _unparse_orderby(self, _orderby):
order_op_str = _orderby[0]
val_unit_str_list = []
for val_unit in _orderby[1]:
unit_op, col_unit1, col_unit2 = val_unit
col_unit_str = self._unparse_col_unit(col_unit1)
if unit_op != 0:
                print('Warning: calculation between columns is used')
col_unit2_str = self._unparse_col_unit(col_unit2)
col_unit_str = ' '.join([col_unit_str, UNIT_OPS[unit_op], col_unit2_str])
val_unit_str_list.append(col_unit_str)
clause = 'ORDER BY ' + ', '.join(val_unit_str_list) + ' ' + order_op_str
return clause
def _unparse_having(self, _having):
clause = 'HAVING ' + self._unparse_condition(_having)
return clause
def _unparse_limit(self, limit):
return 'LIMIT ' + str(limit)
def _unparse_col_unit(self, col_unit):
agg_id, col_id, is_distinct = col_unit
clause = ''
table_id, column_name = self.current_table['column_names_original'][col_id]
if table_id >= 0:
column_name = self.current_table['table_names_original'][table_id] + '.' + column_name
clause += column_name
if agg_id > 0:
clause = AGG_OPS[agg_id] + ' ' + clause
if is_distinct:
clause = 'DISTINCT ' + clause
return clause
def _unparse_condition(self, condition, return_list=False):
cond_str_list = []
for cond_unit in condition:
if cond_unit in ('and', 'or'):
cond_str_list.append(cond_unit)
else:
                # cond_unit
not_op, op_id, val_unit, val1, val2 = cond_unit
op_str = WHERE_OPS[op_id]
# val_unit
unit_op, col_unit1, col_unit2 = val_unit
col_unit_str = self._unparse_col_unit(col_unit1)
if unit_op != 0:
                    print('Warning: calculation between columns is used')
unit_op_str = UNIT_OPS[unit_op]
col_unit2_str = self._unparse_col_unit(col_unit2)
col_unit_str = ' '.join([col_unit_str, unit_op_str, col_unit2_str])
val1_str = self._unparse_val(val1)
val2_str = self._unparse_val(val2)
if not_op:
assert op_str in ('in', 'like') # todo: check here
op_str = 'NOT ' + op_str
if 'between' not in op_str.lower():
cond_str_list.append(f'{col_unit_str} {op_str} {val1_str}')
else:
assert op_str.lower() == 'between'
cond_str_list.append(f'{col_unit_str} {op_str} {val1_str} AND {val2_str}')
if return_list is False:
return ' '.join(cond_str_list)
else:
return cond_str_list
def _unparse_val(self, val):
if val is None:
return None
if isinstance(val, str):
val_str = val
elif isinstance(val, dict):
val_str = self._unparse(val)
elif isinstance(val, int) or isinstance(val, float):
val_str = str(val)
else:
val_str = self._unparse_col_unit(val)
return val_str
if __name__ == '__main__':
parser = SqlParser('data/cosql/tables.json')
dev_data = json.load(open('data/cosql/train.json'))
for i in range(len(dev_data)):
print(i)
for j in range(len(dev_data[i]['interaction'])):
case_db = dev_data[i]['database_id']
case = dev_data[i]['interaction'][j]['sql']
sql = dev_data[i]['interaction'][j]['query']
print(sql)
print(parser.unparse(case_db, case))
print()
| 39.334906
| 108
| 0.578726
|
aa5104b9f0c9738217815b3d551c2f246d4b1500
| 74,916
|
py
|
Python
|
veriloggen/thread/axi.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
veriloggen/thread/axi.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
veriloggen/thread/axi.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import math
import functools
from collections import OrderedDict
import veriloggen.core.vtypes as vtypes
import veriloggen.types.util as util
import veriloggen.types.axi as axi
from veriloggen.seq.seq import Seq, TmpSeq
from veriloggen.fsm.fsm import FSM, TmpFSM
from veriloggen.optimizer import try_optimize as optimize
from veriloggen.dataflow.dtypes import make_condition
from .ttypes import _MutexFunction
from .ram import RAM, FixedRAM, MultibankRAM, to_multibank_ram
class AXIM(axi.AxiMaster, _MutexFunction):
""" AXI Master Interface with DMA controller """
__intrinsics__ = ('read', 'write',
'dma_read', 'dma_read_async',
'dma_write', 'dma_write_async',
                      'dma_wait_read', 'dma_wait_write', 'dma_wait',
'set_global_base_addr',) + _MutexFunction.__intrinsics__
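    # Hedged usage sketch (not part of this file; 'm', 'clk', 'rst' and 'ram' are
    # placeholder objects created elsewhere): from veriloggen thread code, the
    # intrinsics listed above are typically invoked without the explicit 'fsm'
    # argument that appears in the method signatures below, e.g.
    #     maxi = AXIM(m, 'maxi', clk, rst, datawidth=32)
    #     maxi.dma_read(ram, 0, 0x1000, 64)    # global memory -> on-chip RAM
    #     maxi.dma_write(ram, 0, 0x2000, 64)   # on-chip RAM -> global memory
    # The thread compiler supplies the FSM when the thread is synthesized.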
burstlen = 256
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
raddr_id_width=0, rdata_id_width=0,
waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
raddr_user_width=2, rdata_user_width=0,
waddr_burst_mode=axi.BURST_INCR, raddr_burst_mode=axi.BURST_INCR,
waddr_cache_mode=axi.AxCACHE_NONCOHERENT, raddr_cache_mode=axi.AxCACHE_NONCOHERENT,
waddr_prot_mode=axi.AxPROT_NONCOHERENT, raddr_prot_mode=axi.AxPROT_NONCOHERENT,
waddr_user_mode=axi.AxUSER_NONCOHERENT, wdata_user_mode=axi.xUSER_DEFAULT,
raddr_user_mode=axi.AxUSER_NONCOHERENT,
noio=False,
enable_async=False, use_global_base_addr=False,
num_cmd_delay=0, num_data_delay=0,
op_sel_width=8, fsm_as_module=False):
axi.AxiMaster.__init__(self, m, name, clk, rst, datawidth, addrwidth,
waddr_id_width, wdata_id_width, wresp_id_width,
raddr_id_width, rdata_id_width,
waddr_user_width, wdata_user_width, wresp_user_width,
raddr_user_width, rdata_user_width,
waddr_burst_mode, raddr_burst_mode,
waddr_cache_mode, raddr_cache_mode,
waddr_prot_mode, raddr_prot_mode,
waddr_user_mode, wdata_user_mode,
raddr_user_mode,
noio)
self.enable_async = enable_async
self.use_global_base_addr = use_global_base_addr
self.num_cmd_delay = num_cmd_delay
self.num_data_delay = num_data_delay
self.op_sel_width = op_sel_width
self.fsm_as_module = fsm_as_module
self.mutex = None
self.read_start = self.m.Reg('_'.join(['', self.name, 'read_start']),
initval=0)
self.read_op_sel = self.m.Reg('_'.join(['', self.name, 'read_op_sel']),
self.op_sel_width, initval=0)
self.read_local_addr = self.m.Reg('_'.join(['', self.name, 'read_local_addr']),
self.addrwidth, initval=0)
self.read_global_addr = self.m.Reg('_'.join(['', self.name, 'read_global_addr']),
self.addrwidth, initval=0)
self.read_size = self.m.Reg('_'.join(['', self.name, 'read_size']),
self.addrwidth + 1, initval=0)
self.read_local_stride = self.m.Reg('_'.join(['', self.name, 'read_local_stride']),
self.addrwidth, initval=0)
self.read_idle = self.m.Reg(
'_'.join(['', self.name, 'read_idle']), initval=1)
self.seq(
self.read_start(0)
)
self.read_op_id_map = OrderedDict()
self.read_op_id_count = 1
self.read_reqs = OrderedDict()
self.read_ops = []
self.read_fsm = None
self.read_data_wire = None
self.read_valid_wire = None
self.read_narrow_fsms = OrderedDict() # key: pack_size
self.read_narrow_pack_counts = OrderedDict() # key: pack_size
self.read_narrow_data_wires = OrderedDict() # key: pack_size
self.read_narrow_valid_wires = OrderedDict() # key: pack_size
self.read_wide_fsms = OrderedDict() # key: pack_size
self.read_wide_pack_counts = OrderedDict() # key: pack_size
self.read_wide_data_wires = OrderedDict() # key: pack_size
self.read_wide_valid_wires = OrderedDict() # key: pack_size
self.write_start = self.m.Reg('_'.join(['', self.name, 'write_start']),
initval=0)
self.write_op_sel = self.m.Reg('_'.join(['', self.name, 'write_op_sel']),
self.op_sel_width, initval=0)
self.write_local_addr = self.m.Reg('_'.join(['', self.name, 'write_local_addr']),
self.addrwidth, initval=0)
self.write_global_addr = self.m.Reg('_'.join(['', self.name, 'write_global_addr']),
self.addrwidth, initval=0)
self.write_size = self.m.Reg('_'.join(['', self.name, 'write_size']),
self.addrwidth + 1, initval=0)
self.write_local_stride = self.m.Reg('_'.join(['', self.name, 'write_local_stride']),
self.addrwidth, initval=0)
self.write_idle = self.m.Reg(
'_'.join(['', self.name, 'write_idle']), initval=1)
self.seq(
self.write_start(0)
)
if self.use_global_base_addr:
self.global_base_addr = self.m.Reg('_'.join(['', self.name, 'global_base_addr']),
self.addrwidth, initval=0)
else:
self.global_base_addr = None
self.write_op_id_map = OrderedDict()
self.write_op_id_count = 1
self.write_reqs = OrderedDict()
self.write_ops = []
self.write_fsm = None
self.write_data_counter = None
self.write_data_done = self.m.Wire(
'_'.join(['', self.name, 'write_data_done']))
self.write_narrow_fsms = OrderedDict() # key: pack_size
self.write_narrow_wdatas = OrderedDict() # key: pack_size
self.write_narrow_wvalids = OrderedDict() # key: pack_size
self.write_narrow_wreadys = OrderedDict() # key: pack_size
self.write_narrow_pack_counts = OrderedDict() # key: pack_size
self.write_wide_fsms = OrderedDict() # key: pack_size
self.write_wide_wdatas = OrderedDict() # key: pack_size
self.write_wide_wvalids = OrderedDict() # key: pack_size
self.write_wide_wreadys = OrderedDict() # key: pack_size
self.write_wide_pack_counts = OrderedDict() # key: pack_size
def read(self, fsm, global_addr):
if self.use_global_base_addr:
global_addr = self.global_base_addr + global_addr
ret = self.read_request(global_addr, length=1, cond=fsm)
        if isinstance(ret, tuple):
ack, counter = ret
else:
ack = ret
fsm.If(ack).goto_next()
ret = self.read_data(cond=fsm)
if len(ret) == 3:
data, valid, last = ret
else:
data, valid = ret
rdata = self.m.TmpReg(self.datawidth, initval=0,
signed=True, prefix='axim_rdata')
fsm.If(valid)(rdata(data))
fsm.Then().goto_next()
return rdata
def write(self, fsm, global_addr, value):
if self.use_global_base_addr:
global_addr = self.global_base_addr + global_addr
ret = self.write_request(global_addr, length=1, cond=fsm)
        if isinstance(ret, tuple):
ack, counter = ret
else:
ack = ret
fsm.If(ack).goto_next()
ret = self.write_data(value, cond=fsm)
        if isinstance(ret, tuple):
ack, last = ret
else:
ack, last = ret, None
fsm.If(ack).goto_next()
def dma_read(self, fsm, ram, local_addr, global_addr, size,
local_stride=1, port=0, ram_method=None):
if self.enable_async:
self.dma_wait_read(fsm)
self._dma_read(fsm, ram, local_addr, global_addr, size,
local_stride, port, ram_method)
self.dma_wait_read(fsm)
def dma_read_async(self, fsm, ram, local_addr, global_addr, size,
local_stride=1, port=0, ram_method=None):
if not self.enable_async:
raise ValueError(
"Async mode is disabled. Set 'True' to AXIM.enable_async.")
self.dma_wait_read(fsm)
self._dma_read(fsm, ram, local_addr, global_addr, size,
local_stride, port, ram_method)
def dma_write(self, fsm, ram, local_addr, global_addr, size,
local_stride=1, port=0, ram_method=None):
if self.enable_async:
self.dma_wait_write(fsm)
self._dma_write(fsm, ram, local_addr, global_addr, size,
local_stride, port, ram_method)
self.dma_wait_write(fsm)
def dma_write_async(self, fsm, ram, local_addr, global_addr, size,
local_stride=1, port=0, ram_method=None):
if not self.enable_async:
raise ValueError(
"Async mode is disabled. Set 'True' to AXIM.enable_async.")
self.dma_wait_write(fsm)
self._dma_write(fsm, ram, local_addr, global_addr, size,
local_stride, port, ram_method)
def dma_wait_read(self, fsm):
fsm.If(self.read_idle).goto_next()
def dma_wait_write(self, fsm):
fsm.If(self.write_idle).goto_next()
def dma_wait(self, fsm):
fsm.If(self.read_idle, self.write_idle).goto_next()
def set_global_base_addr(self, fsm, addr):
if not self.use_global_base_addr:
raise ValueError("global_base_addr is disabled.")
flag = self._set_flag(fsm)
self.seq.If(flag)(
self.global_base_addr(addr)
)
# --------------------
# read
# --------------------
def _dma_read(self, fsm, ram, local_addr, global_addr, size,
local_stride=1, port=0, ram_method=None):
if isinstance(ram, (tuple, list)):
ram = to_multibank_ram(ram)
if not isinstance(ram, (RAM, MultibankRAM)):
raise TypeError('RAM object is required.')
if ram_method is None:
ram_method = getattr(ram, 'write_dataflow')
start = self._set_flag(fsm)
for _ in range(self.num_cmd_delay + 1):
fsm.goto_next()
self._set_read_request(ram, port, ram_method, start,
local_addr, global_addr, size, local_stride)
self._synthesize_read_fsm(ram, port, ram_method)
fsm.goto_next()
def _set_read_request(self, ram, port, ram_method, start,
local_addr, global_addr, size, local_stride):
op_id = self._get_read_op_id(ram, port, ram_method)
if op_id in self.read_reqs:
(read_start, read_op_sel,
read_local_addr_in, read_global_addr_in,
read_size_in, read_local_stride_in) = self.read_reqs[op_id]
self.seq.If(start)(
read_start(1),
read_op_sel(op_id),
read_local_addr_in(local_addr),
read_global_addr_in(global_addr),
read_size_in(size),
read_local_stride_in(local_stride)
)
return
port = str(vtypes.to_int(port))
read_start = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_start']),
initval=0)
read_op_sel = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_op_sel']),
self.op_sel_width, initval=0)
read_local_addr = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_local_addr']),
self.addrwidth, initval=0)
read_global_addr = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_global_addr']),
self.addrwidth, initval=0)
read_size = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_size']),
self.addrwidth + 1, initval=0)
read_local_stride = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_local_stride']),
self.addrwidth, initval=0)
self.seq(
read_start(0)
)
self.seq.If(start)(
read_start(1),
read_op_sel(op_id),
read_local_addr(local_addr),
read_global_addr(global_addr),
read_size(size),
read_local_stride(local_stride)
)
self.read_reqs[op_id] = (read_start, read_op_sel,
read_local_addr, read_global_addr,
read_size, read_local_stride)
if self.num_cmd_delay > 0:
read_start = self.seq.Prev(read_start, self.num_cmd_delay)
read_op_sel = self.seq.Prev(read_op_sel, self.num_cmd_delay)
read_local_addr = self.seq.Prev(
read_local_addr, self.num_cmd_delay)
read_global_addr = self.seq.Prev(
read_global_addr, self.num_cmd_delay)
read_size = self.seq.Prev(read_size, self.num_cmd_delay)
read_local_stride = self.seq.Prev(
read_local_stride, self.num_cmd_delay)
self.seq.If(read_start)(
self.read_idle(0)
)
self.seq.If(read_start)(
self.read_start(1),
self.read_op_sel(read_op_sel),
self.read_local_addr(read_local_addr),
self.read_global_addr(read_global_addr),
self.read_size(read_size),
self.read_local_stride(read_local_stride)
)
def _synthesize_read_fsm(self, ram, port, ram_method):
ram_method_name = (ram_method.func.__name__
if isinstance(ram_method, functools.partial) else
ram_method.__name__)
ram_datawidth = (ram.datawidth if ram_method is None else
ram.orig_datawidth if 'bcast' in ram_method_name else
ram.orig_datawidth if 'block' in ram_method_name else
ram.datawidth)
if not isinstance(self.datawidth, int):
raise TypeError("axi.datawidth must be int, not '%s'" %
str(type(self.datawidth)))
if not isinstance(ram_datawidth, int):
raise TypeError("ram_datawidth must be int, not '%s'" %
str(type(ram_datawidth)))
if self.datawidth == ram_datawidth:
return self._synthesize_read_fsm_same(ram, port, ram_method, ram_datawidth)
if self.datawidth < ram_datawidth:
return self._synthesize_read_fsm_narrow(ram, port, ram_method, ram_datawidth)
return self._synthesize_read_fsm_wide(ram, port, ram_method, ram_datawidth)
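    # Hedged example of the width dispatch above (numbers are illustrative): with
    # datawidth=32 and ram_datawidth=128 the "narrow" path is taken and
    # pack_size = 128 // 32 = 4, so four consecutive AXI beats are packed into one
    # RAM word; with datawidth=128 and ram_datawidth=32 the "wide" path is taken
    # instead and each AXI beat is unpacked into four RAM words.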
def _synthesize_read_fsm_same(self, ram, port, ram_method, ram_datawidth):
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.read_ops:
""" already synthesized op """
return
if self.read_fsm is not None:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_fsm
data = self.read_data_wire
valid = self.read_valid_wire
# state 0
fsm.set_index(0)
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(cond).goto_next()
# state 3
fsm.set_index(3)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(valid_cond)(
wdata(data),
wvalid(1)
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name, 'read_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_fsm = fsm
self.read_ops.append(op_id)
cur_global_addr = self.m.Reg('_'.join(['', self.name, 'read_cur_global_addr']),
self.addrwidth, initval=0)
cur_size = self.m.Reg('_'.join(['', self.name, 'read_cur_size']),
self.addrwidth + 1, initval=0)
rest_size = self.m.Reg('_'.join(['', self.name, 'read_rest_size']),
self.addrwidth + 1, initval=0)
max_burstlen = 2 ** self.burst_size_width
# state 0
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
if not self.use_global_base_addr:
gaddr = self.read_global_addr
else:
gaddr = self.read_global_addr + self.global_base_addr
fsm.If(self.read_start)(
cur_global_addr(self.mask_addr(gaddr)),
rest_size(self.read_size)
)
fsm.If(cond).goto_next()
# state 1
check_state = fsm.current
self._check_4KB_boundary(fsm, max_burstlen,
cur_global_addr, cur_size, rest_size)
# state 2
ack, counter = self.read_request(cur_global_addr, cur_size, cond=fsm)
fsm.If(ack).goto_next()
# state 3
data, valid, last = self.read_data(cond=fsm)
self.read_data_wire = data
self.read_valid_wire = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(valid_cond)(
wdata(data),
wvalid(1),
)
fsm.If(valid, last)(
cur_global_addr.add(optimize(cur_size * (self.datawidth // 8)))
)
fsm.If(valid, last, rest_size > 0).goto(check_state)
fsm.If(valid, last, rest_size == 0).goto_next()
for _ in range(self.num_data_delay):
fsm.goto_next()
# state 4
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _synthesize_read_fsm_narrow(self, ram, port, ram_method, ram_datawidth):
""" axi.datawidth < ram.datawidth """
if ram_datawidth % self.datawidth != 0:
raise ValueError(
'ram_datawidth must be multiple number of axi.datawidth')
pack_size = ram_datawidth // self.datawidth
dma_size = (self.read_size << int(math.log(pack_size, 2))
if math.log(pack_size, 2) % 1.0 == 0.0 else
self.read_size * pack_size)
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.read_ops:
""" already synthesized op """
return
if pack_size in self.read_narrow_fsms:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_narrow_fsms[pack_size]
pack_count = self.read_narrow_pack_counts[pack_size]
data = self.read_narrow_data_wires[pack_size]
valid = self.read_narrow_valid_wires[pack_size]
# state 0
fsm.set_index(0)
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(cond).goto_next()
# state 3
fsm.set_index(3)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(valid_cond)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(valid_cond, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(1),
pack_count(0)
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'read_narrow', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_narrow_fsms[pack_size] = fsm
self.read_ops.append(op_id)
cur_global_addr = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'cur_global_addr']),
self.addrwidth, initval=0)
cur_size = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'cur_size']),
self.addrwidth + 1, initval=0)
rest_size = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
max_burstlen = 2 ** self.burst_size_width
# state 0
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
if not self.use_global_base_addr:
gaddr = self.read_global_addr
else:
gaddr = self.read_global_addr + self.global_base_addr
fsm.If(self.read_start)(
cur_global_addr(self.mask_addr(gaddr)),
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
check_state = fsm.current
self._check_4KB_boundary(fsm, max_burstlen,
cur_global_addr, cur_size, rest_size)
# state 2
ack, counter = self.read_request(cur_global_addr, cur_size, cond=fsm)
fsm.If(ack).goto_next()
# state 3
pack_count = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.read_narrow_pack_counts[pack_size] = pack_count
data, valid, last = self.read_data(cond=fsm)
self.read_narrow_data_wires[pack_size] = data
self.read_narrow_valid_wires[pack_size] = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(valid_cond)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(valid_cond, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid, last)(
cur_global_addr.add(optimize(cur_size * (self.datawidth // 8)))
)
fsm.If(valid, last, rest_size > 0).goto(check_state)
fsm.If(valid, last, rest_size == 0).goto_next()
for _ in range(self.num_data_delay):
fsm.goto_next()
# state 4
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _synthesize_read_fsm_wide(self, ram, port, ram_method, ram_datawidth):
""" axi.datawidth > ram.datawidth """
if self.datawidth % ram_datawidth != 0:
raise ValueError(
'axi.datawidth must be multiple number of ram_datawidth')
pack_size = self.datawidth // ram_datawidth
shamt = int(math.log(pack_size, 2))
res = vtypes.Mux(
vtypes.And(self.read_size, 2 ** shamt - 1) > 0, 1, 0)
dma_size = (self.read_size >> shamt) + res
actual_read_size = dma_size << shamt
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.read_ops:
""" already synthesized op """
return
if pack_size in self.read_wide_fsms:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_wide_fsms[pack_size]
pack_count = self.read_wide_pack_counts[pack_size]
data = self.read_wide_data_wires[pack_size]
valid = self.read_wide_valid_wires[pack_size]
# state 0
fsm.set_index(0)
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, actual_read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(cond).goto_next()
# state 3
fsm.set_index(3)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
stay_cond = self.read_op_sel == op_id
fsm.Delay(1)(
wvalid(0)
)
fsm.If(pack_count == 0, valid_cond)(
wdata(data),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count > 0, stay_cond)(
wdata(wdata >> ram_datawidth),
wvalid(1),
pack_count.inc()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'read_wide', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_wide_fsms[pack_size] = fsm
self.read_ops.append(op_id)
cur_global_addr = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'cur_global_addr']),
self.addrwidth, initval=0)
cur_size = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'cur_size']),
self.addrwidth + 1, initval=0)
rest_size = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
max_burstlen = 2 ** self.burst_size_width
# state 0
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, actual_read_size,
stride=self.read_local_stride, cond=cond)
if not self.use_global_base_addr:
gaddr = self.read_global_addr
else:
gaddr = self.read_global_addr + self.global_base_addr
fsm.If(self.read_start)(
cur_global_addr(self.mask_addr(gaddr)),
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
check_state = fsm.current
self._check_4KB_boundary(fsm, max_burstlen,
cur_global_addr, cur_size, rest_size)
# state 2
last_done = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'last_done']), initval=0)
fsm(
last_done(0)
)
ack, counter = self.read_request(cur_global_addr, cur_size, cond=fsm)
fsm.If(ack).goto_next()
# state 3
pack_count = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.read_wide_pack_counts[pack_size] = pack_count
cond = vtypes.Ands(fsm.here, pack_count == 0)
data, valid, last = self.read_data(cond=cond)
self.read_wide_data_wires[pack_size] = data
self.read_wide_valid_wires[pack_size] = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
stay_cond = self.read_op_sel == op_id
fsm.Delay(1)(
wvalid(0)
)
fsm.If(pack_count == 0, valid_cond)(
wdata(data),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count > 0, stay_cond)(
wdata(wdata >> ram_datawidth),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count == pack_size - 1)(
pack_count(0)
)
fsm.If(pack_count == 0, valid, last)(
last_done(1)
)
fsm.If(last_done, pack_count == pack_size - 1)(
cur_global_addr.add(optimize(cur_size * (self.datawidth // 8)))
)
fsm.If(last_done, pack_count == pack_size - 1,
rest_size > 0).goto(check_state)
fsm.If(last_done, pack_count == pack_size - 1,
rest_size == 0).goto_next()
for _ in range(self.num_data_delay):
fsm.goto_next()
# state 4
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
# --------------------
# write
# --------------------
def _dma_write(self, fsm, ram, local_addr, global_addr, size,
local_stride=1, port=0, ram_method=None):
if isinstance(ram, (tuple, list)):
ram = to_multibank_ram(ram)
if not isinstance(ram, (RAM, MultibankRAM)):
raise TypeError('RAM object is required.')
if ram_method is None:
ram_method = getattr(ram, 'read_dataflow')
start = self._set_flag(fsm)
for _ in range(self.num_cmd_delay + 1):
fsm.goto_next()
self._set_write_request(ram, port, ram_method, start,
local_addr, global_addr, size, local_stride)
self._synthesize_write_fsm(ram, port, ram_method)
fsm.goto_next()
def _set_write_request(self, ram, port, ram_method, start,
local_addr, global_addr, size, local_stride):
op_id = self._get_write_op_id(ram, port, ram_method)
if op_id in self.write_reqs:
(write_start, write_op_sel,
write_local_addr_in, write_global_addr_in,
write_size_in, write_local_stride_in) = self.write_reqs[op_id]
self.seq.If(start)(
write_start(1),
write_op_sel(op_id),
write_local_addr_in(local_addr),
write_global_addr_in(global_addr),
write_size_in(size),
write_local_stride_in(local_stride)
)
return
port = str(vtypes.to_int(port))
write_start = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'write_start']),
initval=0)
write_op_sel = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'write_op_sel']),
self.op_sel_width, initval=0)
write_local_addr = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'write_local_addr']),
self.addrwidth, initval=0)
write_global_addr = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'write_global_addr']),
self.addrwidth, initval=0)
write_size = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'write_size']),
self.addrwidth + 1, initval=0)
write_local_stride = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'write_local_stride']),
self.addrwidth, initval=0)
self.seq(
write_start(0)
)
self.seq.If(start)(
write_start(1),
write_op_sel(op_id),
write_local_addr(local_addr),
write_global_addr(global_addr),
write_size(size),
write_local_stride(local_stride)
)
self.write_reqs[op_id] = (write_start, write_op_sel,
write_local_addr, write_global_addr,
write_size, write_local_stride)
if self.num_cmd_delay > 0:
write_start = self.seq.Prev(write_start, self.num_cmd_delay)
write_op_sel = self.seq.Prev(write_op_sel, self.num_cmd_delay)
write_local_addr = self.seq.Prev(
write_local_addr, self.num_cmd_delay)
write_global_addr = self.seq.Prev(
write_global_addr, self.num_cmd_delay)
write_size = self.seq.Prev(write_size, self.num_cmd_delay)
write_local_stride = self.seq.Prev(
write_local_stride, self.num_cmd_delay)
self.seq.If(write_start)(
self.write_idle(0)
)
self.seq.If(write_start)(
self.write_start(1),
self.write_op_sel(write_op_sel),
self.write_local_addr(write_local_addr),
self.write_global_addr(write_global_addr),
self.write_size(write_size),
self.write_local_stride(write_local_stride)
)
def _synthesize_write_fsm(self, ram, port, ram_method):
ram_method_name = (ram_method.func.__name__
if isinstance(ram_method, functools.partial) else
ram_method.__name__)
ram_datawidth = (ram.datawidth if ram_method is None else
ram.orig_datawidth if 'bcast' in ram_method_name else
ram.orig_datawidth if 'block' in ram_method_name else
ram.datawidth)
if not isinstance(self.datawidth, int):
raise TypeError("axi.datawidth must be int, not '%s'" %
str(type(self.datawidth)))
if not isinstance(ram_datawidth, int):
raise TypeError("ram_datawidth must be int, not '%s'" %
str(type(ram_datawidth)))
if self.datawidth == ram_datawidth:
return self._synthesize_write_fsm_same(ram, port, ram_method, ram_datawidth)
if self.datawidth < ram_datawidth:
return self._synthesize_write_fsm_narrow(ram, port, ram_method, ram_datawidth)
return self._synthesize_write_fsm_wide(ram, port, ram_method, ram_datawidth)
def _synthesize_write_fsm_same(self, ram, port, ram_method, ram_datawidth):
op_id = self._get_write_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.write_ops:
""" already synthesized op """
return
if self.write_fsm is not None:
""" new op """
self.write_ops.append(op_id)
fsm = self.write_fsm
counter = self.write_data_counter
# state 0
fsm.set_index(0)
cond = vtypes.Ands(self.write_start, self.write_op_sel == op_id)
data, last, done = ram_method(
port, self.write_local_addr, self.write_size,
stride=self.write_local_stride, cond=cond, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
data = self.df._Delay(data)
last = self.df._Delay(last)
fsm.If(cond).goto_next()
# state 3
fsm.set_index(3)
cond = vtypes.Ands(fsm.here, self.write_op_sel == op_id)
done_out = self.write_dataflow(data, counter, cond=cond)
add_mux(self.write_data_done, done_out, 1)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name, 'write_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.write_fsm = fsm
self.write_ops.append(op_id)
cur_global_addr = self.m.Reg('_'.join(['', self.name, 'write_cur_global_addr']),
self.addrwidth, initval=0)
cur_size = self.m.Reg('_'.join(['', self.name, 'write_cur_size']),
self.addrwidth + 1, initval=0)
rest_size = self.m.Reg('_'.join(['', self.name, 'write_rest_size']),
self.addrwidth + 1, initval=0)
max_burstlen = 2 ** self.burst_size_width
# state 0
cond = vtypes.Ands(self.write_start, self.write_op_sel == op_id)
data, last, done = ram_method(
port, self.write_local_addr, self.write_size,
stride=self.write_local_stride, cond=cond, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
data = self.df._Delay(data)
last = self.df._Delay(last)
if not self.use_global_base_addr:
gaddr = self.write_global_addr
else:
gaddr = self.write_global_addr + self.global_base_addr
fsm.If(self.write_start)(
cur_global_addr(self.mask_addr(gaddr)),
rest_size(self.write_size)
)
fsm.If(cond).goto_next()
# state 1
check_state = fsm.current
self._check_4KB_boundary(fsm, max_burstlen,
cur_global_addr, cur_size, rest_size)
# state 2
ack, counter = self.write_request(cur_global_addr, cur_size, cond=fsm)
self.write_data_counter = counter
fsm.If(ack).goto_next()
# state 3
cond = vtypes.Ands(fsm.here, self.write_op_sel == op_id)
done_out = self.write_dataflow(data, counter, cond=cond)
add_mux(self.write_data_done, done_out, 1)
fsm.If(self.write_data_done)(
cur_global_addr.add(optimize(cur_size * (self.datawidth // 8)))
)
fsm.If(self.write_data_done, rest_size > 0).goto(check_state)
fsm.If(self.write_data_done, rest_size == 0).goto_next()
# state 4
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.write_idle(1)
)
fsm.goto_init()
def _synthesize_write_fsm_narrow(self, ram, port, ram_method, ram_datawidth):
""" axi.datawidth < ram.datawidth """
if ram_datawidth % self.datawidth != 0:
raise ValueError(
'ram_datawidth must be multiple number of axi.datawidth')
pack_size = ram_datawidth // self.datawidth
dma_size = (self.write_size << int(math.log(pack_size, 2))
if math.log(pack_size, 2) % 1.0 == 0.0 else
self.write_size * pack_size)
op_id = self._get_write_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.write_ops:
""" already synthesized op """
return
if pack_size in self.write_narrow_fsms:
""" new op """
self.write_ops.append(op_id)
fsm = self.write_narrow_fsms[pack_size]
wdata = self.write_narrow_wdatas[pack_size]
wvalid = self.write_narrow_wvalids[pack_size]
wready = self.write_narrow_wreadys[pack_size]
pack_count = self.write_narrow_pack_counts[pack_size]
# state 0
fsm.set_index(0)
cond = vtypes.Ands(self.write_start, self.write_op_sel == op_id)
data, last, done = ram_method(
port, self.write_local_addr, self.write_size,
stride=self.write_local_stride, cond=cond, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
data = self.df._Delay(data)
last = self.df._Delay(last)
fsm.If(cond).goto_next()
# state 3
fsm.set_index(3)
ack = vtypes.Ors(wready, vtypes.Not(wvalid))
cond = vtypes.Ands(fsm.here, ack, pack_count == 0,
self.write_op_sel == op_id)
rdata, rvalid = data.read(cond=cond)
stay_cond = self.write_op_sel == op_id
self.seq.If(rvalid, stay_cond)(
wdata(rdata),
wvalid(1),
pack_count.inc()
)
self.seq.If(ack, pack_count > 0, stay_cond)(
wdata(wdata >> self.datawidth),
wvalid(1),
pack_count.inc()
)
self.seq.If(ack, pack_count == pack_size - 1, stay_cond)(
wdata(wdata >> self.datawidth),
wvalid(1),
pack_count(0)
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'write_narrow', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.write_narrow_fsms[pack_size] = fsm
self.write_ops.append(op_id)
cur_global_addr = self.m.Reg('_'.join(['', self.name,
'write_narrow', str(pack_size),
'cur_global_addr']),
self.addrwidth, initval=0)
cur_size = self.m.Reg('_'.join(['', self.name,
'write_narrow', str(pack_size),
'cur_size']),
self.addrwidth + 1, initval=0)
rest_size = self.m.Reg('_'.join(['', self.name,
'write_narrow', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
max_burstlen = 2 ** self.burst_size_width
# state 0
cond = vtypes.Ands(self.write_start, self.write_op_sel == op_id)
data, last, done = ram_method(
port, self.write_local_addr, self.write_size,
stride=self.write_local_stride, cond=cond, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
data = self.df._Delay(data)
last = self.df._Delay(last)
if not self.use_global_base_addr:
gaddr = self.write_global_addr
else:
gaddr = self.write_global_addr + self.global_base_addr
fsm.If(self.write_start)(
cur_global_addr(self.mask_addr(gaddr)),
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
check_state = fsm.current
self._check_4KB_boundary(fsm, max_burstlen,
cur_global_addr, cur_size, rest_size)
# state 2
ack, counter = self.write_request(cur_global_addr, cur_size, cond=fsm)
fsm.If(ack).goto_next()
# state 3
wdata = self.m.Reg('_'.join(['', self.name,
'write_narrow', str(pack_size),
'wdata']),
ram_datawidth, initval=0)
self.write_narrow_wdatas[pack_size] = wdata
wvalid = self.m.Reg('_'.join(['', self.name,
'write_narrow', str(pack_size),
'wvalid']),
initval=0)
self.write_narrow_wvalids[pack_size] = wvalid
wready = self.m.Wire('_'.join(['', self.name,
'write_narrow', str(pack_size),
'wready']))
self.write_narrow_wreadys[pack_size] = wready
pack_count = self.m.Reg('_'.join(['', self.name,
'write_narrow', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.write_narrow_pack_counts[pack_size] = pack_count
ack = vtypes.Ors(wready, vtypes.Not(wvalid))
cond = vtypes.Ands(fsm.here, ack, pack_count == 0,
self.write_op_sel == op_id)
rdata, rvalid = data.read(cond=cond)
stay_cond = self.write_op_sel == op_id
self.seq.If(ack)(
wvalid(0)
)
self.seq.If(rvalid, stay_cond)(
wdata(rdata),
wvalid(1),
pack_count.inc()
)
self.seq.If(ack, pack_count > 0, stay_cond)(
wdata(wdata >> self.datawidth),
wvalid(1),
pack_count.inc()
)
self.seq.If(ack, pack_count == pack_size - 1, stay_cond)(
wdata(wdata >> self.datawidth),
wvalid(1),
pack_count(0)
)
data = self.df.Variable(wdata, wvalid, wready,
width=self.datawidth, signed=False)
done = self.write_dataflow(data, counter, cond=fsm)
fsm.If(done)(
cur_global_addr.add(optimize(cur_size * (self.datawidth // 8)))
)
fsm.If(done, rest_size > 0).goto(check_state)
fsm.If(done, rest_size == 0).goto_next()
# state 4
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.write_idle(1)
)
fsm.goto_init()
def _synthesize_write_fsm_wide(self, ram, port, ram_method, ram_datawidth):
""" axi.datawidth > ram.datawidth """
if self.datawidth % ram_datawidth != 0:
raise ValueError(
'axi.datawidth must be multiple number of ram_datawidth')
pack_size = self.datawidth // ram_datawidth
shamt = int(math.log(pack_size, 2))
res = vtypes.Mux(
vtypes.And(self.write_size, 2 ** shamt - 1) > 0, 1, 0)
dma_size = (self.write_size >> shamt) + res
actual_write_size = dma_size << shamt
op_id = self._get_write_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.write_ops:
""" already synthesized op """
return
if pack_size in self.write_wide_fsms:
""" new op """
self.write_ops.append(op_id)
fsm = self.write_wide_fsms[pack_size]
wdata = self.write_wide_wdatas[pack_size]
wvalid = self.write_wide_wvalids[pack_size]
wready = self.write_wide_wreadys[pack_size]
pack_count = self.write_wide_pack_counts[pack_size]
# state 0
fsm.set_index(0)
cond = vtypes.Ands(self.write_start, self.write_op_sel == op_id)
data, last, done = ram_method(
port, self.write_local_addr, actual_write_size,
stride=self.write_local_stride, cond=cond, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
data = self.df._Delay(data)
last = self.df._Delay(last)
fsm.If(cond).goto_next()
# state 3
fsm.set_index(3)
ack = vtypes.Ors(wready, vtypes.Not(wvalid))
cond = vtypes.Ands(fsm.here, ack, self.write_op_sel == op_id)
rdata, rvalid = data.read(cond=cond)
self.seq.If(rvalid)(
wdata(vtypes.Cat(rdata, wdata[ram_datawidth:self.datawidth])),
wvalid(0),
pack_count.inc()
)
self.seq.If(rvalid, pack_count == pack_size - 1)(
wdata(vtypes.Cat(rdata, wdata[ram_datawidth:self.datawidth])),
wvalid(1),
pack_count(0)
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'write_wide', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.write_wide_fsms[pack_size] = fsm
self.write_ops.append(op_id)
cur_global_addr = self.m.Reg('_'.join(['', self.name,
'write_wide', str(pack_size),
'cur_global_addr']),
self.addrwidth, initval=0)
cur_size = self.m.Reg('_'.join(['', self.name,
'write_wide', str(pack_size),
'cur_size']),
self.addrwidth + 1, initval=0)
rest_size = self.m.Reg('_'.join(['', self.name,
'write_wide', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
max_burstlen = 2 ** self.burst_size_width
# state 0
cond = vtypes.Ands(self.write_start, self.write_op_sel == op_id)
data, last, done = ram_method(
port, self.write_local_addr, actual_write_size,
stride=self.write_local_stride, cond=cond, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
data = self.df._Delay(data)
last = self.df._Delay(last)
if not self.use_global_base_addr:
gaddr = self.write_global_addr
else:
gaddr = self.write_global_addr + self.global_base_addr
fsm.If(self.write_start)(
cur_global_addr(self.mask_addr(gaddr)),
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
check_state = fsm.current
self._check_4KB_boundary(fsm, max_burstlen,
cur_global_addr, cur_size, rest_size)
# state 2
ack, counter = self.write_request(cur_global_addr, cur_size, cond=fsm)
fsm.If(ack).goto_next()
# state 3
wdata = self.m.Reg('_'.join(['', self.name,
'write_wide', str(pack_size),
'wdata']),
self.datawidth, initval=0)
self.write_wide_wdatas[pack_size] = wdata
wvalid = self.m.Reg('_'.join(['', self.name,
'write_wide', str(pack_size),
'wvalid']),
initval=0)
self.write_wide_wvalids[pack_size] = wvalid
wready = self.m.Wire('_'.join(['', self.name,
'write_wide', str(pack_size),
'wready']))
self.write_wide_wreadys[pack_size] = wready
pack_count = self.m.Reg('_'.join(['', self.name,
'write_wide', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.write_wide_pack_counts[pack_size] = pack_count
ack = vtypes.Ors(wready, vtypes.Not(wvalid))
cond = vtypes.Ands(fsm.here, ack, self.write_op_sel == op_id)
rdata, rvalid = data.read(cond=cond)
self.seq.If(ack)(
wvalid(0)
)
self.seq.If(rvalid)(
wdata(vtypes.Cat(rdata, wdata[ram_datawidth:self.datawidth])),
wvalid(0),
pack_count.inc()
)
self.seq.If(rvalid, pack_count == pack_size - 1)(
wdata(vtypes.Cat(rdata, wdata[ram_datawidth:self.datawidth])),
wvalid(1),
pack_count(0)
)
data = self.df.Variable(wdata, wvalid, wready,
width=self.datawidth, signed=False)
done = self.write_dataflow(data, counter, cond=fsm)
fsm.If(done)(
cur_global_addr.add(optimize(cur_size * (self.datawidth // 8)))
)
fsm.If(done, rest_size > 0).goto(check_state)
fsm.If(done, rest_size == 0).goto_next()
# state 4
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.write_idle(1)
)
fsm.goto_init()
def _set_flag(self, fsm, prefix='axim_flag'):
flag = self.m.TmpReg(initval=0, prefix=prefix)
fsm(
flag(1)
)
fsm.Delay(1)(
flag(0)
)
fsm.goto_next()
return flag
def _get_read_op_id(self, ram, port, ram_method):
ram_id = ram._id()
port = vtypes.to_int(port)
ram_method_name = (ram_method.func.__name__
if isinstance(ram_method, functools.partial) else
ram_method.__name__)
op = (ram_id, port, ram_method_name)
if op in self.read_op_id_map:
op_id = self.read_op_id_map[op]
else:
op_id = self.read_op_id_count
self.read_op_id_count += 1
self.read_op_id_map[op] = op_id
return op_id
def _get_op_write_dataflow(self, ram_datawidth):
if self.datawidth == ram_datawidth:
wdata = self.m.TmpReg(ram_datawidth, initval=0, prefix='_wdata')
wvalid = self.m.TmpReg(initval=0, prefix='_wvalid')
w = self.df.Variable(wdata, wvalid,
width=ram_datawidth, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
w = self.df._Delay(w)
return (wdata, wvalid, w)
if self.datawidth < ram_datawidth:
wdata = self.m.TmpReg(ram_datawidth, initval=0, prefix='_wdata')
wvalid = self.m.TmpReg(initval=0, prefix='_wvalid')
w = self.df.Variable(wdata, wvalid,
width=ram_datawidth, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
w = self.df._Delay(w)
return (wdata, wvalid, w)
wdata = self.m.TmpReg(self.datawidth, initval=0, prefix='_wdata')
wdata_ram = self.m.TmpWire(ram_datawidth, prefix='_wdata_ram')
wdata_ram.assign(wdata)
wvalid = self.m.TmpReg(initval=0, prefix='_wvalid')
w = self.df.Variable(wdata_ram, wvalid,
width=ram_datawidth, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
w = self.df._Delay(w)
return (wdata, wvalid, w)
def _get_write_op_id(self, ram, port, ram_method):
ram_id = ram._id()
port = vtypes.to_int(port)
ram_method_name = (ram_method.func.__name__
if isinstance(ram_method, functools.partial) else
ram_method.__name__)
op = (ram_id, port, ram_method_name)
if op in self.write_op_id_map:
op_id = self.write_op_id_map[op]
else:
op_id = self.write_op_id_count
self.write_op_id_count += 1
self.write_op_id_map[op] = op_id
return op_id
def _check_4KB_boundary(self, fsm, max_burstlen,
req_global_addr, req_size, rest_size):
fsm.If(rest_size <= max_burstlen,
self.check_boundary(req_global_addr, rest_size))(
req_size(self.rest_boundary(req_global_addr)),
rest_size(
rest_size - self.rest_boundary(req_global_addr))
).Elif(rest_size <= max_burstlen)(
req_size(rest_size),
rest_size(0)
).Elif(self.check_boundary(req_global_addr, max_burstlen))(
req_size(self.rest_boundary(req_global_addr)),
rest_size(
rest_size - self.rest_boundary(req_global_addr))
).Else(
req_size(max_burstlen),
rest_size(rest_size - max_burstlen)
)
fsm.goto_next()
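        # Hedged worked example of the 4 KB rule handled above (numbers illustrative):
        # AXI bursts must not cross a 4096-byte boundary. With a 32-bit bus (4 bytes
        # per beat), a request starting at 0x0FF0 has (0x1000 - 0x0FF0) // 4 = 4 beats
        # left before the boundary, so even if rest_size were 16 the first request is
        # capped at 4 beats and the remainder is re-checked from 0x1000 on the next pass.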
class AXIMLite(axi.AxiLiteMaster, _MutexFunction):
""" AXI-Lite Master Interface """
__intrinsics__ = ('read', 'write',
'set_global_base_addr',) + _MutexFunction.__intrinsics__
burstlen = 256
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
waddr_cache_mode=axi.AxCACHE_NONCOHERENT, raddr_cache_mode=axi.AxCACHE_NONCOHERENT,
waddr_prot_mode=axi.AxPROT_NONCOHERENT, raddr_prot_mode=axi.AxPROT_NONCOHERENT,
noio=False,
use_global_base_addr=False,
fsm_as_module=False):
axi.AxiLiteMaster.__init__(self, m, name, clk, rst, datawidth, addrwidth,
waddr_cache_mode, raddr_cache_mode,
waddr_prot_mode, raddr_prot_mode,
noio)
self.use_global_base_addr = use_global_base_addr
self.fsm_as_module = fsm_as_module
self.mutex = None
def read(self, fsm, global_addr):
if self.use_global_base_addr:
global_addr = self.global_base_addr + global_addr
ret = self.read_request(global_addr, length=1, cond=fsm)
        if isinstance(ret, tuple):
ack, counter = ret
else:
ack = ret
fsm.If(ack).goto_next()
ret = self.read_data(cond=fsm)
if len(ret) == 3:
data, valid, last = ret
else:
data, valid = ret
rdata = self.m.TmpReg(self.datawidth, initval=0,
signed=True, prefix='axim_rdata')
fsm.If(valid)(rdata(data))
fsm.Then().goto_next()
return rdata
def write(self, fsm, global_addr, value):
if self.use_global_base_addr:
global_addr = self.global_base_addr + global_addr
ret = self.write_request(global_addr, length=1, cond=fsm)
        if isinstance(ret, tuple):
ack, counter = ret
else:
ack = ret
fsm.If(ack).goto_next()
ret = self.write_data(value, cond=fsm)
        if isinstance(ret, tuple):
ack, last = ret
else:
ack, last = ret, None
fsm.If(ack).goto_next()
def set_global_base_addr(self, fsm, addr):
if not self.use_global_base_addr:
raise ValueError("global_base_addr is disabled.")
flag = self._set_flag(fsm)
self.seq.If(flag)(
self.global_base_addr(addr)
)
class AXIS(axi.AxiSlave, _MutexFunction):
__intrinsics__ = _MutexFunction.__intrinsics__
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
raddr_id_width=0, rdata_id_width=0,
waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
raddr_user_width=2, rdata_user_width=0,
wresp_user_mode=axi.xUSER_DEFAULT,
rdata_user_mode=axi.xUSER_DEFAULT,
noio=False):
axi.AxiSlave.__init__(self, m, name, clk, rst, datawidth, addrwidth,
waddr_id_width, wdata_id_width, wresp_id_width,
raddr_id_width, rdata_id_width,
waddr_user_width, wdata_user_width, wresp_user_width,
raddr_user_width, rdata_user_width,
wresp_user_mode,
rdata_user_mode,
noio)
self.mutex = None
class AXISLite(axi.AxiLiteSlave, _MutexFunction):
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
noio=False):
axi.AxiLiteSlave.__init__(self, m, name, clk, rst, datawidth, addrwidth,
noio)
self.mutex = None
class AXISRegister(AXIS):
__intrinsics__ = ('read', 'write', 'write_flag', 'wait',
'wait_flag') + _MutexFunction.__intrinsics__
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
raddr_id_width=0, rdata_id_width=0,
waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
raddr_user_width=2, rdata_user_width=0,
wresp_user_mode=axi.xUSER_DEFAULT,
rdata_user_mode=axi.xUSER_DEFAULT,
noio=False, length=4, fsm_as_module=False):
AXIS.__init__(self, m, name, clk, rst, datawidth, addrwidth,
waddr_id_width, wdata_id_width, wresp_id_width,
raddr_id_width, rdata_id_width,
waddr_user_width, wdata_user_width, wresp_user_width,
raddr_user_width, rdata_user_width,
wresp_user_mode,
rdata_user_mode,
noio)
self.fsm_as_module = fsm_as_module
if not isinstance(length, int):
raise TypeError("length must be 'int', not '%s'" %
str(type(length)))
self.register = [self.m.Reg('_'.join(['', self.name, 'register', '%d' % i]),
width=self.datawidth, initval=0, signed=True)
for i in range(length)]
self.flag = [self.m.Reg('_'.join(['', self.name, 'flag', '%d' % i]), initval=0)
for i in range(length)]
self.resetval = [self.m.Reg('_'.join(['', self.name, 'resetval', '%d' % i]),
width=self.datawidth, initval=0, signed=True)
for i in range(length)]
self.length = length
self.maskwidth = self.m.Localparam('_'.join(['', self.name, 'maskwidth']),
util.log2(length))
self.mask = self.m.Localparam('_'.join(['', self.name, 'mask']),
vtypes.Repeat(vtypes.Int(1, 1), self.maskwidth))
self.shift = self.m.Localparam('_'.join(['', self.name, 'shift']),
util.log2(self.datawidth // 8))
self._set_register_full_fsm()
def _set_register_full_fsm(self):
fsm = FSM(self.m, '_'.join(['', self.name, 'register_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
# request
addr, counter, readvalid, writevalid = self.pull_request(cond=fsm)
maskaddr = self.m.TmpReg(self.maskwidth)
fsm.If(vtypes.Ors(readvalid, writevalid))(
maskaddr((addr >> self.shift) & self.mask),
)
init_state = fsm.current
# read
read_state = fsm.current + 1
fsm.If(readvalid).goto_from(init_state, read_state)
fsm.set_index(read_state)
rdata = self.m.TmpWire(self.datawidth, signed=True)
pat = [(maskaddr == i, r) for i, r in enumerate(self.register)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
rdata.assign(rval)
flag = self.m.TmpWire()
pat = [(maskaddr == i, r) for i, r in enumerate(self.flag)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
flag.assign(rval)
resetval = self.m.TmpWire(self.datawidth, signed=True)
pat = [(maskaddr == i, r) for i, r in enumerate(self.resetval)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
resetval.assign(rval)
ack, last = self.push_read_data(rdata, counter, cond=fsm)
# flag reset
state_cond = fsm.state == fsm.current
for i, r in enumerate(self.register):
self.seq.If(state_cond, ack, flag, maskaddr == i)(
self.register[i](resetval),
self.flag[i](0)
)
fsm.If(ack)(
maskaddr.inc()
)
fsm.If(ack, last).goto_init()
# write
write_state = fsm.current + 1
fsm.If(writevalid).goto_from(init_state, write_state)
fsm.set_index(write_state)
data, mask, valid, last = self.pull_write_data(counter, cond=fsm)
state_cond = fsm.state == fsm.current
for i, r in enumerate(self.register):
self.seq.If(state_cond, valid, maskaddr == i)(
self.register[i](data)
)
fsm.If(valid)(
maskaddr.inc()
)
fsm.goto_init()
def read(self, fsm, addr):
if isinstance(addr, int):
rval = self.register[addr]
elif isinstance(addr, vtypes.Int):
rval = self.register[addr.value]
else:
pat = [(addr == i, r) for i, r in enumerate(self.register)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
return rval
def write(self, fsm, addr, value):
state_cond = fsm.state == fsm.current
for i, r in enumerate(self.register):
self.seq.If(state_cond, addr == i)(
self.register[i](value),
self.flag[i](0)
)
fsm.goto_next()
def write_flag(self, fsm, addr, value, resetvalue=0):
state_cond = fsm.state == fsm.current
for i, r in enumerate(self.register):
self.seq.If(state_cond, addr == i)(
self.register[i](value),
self.flag[i](1),
self.resetval[i](resetvalue)
)
fsm.goto_next()
def wait(self, fsm, addr, value, polarity=True):
if isinstance(addr, int):
rval = self.register[addr]
elif isinstance(addr, vtypes.Int):
rval = self.register[addr.value]
else:
pat = [(addr == i, r) for i, r in enumerate(self.register)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
if polarity:
wait_cond = (rval == value)
else:
wait_cond = (rval != value)
fsm.If(wait_cond).goto_next()
def wait_flag(self, fsm, addr, value, resetvalue=0, polarity=True):
if isinstance(addr, int):
rval = self.register[addr]
elif isinstance(addr, vtypes.Int):
rval = self.register[addr.value]
else:
pat = [(addr == i, r) for i, r in enumerate(self.register)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
if polarity:
wait_cond = (rval == value)
else:
wait_cond = (rval != value)
state_cond = fsm.state == fsm.current
# flag reset
for i, r in enumerate(self.register):
self.seq.If(wait_cond, state_cond, addr == i)(
self.register[i](resetvalue)
)
fsm.If(wait_cond).goto_next()
class AXISLiteRegister(AXISLite):
__intrinsics__ = ('read', 'write', 'write_flag', 'wait',
'wait_flag') + _MutexFunction.__intrinsics__
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
noio=False, length=4, fsm_as_module=False):
AXISLite.__init__(self, m, name, clk, rst, datawidth, addrwidth,
noio)
self.fsm_as_module = fsm_as_module
if not isinstance(length, int):
raise TypeError("length must be 'int', not '%s'" %
str(type(length)))
self.register = [self.m.Reg('_'.join(['', self.name, 'register', '%d' % i]),
width=self.datawidth, initval=0, signed=True)
for i in range(length)]
self.flag = [self.m.Reg('_'.join(['', self.name, 'flag', '%d' % i]), initval=0)
for i in range(length)]
self.resetval = [self.m.Reg('_'.join(['', self.name, 'resetval', '%d' % i]),
width=self.datawidth, initval=0, signed=True)
for i in range(length)]
self.length = length
self.maskwidth = self.m.Localparam('_'.join(['', self.name, 'maskwidth']),
util.log2(length))
self.mask = self.m.Localparam('_'.join(['', self.name, 'mask']),
vtypes.Repeat(vtypes.Int(1, 1), self.maskwidth))
self.shift = self.m.Localparam('_'.join(['', self.name, 'shift']),
util.log2(self.datawidth // 8))
self._set_register_lite_fsm()
def _set_register_lite_fsm(self):
fsm = FSM(self.m, '_'.join(['', self.name, 'register_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
# request
addr, readvalid, writevalid = self.pull_request(cond=fsm)
maskaddr = self.m.TmpReg(self.maskwidth)
fsm.If(vtypes.Ors(readvalid, writevalid))(
maskaddr((addr >> self.shift) & self.mask),
)
init_state = fsm.current
# read
read_state = fsm.current + 1
fsm.If(readvalid).goto_from(init_state, read_state)
fsm.set_index(read_state)
rdata = self.m.TmpWire(self.datawidth, signed=True)
pat = [(maskaddr == i, r) for i, r in enumerate(self.register)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
rdata.assign(rval)
flag = self.m.TmpWire()
pat = [(maskaddr == i, r) for i, r in enumerate(self.flag)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
flag.assign(rval)
resetval = self.m.TmpWire(self.datawidth, signed=True)
pat = [(maskaddr == i, r) for i, r in enumerate(self.resetval)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
resetval.assign(rval)
ack = self.push_read_data(rdata, cond=fsm)
# flag reset
state_cond = fsm.state == fsm.current
for i, r in enumerate(self.register):
self.seq.If(state_cond, ack, flag, maskaddr == i)(
self.register[i](resetval),
self.flag[i](0)
)
fsm.If(ack).goto_init()
# write
write_state = fsm.current + 1
fsm.If(writevalid).goto_from(init_state, write_state)
fsm.set_index(write_state)
data, mask, valid = self.pull_write_data(cond=fsm)
state_cond = fsm.state == fsm.current
for i, r in enumerate(self.register):
self.seq.If(state_cond, valid, maskaddr == i)(
self.register[i](data)
)
fsm.goto_init()
def read(self, fsm, addr):
if isinstance(addr, int):
rval = self.register[addr]
elif isinstance(addr, vtypes.Int):
rval = self.register[addr.value]
else:
pat = [(addr == i, r) for i, r in enumerate(self.register)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
return rval
def write(self, fsm, addr, value):
state_cond = fsm.state == fsm.current
for i, r in enumerate(self.register):
self.seq.If(state_cond, addr == i)(
self.register[i](value),
self.flag[i](0)
)
fsm.goto_next()
def write_flag(self, fsm, addr, value, resetvalue=0):
state_cond = fsm.state == fsm.current
for i, r in enumerate(self.register):
self.seq.If(state_cond, addr == i)(
self.register[i](value),
self.flag[i](1),
self.resetval[i](resetvalue)
)
fsm.goto_next()
def wait(self, fsm, addr, value, polarity=True):
if isinstance(addr, int):
rval = self.register[addr]
elif isinstance(addr, vtypes.Int):
rval = self.register[addr.value]
else:
pat = [(addr == i, r) for i, r in enumerate(self.register)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
if polarity:
wait_cond = (rval == value)
else:
wait_cond = (rval != value)
fsm.If(wait_cond).goto_next()
def wait_flag(self, fsm, addr, value, resetvalue=0, polarity=True):
if isinstance(addr, int):
rval = self.register[addr]
elif isinstance(addr, vtypes.Int):
rval = self.register[addr.value]
else:
pat = [(addr == i, r) for i, r in enumerate(self.register)]
pat.append((None, vtypes.IntX()))
rval = vtypes.PatternMux(pat)
if polarity:
wait_cond = (rval == value)
else:
wait_cond = (rval != value)
state_cond = fsm.state == fsm.current
# flag reset
for i, r in enumerate(self.register):
self.seq.If(wait_cond, state_cond, addr == i)(
self.register[i](resetvalue)
)
fsm.If(wait_cond).goto_next()
def add_mux(targ, cond, value):
prev_assign = targ._get_assign()
if not prev_assign:
targ.assign(vtypes.Mux(cond, value, 0))
else:
prev_value = prev_assign.statement.right
prev_assign.overwrite_right(
vtypes.Mux(cond, value, prev_value))
targ.module.remove(prev_assign)
targ.module.append(prev_assign)
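# Hedged usage sketch for add_mux (not part of the original source; `m`, the
# conditions and the values below are illustrative): each call wraps the wire's
# previous assignment in a new Mux, so later conditions take priority.
#   w = m.TmpWire(32)
#   add_mux(w, cond_a, val_a)   # w = cond_a ? val_a : 0
#   add_mux(w, cond_b, val_b)   # w = cond_b ? val_b : (cond_a ? val_a : 0)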
| 37.050445
| 100
| 0.541326
|
562bf19e595983f32c6ce2e6b23819920f835b35
| 4,855
|
py
|
Python
|
3rdparty/webkit/Source/ThirdParty/ANGLE/src/libANGLE/gen_packed_gl_enums.py
|
mchiasson/PhaserNative
|
f867454602c395484bf730a7c43b9c586c102ac2
|
[
"MIT"
] | null | null | null |
3rdparty/webkit/Source/ThirdParty/ANGLE/src/libANGLE/gen_packed_gl_enums.py
|
mchiasson/PhaserNative
|
f867454602c395484bf730a7c43b9c586c102ac2
|
[
"MIT"
] | null | null | null |
3rdparty/webkit/Source/ThirdParty/ANGLE/src/libANGLE/gen_packed_gl_enums.py
|
mchiasson/PhaserNative
|
f867454602c395484bf730a7c43b9c586c102ac2
|
[
"MIT"
] | 1
|
2019-01-25T13:55:25.000Z
|
2019-01-25T13:55:25.000Z
|
# Copyright 2016 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_packed_gl_enums.py:
# Code generation for the packed GL enums.
import datetime, json, os, sys
from collections import namedtuple
Enum = namedtuple('Enum', ['name', 'values', 'max_value'])
EnumValue = namedtuple('EnumValue', ['name', 'gl_name', 'value'])
kJsonFileName = "packed_gl_enums.json"
def load_enums(path):
with open(path) as map_file:
enums_dict = json.loads(map_file.read())
enums = []
for (enum_name, values_dict) in enums_dict.iteritems():
values = []
i = 0
for (value_name, value_gl_name) in sorted(values_dict.iteritems()):
values.append(EnumValue(value_name, value_gl_name, i))
i += 1
assert(i < 255) # This makes sure enums fit in the uint8_t
enums.append(Enum(enum_name, values, i))
enums.sort(key=lambda enum: enum.name)
return enums
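# Hedged illustration of the JSON shape load_enums expects (the enum and value
# names below are made up): a mapping of enum name -> {value name: GL name}.
#   {
#     "ExampleEnum": {
#       "FirstValue": "GL_FIRST_VALUE",
#       "SecondValue": "GL_SECOND_VALUE"
#     }
#   }
# Each entry becomes an Enum whose EnumValues get sequential zero-based indices.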
header_template = """// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {data_source_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PackedGLEnums_autogen.h:
// Declares ANGLE-specific enums classes for GLEnum and functions operating
// on them.
#ifndef LIBANGLE_PACKEDGLENUMS_AUTOGEN_H_
#define LIBANGLE_PACKEDGLENUMS_AUTOGEN_H_
#include <angle_gl.h>
#include <cstdint>
namespace gl
{{
template<typename Enum>
Enum FromGLenum(GLenum from);
{content}
}} // namespace gl
#endif // LIBANGLE_PACKEDGLENUMS_AUTOGEN_H_
"""
enum_declaration_template = """
enum class {enum_name} : uint8_t
{{
{value_declarations}
InvalidEnum = {max_value},
EnumCount = {max_value},
}};
template<>
{enum_name} FromGLenum<{enum_name}>(GLenum from);
GLenum ToGLenum({enum_name} from);
"""
def write_header(enums, path):
content = ['']
for enum in enums:
value_declarations = []
for value in enum.values:
value_declarations.append(' ' + value.name + ' = ' + str(value.value) + ',')
content.append(enum_declaration_template.format(
enum_name = enum.name,
max_value = str(enum.max_value),
value_declarations = '\n'.join(value_declarations)
))
header = header_template.format(
content = ''.join(content),
copyright_year = datetime.date.today().year,
data_source_name = kJsonFileName,
script_name = sys.argv[0]
)
with (open(path, 'wt')) as f:
f.write(header)
cpp_template = """// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {data_source_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PackedGLEnums_autogen.cpp:
// Implements ANGLE-specific enums classes for GLEnum and functions operating
// on them.
#include "common/debug.h"
#include "libANGLE/PackedGLEnums_autogen.h"
namespace gl
{{
{content}
}} // namespace gl
"""
enum_implementation_template = """
template<>
{enum_name} FromGLenum<{enum_name}>(GLenum from)
{{
switch(from)
{{
{from_glenum_cases}
default: return {enum_name}::InvalidEnum;
}}
}}
GLenum ToGLenum({enum_name} from)
{{
switch(from)
{{
{to_glenum_cases}
default: UNREACHABLE(); return GL_NONE;
}}
}}
"""
def write_cpp(enums, path):
content = ['']
for enum in enums:
from_glenum_cases = []
to_glenum_cases = []
for value in enum.values:
qualified_name = enum.name + '::' + value.name
from_glenum_cases.append(' case ' + value.gl_name + ': return ' + qualified_name + ';')
to_glenum_cases.append(' case ' + qualified_name + ': return ' + value.gl_name + ';')
content.append(enum_implementation_template.format(
enum_name = enum.name,
from_glenum_cases = '\n'.join(from_glenum_cases),
max_value = str(enum.max_value),
to_glenum_cases = '\n'.join(to_glenum_cases)
))
cpp = cpp_template.format(
content = ''.join(content),
copyright_year = datetime.date.today().year,
data_source_name = kJsonFileName,
script_name = sys.argv[0]
)
with (open(path, 'wt')) as f:
f.write(cpp)
if __name__ == '__main__':
path_prefix = os.path.dirname(os.path.realpath(__file__)) + os.path.sep
enums = load_enums(path_prefix + kJsonFileName)
write_header(enums, path_prefix + 'PackedGLEnums_autogen.h')
write_cpp(enums, path_prefix + 'PackedGLEnums_autogen.cpp')
| 27.429379
| 106
| 0.66035
|
206e34dd5d21979f39095536973c39900046a362
| 7,077
|
py
|
Python
|
recipes/mpir/all/conanfile.py
|
Lukas-Heiligenbrunner/conan-center-index
|
d999bb1e3e12ccd29f2ddf7b2037678bbe33ca2a
|
[
"MIT"
] | 2
|
2022-01-04T11:30:41.000Z
|
2022-01-04T11:31:32.000Z
|
recipes/mpir/all/conanfile.py
|
Lukas-Heiligenbrunner/conan-center-index
|
d999bb1e3e12ccd29f2ddf7b2037678bbe33ca2a
|
[
"MIT"
] | null | null | null |
recipes/mpir/all/conanfile.py
|
Lukas-Heiligenbrunner/conan-center-index
|
d999bb1e3e12ccd29f2ddf7b2037678bbe33ca2a
|
[
"MIT"
] | 1
|
2020-12-11T02:35:13.000Z
|
2020-12-11T02:35:13.000Z
|
from conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild
import os
import glob
class MpirConan(ConanFile):
name = "mpir"
description = "MPIR is a highly optimised library for bignum arithmetic" \
"forked from the GMP bignum library."
topics = ("conan", "mpir", "multiprecision", "math", "mathematics")
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://mpir.org/"
license = "LGPL-3.0-or-later"
settings = "os", "compiler", "arch", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"enable_cxx": [True, False],
"enable_gmpcompat": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"enable_cxx": True,
"enable_gmpcompat": True
}
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
if self.settings.compiler == "Visual Studio" and self.options.shared:
del self.options.enable_cxx
if not self.options.get_safe("enable_cxx", False):
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def build_requirements(self):
if self.settings.compiler != "Visual Studio":
self.build_requires("m4/1.4.18")
self.build_requires("yasm/1.3.0")
if tools.os_info.is_windows and self.settings.compiler != "Visual Studio" and \
"CONAN_BASH_PATH" not in os.environ and tools.os_info.detect_windows_subsystem() != "msys2":
self.build_requires("msys2/20200517")
def source(self):
tools.get(keep_permissions=True, **self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
@property
def _platforms(self):
return {"x86": "Win32", "x86_64": "x64"}
@property
def _dll_or_lib(self):
return "dll" if self.options.shared else "lib"
@property
def _vcxproj_paths(self):
compiler_version = self.settings.compiler.version if tools.Version(self.settings.compiler.version) < "16" else "15"
build_subdir = "build.vc{}".format(compiler_version)
vcxproj_paths = [
os.path.join(self._source_subfolder, build_subdir,
"{}_mpir_gc".format(self._dll_or_lib),
"{}_mpir_gc.vcxproj".format(self._dll_or_lib))
]
if self.options.get_safe("enable_cxx"):
vcxproj_paths.append(os.path.join(self._source_subfolder, build_subdir,
"lib_mpir_cxx", "lib_mpir_cxx.vcxproj"))
return vcxproj_paths
def _build_visual_studio(self):
if "MD" in self.settings.compiler.runtime and not self.options.shared: # RuntimeLibrary only defined in lib props files
props_path = os.path.join(self._source_subfolder, "build.vc",
"mpir_{}_{}.props".format(str(self.settings.build_type).lower(), self._dll_or_lib))
if self.settings.build_type == "Debug":
tools.replace_in_file(props_path, "<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>",
"<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>")
else:
tools.replace_in_file(props_path, "<RuntimeLibrary>MultiThreaded</RuntimeLibrary>",
"<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>")
msbuild = MSBuild(self)
for vcxproj_path in self._vcxproj_paths:
msbuild.build(vcxproj_path, platforms=self._platforms, upgrade_project=False)
def _configure_autotools(self):
if not self._autotools:
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
args = []
if self.options.shared:
args.extend(["--disable-static", "--enable-shared"])
else:
args.extend(["--disable-shared", "--enable-static"])
args.append("--with-pic" if self.options.get_safe("fPIC", True) else "--without-pic")
args.append("--disable-silent-rules")
args.append("--enable-cxx" if self.options.get_safe("enable_cxx") else "--disable-cxx")
args.append("--enable-gmpcompat" if self.options.enable_gmpcompat else "--disable-gmpcompat")
# compiler checks are written for C89 but compilers that default to C99 treat implicit functions as error
self._autotools.flags.append("-Wno-implicit-function-declaration")
self._autotools.configure(args=args)
return self._autotools
def build(self):
if self.settings.compiler == "Visual Studio":
self._build_visual_studio()
else:
with tools.chdir(self._source_subfolder):
autotools = self._configure_autotools()
autotools.make()
def package(self):
self.copy("COPYING*", dst="licenses", src=self._source_subfolder)
if self.settings.compiler == "Visual Studio":
lib_folder = os.path.join(self._source_subfolder, self._dll_or_lib,
self._platforms.get(str(self.settings.arch)),
str(self.settings.build_type))
self.copy("mpir.h", dst="include", src=lib_folder, keep_path=True)
if self.options.enable_gmpcompat:
self.copy("gmp.h", dst="include", src=lib_folder, keep_path=True)
if self.options.get_safe("enable_cxx"):
self.copy("mpirxx.h", dst="include", src=lib_folder, keep_path=True)
if self.options.enable_gmpcompat:
self.copy("gmpxx.h", dst="include", src=lib_folder, keep_path=True)
self.copy(pattern="*.dll*", dst="bin", src=lib_folder, keep_path=False)
self.copy(pattern="*.lib", dst="lib", src=lib_folder, keep_path=False)
else:
with tools.chdir(self._source_subfolder):
autotools = self._configure_autotools()
autotools.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
with tools.chdir(os.path.join(self.package_folder, "lib")):
for filename in glob.glob("*.la"):
os.unlink(filename)
def package_info(self):
if self.options.get_safe("enable_cxx"):
self.cpp_info.libs.append("mpirxx")
self.cpp_info.libs.append("mpir")
if self.options.enable_gmpcompat and self.settings.compiler != "Visual Studio":
if self.options.get_safe("enable_cxx"):
self.cpp_info.libs.append("gmpxx")
self.cpp_info.libs.append("gmp")
| 45.954545
| 127
| 0.607743
|
c9a9de2c5d1b02077ccebad6bd9e4f93eacbdaf4
| 6,524
|
py
|
Python
|
src/commercetools/services/custom_objects.py
|
jeroenubbink/commercetools-python-sdk
|
ee27768d6fdde3e12618059891d1d4f75dd61390
|
[
"MIT"
] | null | null | null |
src/commercetools/services/custom_objects.py
|
jeroenubbink/commercetools-python-sdk
|
ee27768d6fdde3e12618059891d1d4f75dd61390
|
[
"MIT"
] | null | null | null |
src/commercetools/services/custom_objects.py
|
jeroenubbink/commercetools-python-sdk
|
ee27768d6fdde3e12618059891d1d4f75dd61390
|
[
"MIT"
] | null | null | null |
# DO NOT EDIT! This file is automatically generated
import typing
from marshmallow import fields
from commercetools._schemas._custom_object import (
CustomObjectDraftSchema,
CustomObjectPagedQueryResponseSchema,
CustomObjectSchema,
)
from commercetools.helpers import OptionalList, RemoveEmptyValuesMixin
from commercetools.types._custom_object import (
CustomObject,
CustomObjectDraft,
CustomObjectPagedQueryResponse,
)
from commercetools.typing import OptionalListStr
from . import abstract, traits
class _CustomObjectQuerySchema(
traits.ExpandableSchema,
traits.SortableSchema,
traits.PagingSchema,
traits.QuerySchema,
):
pass
class _CustomObjectDeleteSchema(
traits.VersionedSchema, traits.ExpandableSchema, traits.DataErasureSchema
):
version = OptionalList(fields.String(), required=False)
class CustomObjectService(abstract.AbstractService):
"""Store custom JSON values."""
def get_by_container_and_key(
self, container, key, *, expand: OptionalListStr = None
) -> CustomObject:
"""Get CustomObject by container and key"""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._get(
endpoint=f"custom-objects/{container}/{key}",
params=params,
schema_cls=CustomObjectSchema,
)
def get_by_id(self, id: str, *, expand: OptionalListStr = None) -> CustomObject:
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._get(
endpoint=f"custom-objects/{id}",
params=params,
schema_cls=CustomObjectSchema,
)
def query(
self,
*,
expand: OptionalListStr = None,
sort: OptionalListStr = None,
limit: int = None,
offset: int = None,
with_total: bool = None,
where: OptionalListStr = None,
predicate_var: typing.Dict[str, str] = None,
) -> CustomObjectPagedQueryResponse:
"""The query endpoint allows to retrieve custom objects in a specific
container or all custom objects.
For performance reasons, it is highly advisable to query only for custom
objects in a container by using the container field in the where
predicate. Store custom JSON values.
"""
params = self._serialize_params(
{
"expand": expand,
"sort": sort,
"limit": limit,
"offset": offset,
"withTotal": with_total,
"where": where,
"predicate_var": predicate_var,
},
_CustomObjectQuerySchema,
)
return self._client._get(
endpoint="custom-objects",
params=params,
schema_cls=CustomObjectPagedQueryResponseSchema,
)
def create_or_update(
self, draft: CustomObjectDraft, *, expand: OptionalListStr = None
) -> CustomObject:
"""Creates a new custom object or updates an existing custom object.
If an object with the given container/key exists, the object will be
replaced with the new value and the version is incremented. If the
request contains a version and an object with the given container/key
exists then the version must match the version of the existing object.
Concurrent updates for the same custom object still can result in a
Conflict (409) even if the version is not provided. Fields with null
values will not be saved. Store custom JSON values.
"""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._post(
endpoint="custom-objects",
params=params,
data_object=draft,
request_schema_cls=CustomObjectDraftSchema,
response_schema_cls=CustomObjectSchema,
)
def create(
self, draft: CustomObjectDraft, *, expand: OptionalListStr = None
) -> CustomObject:
"""Creates a new custom object or updates an existing custom object.
If an object with the given container/key exists, the object will be
replaced with the new value and the version is incremented. If the
request contains a version and an object with the given container/key
exists then the version must match the version of the existing object.
Concurrent updates for the same custom object still can result in a
Conflict (409) even if the version is not provided. Fields with null
values will not be saved. Store custom JSON values.
"""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._post(
endpoint="custom-objects",
params=params,
data_object=draft,
request_schema_cls=CustomObjectDraftSchema,
response_schema_cls=CustomObjectSchema,
)
def delete_by_container_and_key(
self,
container,
key,
*,
data_erasure: bool = None,
version: str = None,
expand: OptionalListStr = None,
force_delete: bool = False,
) -> CustomObject:
"""Delete CustomObject by container and key"""
params = self._serialize_params(
{"dataErasure": data_erasure, "version": version, "expand": expand},
_CustomObjectDeleteSchema,
)
return self._client._delete(
endpoint=f"custom-objects/{container}/{key}",
params=params,
response_schema_cls=CustomObjectSchema,
force_delete=force_delete,
)
def delete_by_id(
self,
id: str,
*,
version: str = None,
expand: OptionalListStr = None,
data_erasure: bool = None,
force_delete: bool = False,
) -> CustomObject:
"""The version control is optional.
If the query contains a version, then it must match the version of the
object.
"""
params = self._serialize_params(
{"version": version, "expand": expand, "dataErasure": data_erasure},
_CustomObjectDeleteSchema,
)
return self._client._delete(
endpoint=f"custom-objects/{id}",
params=params,
response_schema_cls=CustomObjectSchema,
force_delete=force_delete,
)
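# Hedged usage sketch (not part of the generated service; assumes the SDK client
# exposes this service as `client.custom_objects`, and the container name is
# illustrative):
#   response = client.custom_objects.query(
#       where='container = "my-container"', limit=20)
# Restricting the query to one container keeps it cheap, as the query() docstring
# advises.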
| 35.264865
| 84
| 0.636879
|
6787f8069188ddee5fd8b844b291ecb636cdda63
| 1,934
|
py
|
Python
|
imix/models/vqa_models/uniter/train_vqa.py
|
linxi1158/iMIX
|
af87a17275f02c94932bb2e29f132a84db812002
|
[
"Apache-2.0"
] | 23
|
2021-06-26T08:45:19.000Z
|
2022-03-02T02:13:33.000Z
|
imix/models/vqa_models/uniter/train_vqa.py
|
XChuanLee/iMIX
|
99898de97ef8b45462ca1d6bf2542e423a73d769
|
[
"Apache-2.0"
] | null | null | null |
imix/models/vqa_models/uniter/train_vqa.py
|
XChuanLee/iMIX
|
99898de97ef8b45462ca1d6bf2542e423a73d769
|
[
"Apache-2.0"
] | 9
|
2021-06-10T02:36:20.000Z
|
2021-11-09T02:18:16.000Z
|
"""Copyright (c) Microsoft Corporation. Licensed under the MIT license.
UNITER finetuning for VQA
"""
import torch
from .model.vqa import UniterForVisualQuestionAnswering
from .misc import set_dropout
from .misc import compute_score_with_logits
from collections import defaultdict
from imix.models.builder import VQA_MODELS
from ..base_model import BaseModel
import logging
logger = logging.getLogger(__name__)
@VQA_MODELS.register_module()
class UNITER_VQA(BaseModel):
def __init__(self, **kwargs):
super().__init__()
args = kwargs['params']
# Prepare model
if args.pretrained_path:
checkpoint = torch.load(args.pretrained_path)
else:
checkpoint = {}
self.model = UniterForVisualQuestionAnswering.from_pretrained(
args.model_config,
checkpoint,
img_dim=args.img_dim,
num_answer=args.num_labels,
)
# make sure every process has same model parameters in the beginning
set_dropout(self.model, args.dropout)
def forward_train(self, data, **kwargs):
batch = defaultdict(lambda: None, {k: v.cuda() for k, v in data.items()})
# batch = tuple(t.cuda(device=self.device) for t in data)
model_output = self.model(batch)
return model_output
def forward_test(self, data, **kwargs):
# excluded data['qid']
batch = defaultdict(lambda: None, {k: v.cuda() for k, v in data.items() if torch.is_tensor(v)})
output = self.model(batch)
scores, target = output['scores'], output['target']
batch_score = compute_score_with_logits(scores, target).sum()
batch_size = len(target) # batch['qids'].size(0)
model_output = {
'scores': scores,
'target': target,
'batch_score': batch_score,
'batch_size': batch_size,
}
return model_output
| 29.30303
| 103
| 0.644778
|
48fc8222dd294b2dcf2f264974ff9fa41dc694e3
| 6,453
|
py
|
Python
|
scripts/DepthSummaryCsv.py
|
Kennedy-Lab-UW/Duplex-Seq-Pipeline
|
77924212089998773ce5847a40c2642698781ce2
|
[
"BSD-3-Clause"
] | 10
|
2020-04-28T20:59:57.000Z
|
2022-02-22T02:41:51.000Z
|
scripts/DepthSummaryCsv.py
|
Kennedy-Lab-UW/Duplex-Seq-Pipeline
|
77924212089998773ce5847a40c2642698781ce2
|
[
"BSD-3-Clause"
] | 35
|
2020-04-28T00:34:36.000Z
|
2022-01-20T20:59:44.000Z
|
scripts/DepthSummaryCsv.py
|
KennedyLabUW/Duplex-Seq-Pipeline
|
878457d08f3272db01550fbf85da631ae11b713c
|
[
"BSD-3-Clause"
] | 7
|
2020-08-15T01:45:01.000Z
|
2021-11-22T03:25:12.000Z
|
import datetime
import logging
import sys
from argparse import ArgumentParser
from collections import namedtuple
import statistics
from BedParser import *
Depth_Line = namedtuple(
"Depth_Line",
[
"Chrom",
"Pos",
"Ref",
"DP",
"Ns"
]
)
def str_or_nonetype(inStr):
if inStr.upper() == "NONE":
return(None)
else:
return(inStr)
def main():
# Parse in arguments
parser = ArgumentParser()
parser.add_argument(
'-i','--inFile',
action="store",
dest="in_file",
default=None,
help=("The input depth file. "
"If 'None', defaults to stdin. [%(default)s]"))
parser.add_argument(
'-o','--outFile',
action="store",
dest="out_file",
default=None,
help=("The output depth summary file. "
"If 'None', defaults to stdout. [%(default)s]"))
parser.add_argument(
'-b','--bed_file',
action="store",
dest="bed_file",
required=True,
help="The bed file to compute stats on.")
parser.add_argument(
'-m', '--mask_bed',
action='store',
dest='mask_bed',
help='A bed file with the regions to be masked.',
type=str_or_nonetype,
default=None)
parser.add_argument(
'--blocks_only',
action="store_true",
dest="blocks",
help="Use only those sites in blocks for calculating whole-line stats.")
parser.add_argument(
'--logLevel',
action="store",
dest="logLvl",
default="Info",
help=(f"Identification for how much information gets output. "
f"Acceptable levels are: 'DEBUG', 'INFO', 'WARNING', "
f"'ERROR', and 'CRITICAL'. "
)
)
o = parser.parse_args()
cmd=" ".join(sys.argv)
d = datetime.datetime.today()
# Set up logging
numeric_level = getattr(logging, o.logLvl.upper(), None)
if not isinstance(numeric_level, int):
        raise ValueError(f'Invalid log level: {o.logLvl}')
logging.basicConfig(
format='%(levelname)s: %(message)s',
level=numeric_level,
)
logging.info(f"Running DepthSummaryCsv.py on {d} using command:")
logging.info(cmd)
# Open bed file
logging.info(f"Opening bed file {o.bed_file}...")
in_bed = Bed_File(o.bed_file)
logging.info("Parsing bed file...")
# Set up data structures
bed_dict = []
for line in in_bed:
# Add the main line
bed_dict.append(
{"region": line,
"depths": [],
"class": "Bed_Line",
"min": 0,
"mean": 0,
"median": 0,
"max": 0})
# Add subregions, if they differ from the main region
subregs = line.get_subregions()
if subregs[0].samtoolsStr() != line.samtoolsStr():
for block in subregs:
bed_dict.append(
{"region": block,
"depths": [],
"class": "Bed_Block",
"min": 0,
"mean": 0,
"median": 0,
"max": 0})
# Open input file
if o.in_file is None:
logging.info("Using input from StdIn")
f_in = sys.stdin
else:
logging.info(f"Using input from {o.in_file}")
f_in = open(o.in_file,'r')
# Open masking bed:
if o.mask_bed is not None:
mask_bed = Bed_File(o.mask_bed)
mask_regions = [x for x in mask_bed]
else:
mask_bed = False
mask_regions = []
# Iterate through the input file
logging.info("Processing input file...")
for i, lIn in enumerate(f_in):
if i % 10000 == 0:
logging.info(f"Processed {i} lines...")
if lIn[0] != "#":
line = Depth_Line(*lIn.strip().split())
# Iterate through the regions and blocks
for regIter in bed_dict:
# check if the line is masked:
mask_line = False
for mask_iter in mask_regions:
if mask_iter.contains(line.Chrom, int(line.Pos) - 1):
mask_line = True
# Count the line in any region that contains it
if (not mask_line and
regIter["region"].contains(line.Chrom,
int(line.Pos) - 1,
o.blocks)):
regIter["depths"].append(int(line.DP))
# Close input file
if o.in_file is not None:
f_in.close()
# Open output file
if o.out_file is None:
logging.info("Writing output to StdOut")
f_out = sys.stdout
else:
logging.info(f"Writing output to {o.out_file}")
f_out = open(o.out_file, 'w')
# Write header line
logging.info("Writing output file...")
f_out.write(
f"##Input file: {o.in_file}\n"
f"##Target bed file: {o.bed_file}\n"
f"##Masking bed file: {o.mask_bed}\n")
if o.blocks:
f_out.write("##Blocks only\n")
f_out.write(
"#NAME,"
"CHROM,"
"START_POS,"
"END_POS,"
"TYPE,"
"MIN,"
"MEAN,"
"MEDIAN,"
"MAX\n")
# Calculate average depths and write output
for regIter in bed_dict:
if len(regIter["region"]) > 0 and len(regIter["depths"]) > 0:
#regIter["depths"].extend([0 for x in range(
# len(regIter["region"])-len(regIter["depths"]))])
regIter["min"] = min(regIter["depths"])
regIter["max"] = max(regIter["depths"])
regIter["median"] = statistics.median(regIter["depths"])
regIter["mean"] = statistics.mean(regIter["depths"])
# Write line to output file
f_out.write(
f"{regIter['region'].name},"
f"{regIter['region'].chrom},"
f"{regIter['region'].startPos + 1},"
f"{regIter['region'].endPos},"
f"{regIter['class']},"
f"{regIter['min']},"
f"{regIter['mean']},"
f"{regIter['median']},"
f"{regIter['max']}\n")
# Close output file
if o.out_file is not None:
f_out.close()
# DONE
logging.info("DONE")
if __name__ == "__main__":
main()
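# Hedged example invocation (file names are placeholders; the flags match the
# argparse options defined above):
#   python DepthSummaryCsv.py -i depths.tsv -o depth_summary.csv \
#       -b targets.bed -m mask.bed --blocks_only --logLevel Debug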
| 30.438679
| 80
| 0.51325
|
648fc83735ead4099435d5ee812790fd248e0cc2
| 1,756
|
py
|
Python
|
astrodust/extinction/parse_PAH.py
|
eblur/dust
|
babbee0d5b6625f431eaff11ef33e8a839c7d7ff
|
[
"BSD-2-Clause"
] | 8
|
2016-03-25T03:35:06.000Z
|
2021-05-12T10:29:13.000Z
|
astrodust/extinction/parse_PAH.py
|
eblur/dust
|
babbee0d5b6625f431eaff11ef33e8a839c7d7ff
|
[
"BSD-2-Clause"
] | 22
|
2016-03-21T15:57:18.000Z
|
2016-06-03T04:28:07.000Z
|
astrodust/extinction/parse_PAH.py
|
eblur/dust
|
babbee0d5b6625f431eaff11ef33e8a839c7d7ff
|
[
"BSD-2-Clause"
] | 7
|
2015-07-01T19:31:18.000Z
|
2021-12-27T02:18:50.000Z
|
## Created by Lia Corrales to parse PAH optical constant tables (PAHion_30, PAHneu_30)
## November 11, 2013 : lia@astro.columbia.edu
import os
import numpy as np
def find_cmfile( name ):
root_lib_path = os.path.dirname(__file__).rstrip('extinction')
data_path = root_lib_path + 'distlib/tables/'
return data_path + name
ION_FILE = find_cmfile('PAHion_30')
NEU_FILE = find_cmfile('PAHneu_30')
def parse_PAH( option, ignore='#', flag='>', verbose=False ):
if option == 'ion': filename = ION_FILE
if option == 'neu': filename = NEU_FILE
try : f = open( filename, 'r' )
except:
print('ERROR: file not found')
return
COLS = ['w(micron)', 'Q_ext', 'Q_abs', 'Q_sca', 'g=<cos>' ]
result = {}
end_of_file = False
while not end_of_file:
try:
line = f.readline()
# Ignore the ignore character
if line[0] == ignore : pass
# Characters flagged with '>' earn a dictionary entry with grain size
elif line[0] == flag :
                gsize = float( line.split()[1] )
if verbose : print('Reading data for grain size:', gsize)
result[ gsize ] = {}
# Initialize dictionaries with lists
for i in range( len(COLS) ) : result[gsize][COLS[i]] = []
# Sort the columns into the correct dictionary
else:
row_vals = line.split()
for i in range( len(COLS) ) :
                    result[ gsize ][ COLS[i] ].append( float( row_vals[i] ) )
except:
if verbose : print(line)
end_of_file = True
f.close()
return result
#test_ion = parse_PAH('ion')
#test_neu = parse_PAH('neu')
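# Hedged usage sketch (the 0.001 grain-size key is illustrative; actual keys are
# whatever sizes appear after the '>' flag in the table):
#   ion = parse_PAH('ion')
#   qabs = ion[0.001]['Q_abs']   # list of Q_abs values, aligned with 'w(micron)'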
| 29.266667
| 86
| 0.566629
|
d8a6f3769e5a50e415385804ed7c3d655cdaec48
| 1,236
|
py
|
Python
|
Algorithms_hard/0410. Split Array Largest Sum.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | 4
|
2020-08-11T20:45:15.000Z
|
2021-03-12T00:33:34.000Z
|
Algorithms_hard/0410. Split Array Largest Sum.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | null | null | null |
Algorithms_hard/0410. Split Array Largest Sum.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | null | null | null |
"""
410. Split Array Largest Sum
Given an array which consists of non-negative integers and an integer m, you can split the array into m non-empty continuous subarrays. Write an algorithm to minimize the largest sum among these m subarrays.
Note:
If n is the length of array, assume the following constraints are satisfied:
1 ≤ n ≤ 1000
1 ≤ m ≤ min(50, n)
Examples:
Input:
nums = [7,2,5,10,8]
m = 2
Output:
18
Explanation:
There are four ways to split nums into two subarrays.
The best way is to split it into [7,2,5] and [10,8],
where the largest sum among the two subarrays is only 18.
"""
class Solution(object):
def splitArray(self, nums, m):
left, right = max(nums) - 1, sum(nums)
while right - left > 1:
mid = (left + right) >> 1
idx, s, cnt = 0, 0, 0
while idx < len(nums):
while idx < len(nums) and s + nums[idx] <= mid:
s += nums[idx]
idx += 1
if idx == len(nums):
cnt += 1
else:
cnt += 1
s = 0
if cnt <= m:
right = mid
else:
left = mid
return right
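# Hedged check (values taken from the example in the docstring above):
if __name__ == "__main__":
    assert Solution().splitArray([7, 2, 5, 10, 8], 2) == 18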
| 26.297872
| 207
| 0.533981
|
464f0515f98b68ea7527d1d1aa8ddcaefa498910
| 16,403
|
py
|
Python
|
spectrochempy/core/processors/align.py
|
dcambie/spectrochempy
|
e376082d66be7a4c528b7d83be076d77534e39bd
|
[
"CECILL-B"
] | 3
|
2021-04-09T09:13:21.000Z
|
2022-01-09T00:05:42.000Z
|
spectrochempy/core/processors/align.py
|
fernandezc/spectrochempy
|
4707c51dba0032c160afc40682fa16d4b9855ded
|
[
"CECILL-B"
] | null | null | null |
spectrochempy/core/processors/align.py
|
fernandezc/spectrochempy
|
4707c51dba0032c160afc40682fa16d4b9855ded
|
[
"CECILL-B"
] | null | null | null |
# -*- coding: utf-8 -*-
# ======================================================================================================================
# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie,
# Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in
# the root directory =
# ======================================================================================================================
"""
This module defines functions related to NDDataset alignment.
"""
__all__ = ['align']
__dataset_methods__ = __all__
# import scipy.interpolate
import numpy as np
from spectrochempy.utils import MASKED, UnitsCompatibilityError, get_n_decimals
from spectrochempy.core import warning_, error_
from spectrochempy.core.dataset.coord import Coord
# ..................................................................................................................
def can_merge_or_align(coord1, coord2):
"""
Check if two coordinates can be merged or aligned
Parameters
----------
coord1, coord2 : |Coord|
coordinates to merge or align
Returns
-------
can_merge, can_align : tuple of bools
Two flags about merge and alignment possibility
"""
if (coord1 == coord2):
# same coordinates
can_merge = True # merge is obvious
can_align = True # of course as it is the same coordinate
else:
        # not the same coordinates
can_merge = False # we need to do alignment to merge
        # can align only if data exists, units are compatible, and titles are
        # the same
can_align = True
can_align &= not coord1.is_empty
can_align &= not coord2.is_empty
can_align &= coord1.title == coord2.title
if can_align and (coord1.has_units or coord2.has_units):
if coord1.has_units:
can_align &= coord1.is_units_compatible(coord2)
else:
can_align &= coord2.is_units_compatible(coord1)
return can_merge, can_align
# ............................................................................
def align(dataset, *others, **kwargs):
"""
Align individual |NDDataset| along given dimensions using various methods.
Parameters
-----------
dataset : |NDDataset|
        Dataset on which we want to align other objects.
*others : |NDDataset|
Objects to align.
dim : str. Optional, default='x'
Along which axis to perform the alignment.
dims : list of str, optional, default=None
Align along all dims defined in dims (if dim is also
defined, then dims have higher priority).
method : enum ['outer', 'inner', 'first', 'last', 'interpolate'], optional, default='outer'
Which method to use for the alignment.
        The following values are accepted:
* 'outer' means that a union of the different coordinates is
achieved (missing values are masked)
* 'inner' means that the intersection of the coordinates is used
* 'first' means that the first dataset is used as reference
* 'last' means that the last dataset is used as reference
* 'interpolate' means that interpolation is performed relative to
the first dataset.
interpolate_method : enum ['linear','pchip']. Optional, default='linear'
        Method of interpolation to perform for the alignment.
interpolate_sampling : 'auto', int or float. Optional, default='auto'
* 'auto' : sampling is determined automatically from the existing data.
        * int : if an integer value is specified, then the
          sampling interval for the interpolated data will be split into
          this number of points.
* float : If a float value is provided, it determines the interval
between the interpolated data.
coord : |Coord|, optional, default=None
coordinates to use for alignment. Ignore those corresponding to the
dimensions to align.
copy : bool, optional, default=True
If False then the returned objects will share memory with the
original objects, whenever it is possible :
in principle only if reindexing is not necessary.
Returns
--------
aligned_datasets : tuple of |NDDataset|
Same objects as datasets with dimensions aligned.
Raises
------
ValueError
        issued when the dimensions given in the `dim` or `dims` argument are not
        compatible (units, titles, etc.).
"""
    # DEVELOPER NOTE
    # There are probably better methods, but to simplify dealing with
    # LinearCoord, we transform them into Coord before treatment (going back
    # to linear if possible at the end of the process)
# TODO: Perform an alignment along numeric labels
# TODO: add example in docs
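    # A minimal usage sketch (assuming nd1 and nd2 are NDDataset objects sharing
    # an 'x' dimension; illustrative only):
    #   nd1a, nd2a = align(nd1, nd2, dim='x', method='outer')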
# copy objects?
copy = kwargs.pop('copy', True)
# make a single list with dataset and the remaining object
objects = [dataset] + list(others)
# should we align on given external coordinates
extern_coord = kwargs.pop('coord', None)
if extern_coord and extern_coord.implements('LinearCoord'):
extern_coord = Coord(extern_coord, linear=False, copy=True)
# what's the method to use (by default='outer')
method = kwargs.pop('method', 'outer')
    # trivial cases where alignment is not possible or unnecessary
if not objects:
warning_('No object provided for alignment!')
return None
if len(objects) == 1 and objects[0].implements('NDDataset') and extern_coord is None:
        # no alignment necessary
return objects
# evaluate on which axis we align
axis, dims = dataset.get_axis(only_first=False, **kwargs)
# check compatibility of the dims and prepare the dimension for alignment
for axis, dim in zip(axis, dims):
        # get all objects to align
_objects = {}
_nobj = 0
for idx, object in enumerate(objects):
if not object.implements('NDDataset'):
error_(f'Bad object(s) found: {object}. Note that only NDDataset '
f'objects are accepted '
f'for alignment')
return None
_objects[_nobj] = {'obj': object.copy(), 'idx': idx, }
_nobj += 1
_last = _nobj - 1
        # get the reference object (by default the first, except if method is
        # set to 'last')
ref_obj_index = 0
if method == 'last':
ref_obj_index = _last
ref_obj = _objects[ref_obj_index]['obj']
# as we will sort their coordinates at some point, we need to know
# if the coordinates need to be reversed at
# the end of the alignment process
reversed = ref_obj.coordset[dim].reversed
if reversed:
ref_obj.sort(descend=False, dim=dim, inplace=True)
# get the coordset corresponding to the reference object
ref_obj_coordset = ref_obj.coordset
# get the coordinate for the reference dimension
ref_coord = ref_obj_coordset[dim]
# as we will sort their coordinates at some point, we need to know
# if the coordinates need to be reversed at
# the end of the alignment process
reversed = ref_coord.reversed
# prepare a new Coord object to store the final new dimension
new_coord = ref_coord.copy()
ndec = get_n_decimals(new_coord.data.max(), 1.e-5)
if new_coord.implements('LinearCoord'):
new_coord = Coord(new_coord, linear=False, copy=True)
# loop on all object
for index, object in _objects.items():
obj = object['obj']
if obj is ref_obj:
# not necessary to compare with itself!
continue
if reversed:
obj.sort(descend=False, dim=dim, inplace=True)
# get the current objet coordinates and check compatibility
coord = obj.coordset[dim]
if coord.implements('LinearCoord') or coord.linear:
coord = Coord(coord, linear=False, copy=True)
if not coord.is_units_compatible(ref_coord):
# not compatible, stop everything
raise UnitsCompatibilityError('NDataset to align must have compatible units!')
            # do units transform if necessary so coords can be compared
if coord.units != ref_coord.units:
coord.ito(ref_coord)
            # adjust the new_coord depending on the method of alignment
new_coord_data = set(np.around(new_coord.data, ndec))
coord_data = set(np.around(coord.data, ndec))
if method in ['outer', 'interpolate']:
# in this case we do a union of the coords (masking the
# missing values)
# For method=`interpolate`, the interpolation will be
# performed in a second step
new_coord._data = sorted(coord_data | new_coord_data)
elif method == 'inner':
                # take only the intersection of the coordinates; the result
                # may be empty if the coordinates do not overlap
new_coord._data = sorted(coord_data & new_coord_data)
elif method in ['first', 'last']:
# we take the reference coordinates already determined as
# basis (masking the missing values)
continue
else:
raise NotImplementedError(f'The method {method} is unknown!')
# Now perform alignment of all objects on the new coordinates
for index, object in _objects.items():
obj = object['obj']
# get the dim index for the given object
dim_index = obj.dims.index(dim)
            # prepare slicing keys ; set slice(None) for the untouched
            # dimensions preceding the dimension of interest
prepend_keys = [slice(None)] * dim_index
# New objects for obj must be created with the new coordinates
# change the data shape
new_obj_shape = list(obj.shape)
new_obj_shape[dim_index] = len(new_coord)
new_obj_data = np.full(new_obj_shape, np.NaN)
# create new dataset for obj and ref_objects
if copy:
new_obj = obj.copy()
else:
new_obj = obj
# update the data and mask
coord = obj.coordset[dim]
coord_data = set(np.around(coord.data, ndec))
dim_loc = new_coord._loc2index(sorted(coord_data))
loc = tuple(prepend_keys + [dim_loc])
new_obj._data = new_obj_data
# mask all the data then unmask later the relevant data in
# the next step
if not new_obj.is_masked:
new_obj.mask = MASKED
new_obj.mask[loc] = False
else:
mask = new_obj.mask.copy()
new_obj.mask = MASKED
new_obj.mask[loc] = mask
# set the data for the loc
new_obj._data[loc] = obj.data
# update the coordinates
new_coordset = obj.coordset.copy()
if coord.is_labeled:
label_shape = list(coord.labels.shape)
label_shape[0] = new_coord.size
new_coord._labels = np.zeros(tuple(label_shape)).astype(coord.labels.dtype)
new_coord._labels[:] = '--'
new_coord._labels[dim_loc] = coord.labels
setattr(new_coordset, dim, new_coord)
new_obj._coordset = new_coordset
# reversed?
if reversed:
# we must reverse the given coordinates
new_obj.sort(descend=reversed, dim=dim, inplace=True)
# update the _objects
_objects[index]['obj'] = new_obj
if method == 'interpolate':
warning_('Interpolation not yet implemented - for now equivalent '
'to `outer`')
    # the new transformed objects must be in the same order as the passed
    # objects, and the missing values must be masked (for the moment they are
    # set to NaN)
for index, object in _objects.items():
obj = object['obj']
# obj[np.where(np.isnan(obj))] = MASKED # mask NaN values
obj[np.where(np.isnan(obj))] = 99999999999999. # replace NaN values (to simplify
# comparisons)
idx = int(object['idx'])
objects[idx] = obj
# we also transform into linear coord if possible ?
# TODO:
# Now return
return tuple(objects)
# if method == 'interpolate': # # # reorders dataset and reference # in ascending order # is_sorted
# = False # if # dataset.coordset(axis).reversed: # datasetordered = # dataset.sort(axis,
# descend=False) # refordered = ref.sort( # refaxis, descend=False) # is_sorted = True #
# else: # # datasetordered = dataset.copy() # refordered = ref.copy() # # try: #
# datasetordered.coordset(axis).to( # refordered.coordset(refaxis).units) # except: # # raise
# ValueError( # 'units of the dataset and # reference axes on which interpolate are not
# compatible') # # # oldaxisdata = datasetordered.coordset(axis).data # # refaxisdata =
# refordered.coordset(refaxis).data # TODO: at the # end restore the original order # # method =
# kwargs.pop( # 'method', 'linear') # fill_value = kwargs.pop('fill_value', # np.NaN) # #
# if method == 'linear': # interpolator # = lambda data, ax=0: scipy.interpolate.interp1d( # #
# oldaxisdata, data, axis=ax, kind=method, bounds_error=False, # fill_value=fill_value,
# assume_sorted=True) # # elif # method == 'pchip': # interpolator = lambda data,
# ax=0: scipy.interpolate.PchipInterpolator( # # oldaxisdata, data, axis=ax,
# extrapolate=False) # # else: # raise AttributeError(f'{method} is not a #
# recognised option method for `align`') # # # interpolate_data = interpolator(
# datasetordered.data, # axis) # newdata = interpolate_data(refaxisdata) # # #
# if datasetordered.is_masked: # interpolate_mask = # interpolator(
# datasetordered.mask, axis) # newmask # = interpolate_mask(refaxisdata) #
# else: # # newmask = NOMASK # # # interpolate_axis = #
# interpolator(datasetordered.coordset(axis).data) # # # newaxisdata =
# interpolate_axis(refaxisdata) # # newaxisdata = refaxisdata.copy() # # if
# method == # 'pchip' and not np.isnan(fill_value): # index = #
# np.any(np.isnan(newdata)) # newdata[index] = # fill_value # #
# index = np.any(np.isnan( # newaxisdata)) # newaxisdata[index] = fill_value
# # # create the new axis # newaxes = dataset.coords.copy() # # newaxes[axis]._data = newaxisdata
# newaxes[axis]._labels = # np.array([''] * newaxisdata.size) # # # transform the dataset #
# inplace = kwargs.pop('inplace', False) # # if inplace: # # out = dataset # else: #
# out = dataset.copy() # # # out._data = newdata # out._coords = newaxes # out._mask #
# = newmask # # out.name = dataset.name # out.title = # dataset.title # # out.history
# = '{}: Aligned along dim {} # with respect to dataset {} using coords {} \n'.format( # # str(
# dataset.modified), axis, ref.name, ref.coords[refaxis].title) # # if is_sorted and out.coordset(
# axis).reversed: # # out.sort(axis, descend=True, inplace=True) # ref.sort( # refaxis,
# descend=True, inplace=True) # # return out
| 43.165789
| 120
| 0.572151
|
c96b5308c4b4a2271397872d063bedadf71ff068
| 969
|
py
|
Python
|
tests/handlers/websocket/test_kwarg_handling.py
|
t1waz/starlite
|
f25a21c3785dce406b42d15859f445c919ec1875
|
[
"MIT"
] | 334
|
2022-01-07T12:14:54.000Z
|
2022-03-30T23:28:03.000Z
|
tests/handlers/websocket/test_kwarg_handling.py
|
t1waz/starlite
|
f25a21c3785dce406b42d15859f445c919ec1875
|
[
"MIT"
] | 70
|
2022-01-06T18:41:33.000Z
|
2022-03-23T20:21:33.000Z
|
tests/handlers/websocket/test_kwarg_handling.py
|
t1waz/starlite
|
f25a21c3785dce406b42d15859f445c919ec1875
|
[
"MIT"
] | 24
|
2022-01-06T22:02:01.000Z
|
2022-03-20T01:43:39.000Z
|
from starlite import Parameter, WebSocket, create_test_client, websocket
def test_handle_websocket_params_parsing() -> None:
@websocket(path="/{socket_id:int}")
async def websocket_handler(
socket: WebSocket,
headers: dict,
query: dict,
cookies: dict,
socket_id: int,
qp: int,
hp: str = Parameter(header="some-header"),
) -> None:
assert socket_id
assert headers
assert query
assert cookies
assert qp
assert hp
await socket.accept()
data = await socket.receive_json()
assert data
await socket.send_json({"data": "123"})
await socket.close()
client = create_test_client(route_handlers=websocket_handler)
with client.websocket_connect("/1?qp=1", headers={"some-header": "abc"}, cookies={"cookie": "yum"}) as ws:
ws.send_json({"data": "123"})
data = ws.receive_json()
assert data
| 29.363636
| 110
| 0.608875
|
1eb4d6eec80d66e8d6e964b29f23f9e162b22ee4
| 47,882
|
py
|
Python
|
tasks/R2R/speaker/follower.py
|
sunqiang85/DASA
|
c4fdc61db77f59f84c68abec3b985fbd7dc29323
|
[
"MIT-0",
"MIT"
] | 5
|
2020-08-12T14:49:22.000Z
|
2022-02-13T21:48:30.000Z
|
tasks/R2R/speaker/follower.py
|
sunqiang85/DASA
|
c4fdc61db77f59f84c68abec3b985fbd7dc29323
|
[
"MIT-0",
"MIT"
] | 16
|
2020-07-26T08:28:56.000Z
|
2022-03-12T00:43:03.000Z
|
tasks/R2R/speaker/follower.py
|
sunqiang85/DASA
|
c4fdc61db77f59f84c68abec3b985fbd7dc29323
|
[
"MIT-0",
"MIT"
] | 4
|
2020-07-30T06:25:31.000Z
|
2021-03-03T10:08:35.000Z
|
''' Agents: stop/random/shortest/seq2seq '''
import json
import sys
import numpy as np
import random
from collections import namedtuple
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torch.distributions as D
from .utils import vocab_pad_idx, vocab_eos_idx, flatten, structured_map, try_cuda
#from env import FOLLOWER_MODEL_ACTIONS, FOLLOWER_ENV_ACTIONS, IGNORE_ACTION_INDEX, LEFT_ACTION_INDEX, RIGHT_ACTION_INDEX, START_ACTION_INDEX, END_ACTION_INDEX, FORWARD_ACTION_INDEX, index_action_tuple
InferenceState = namedtuple("InferenceState", "prev_inference_state, world_state, observation, flat_index, last_action, last_action_embedding, action_count, score, h_t, c_t, last_alpha")
Cons = namedtuple("Cons", "first, rest")
def cons_to_list(cons):
l = []
while True:
l.append(cons.first)
cons = cons.rest
if cons is None:
break
return l
def backchain_inference_states(last_inference_state):
states = []
observations = []
actions = []
inf_state = last_inference_state
scores = []
last_score = None
attentions = []
while inf_state is not None:
states.append(inf_state.world_state)
observations.append(inf_state.observation)
actions.append(inf_state.last_action)
attentions.append(inf_state.last_alpha)
if last_score is not None:
scores.append(last_score - inf_state.score)
last_score = inf_state.score
inf_state = inf_state.prev_inference_state
scores.append(last_score)
return list(reversed(states)), list(reversed(observations)), list(reversed(actions))[1:], list(reversed(scores))[1:], list(reversed(attentions))[1:] # exclude start action
def least_common_viewpoint_path(inf_state_a, inf_state_b):
# return inference states traversing from A to X, then from Y to B,
# where X and Y are the least common ancestors of A and B respectively that share a viewpointId
path_to_b_by_viewpoint = {
}
b = inf_state_b
b_stack = Cons(b, None)
while b is not None:
path_to_b_by_viewpoint[b.world_state.viewpointId] = b_stack
b = b.prev_inference_state
b_stack = Cons(b, b_stack)
a = inf_state_a
path_from_a = [a]
while a is not None:
vp = a.world_state.viewpointId
if vp in path_to_b_by_viewpoint:
path_to_b = cons_to_list(path_to_b_by_viewpoint[vp])
assert path_from_a[-1].world_state.viewpointId == path_to_b[0].world_state.viewpointId
return path_from_a + path_to_b[1:]
a = a.prev_inference_state
path_from_a.append(a)
raise AssertionError("no common ancestor found")
def batch_instructions_from_encoded(encoded_instructions, max_length, reverse=False, sort=False):
# encoded_instructions: list of lists of token indices (should not be padded, or contain BOS or EOS tokens)
#seq_tensor = np.array(encoded_instructions)
# make sure pad does not start any sentence
num_instructions = len(encoded_instructions)
seq_tensor = np.full((num_instructions, max_length), vocab_pad_idx)
seq_lengths = []
for i, inst in enumerate(encoded_instructions):
if len(inst) > 0:
assert inst[-1] != vocab_eos_idx
if reverse:
inst = inst[::-1]
inst = np.concatenate((inst, [vocab_eos_idx]))
inst = inst[:max_length]
seq_tensor[i,:len(inst)] = inst
seq_lengths.append(len(inst))
seq_tensor = torch.from_numpy(seq_tensor)
if sort:
seq_lengths, perm_idx = torch.from_numpy(np.array(seq_lengths)).sort(0, True)
seq_lengths = list(seq_lengths)
seq_tensor = seq_tensor[perm_idx]
mask = (seq_tensor == vocab_pad_idx)[:, :max(seq_lengths)]
ret_tp = try_cuda(Variable(seq_tensor, requires_grad=False).long()), \
try_cuda(mask.byte()), \
seq_lengths
if sort:
ret_tp = ret_tp + (list(perm_idx),)
return ret_tp
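# Hedged usage sketch (token ids are arbitrary): each instruction gets vocab_eos_idx
# appended, is truncated/padded to max_length with vocab_pad_idx, and a pad mask plus
# the true lengths are returned.
#   seq, mask, lengths = batch_instructions_from_encoded([[5, 9, 2], [7]], max_length=10)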
class BaseAgent(object):
''' Base class for an R2R agent to generate and save trajectories. '''
def __init__(self, env, results_path):
self.env = env
self.results_path = results_path
random.seed(1)
self.results = {}
self.losses = [] # For learning agents
def write_results(self):
results = {}
for key, item in self.results.items():
results[key] = {
'instr_id': item['instr_id'],
'trajectory': item['trajectory'],
}
with open(self.results_path, 'w') as f:
json.dump(results, f)
def rollout(self):
''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''
raise NotImplementedError
@staticmethod
def get_agent(name):
return globals()[name+"Agent"]
def test(self):
self.env.reset_epoch()
self.losses = []
self.results = {}
# We rely on env showing the entire batch before repeating anything
#print 'Testing %s' % self.__class__.__name__
looped = False
rollout_scores = []
beam_10_scores = []
while True:
rollout_results = self.rollout()
# if self.feedback == 'argmax':
# beam_results = self.beam_search(1, load_next_minibatch=False)
# assert len(rollout_results) == len(beam_results)
# for rollout_traj, beam_trajs in zip(rollout_results, beam_results):
# assert rollout_traj['instr_id'] == beam_trajs[0]['instr_id']
# assert rollout_traj['trajectory'] == beam_trajs[0]['trajectory']
# assert np.allclose(rollout_traj['score'], beam_trajs[0]['score'])
# print("passed check: beam_search with beam_size=1")
#
# self.env.set_beam_size(10)
# beam_results = self.beam_search(10, load_next_minibatch=False)
# assert len(rollout_results) == len(beam_results)
# for rollout_traj, beam_trajs in zip(rollout_results, beam_results):
# rollout_score = rollout_traj['score']
# rollout_scores.append(rollout_score)
# beam_score = beam_trajs[0]['score']
# beam_10_scores.append(beam_score)
# # assert rollout_score <= beam_score
# self.env.set_beam_size(1)
# # print("passed check: beam_search with beam_size=10")
# if self.feedback == 'teacher' and self.beam_size == 1:
# rollout_loss = self.loss
# path_obs, path_actions, encoded_instructions = self.env.gold_obs_actions_and_instructions(self.episode_len, load_next_minibatch=False)
# for i in range(len(rollout_results)):
# assert rollout_results[i]['actions'] == path_actions[i]
# assert [o1['viewpoint'] == o2['viewpoint']
# for o1, o2 in zip(rollout_results[i]['observations'], path_obs[i])]
# trajs, loss = self._score_obs_actions_and_instructions(path_obs, path_actions, encoded_instructions)
# for traj, rollout in zip(trajs, rollout_results):
# assert traj['instr_id'] == rollout['instr_id']
# assert traj['actions'] == rollout['actions']
# assert np.allclose(traj['score'], rollout['score'])
# assert np.allclose(rollout_loss.item(), loss.item())
# print('passed score test')
for result in rollout_results:
if result['instr_id'] in self.results:
looped = True
else:
self.results[result['instr_id']] = result
if looped:
break
# if self.feedback == 'argmax':
# print("avg rollout score: ", np.mean(rollout_scores))
# print("avg beam 10 score: ", np.mean(beam_10_scores))
return self.results
def path_element_from_observation(ob):
return (ob['viewpoint'], ob['heading'], ob['elevation'])
class StopAgent(BaseAgent):
''' An agent that doesn't move! '''
def rollout(self):
world_states = self.env.reset()
obs = self.env.observe(world_states)
traj = [{
'instr_id': ob['instr_id'],
'trajectory': [path_element_from_observation(ob) ]
} for ob in obs]
return traj
class RandomAgent(BaseAgent):
''' An agent that picks a random direction then tries to go straight for
five viewpoint steps and then stops. '''
def rollout(self):
world_states = self.env.reset()
obs = self.env.observe(world_states)
traj = [{
'instr_id': ob['instr_id'],
'trajectory': [path_element_from_observation(ob)]
} for ob in obs]
ended = [False] * len(obs)
self.steps = [0] * len(obs)
for t in range(6):
actions = []
for i, ob in enumerate(obs):
if self.steps[i] >= 5:
actions.append(0) # do nothing, i.e. end
ended[i] = True
elif self.steps[i] == 0:
a = np.random.randint(len(ob['adj_loc_list']) - 1) + 1
actions.append(a) # choose a random adjacent loc
self.steps[i] += 1
else:
assert len(ob['adj_loc_list']) > 1
actions.append(1) # go forward
self.steps[i] += 1
world_states = self.env.step(world_states, actions, obs)
obs = self.env.observe(world_states)
for i,ob in enumerate(obs):
if not ended[i]:
traj[i]['trajectory'].append(path_element_from_observation(ob))
return traj
class ShortestAgent(BaseAgent):
''' An agent that always takes the shortest path to goal. '''
def rollout(self):
world_states = self.env.reset()
#obs = self.env.observe(world_states)
all_obs, all_actions = self.env.shortest_paths_to_goals(world_states, 20)
return [
{
'instr_id': obs[0]['instr_id'],
# end state will appear twice because stop action is a no-op, so exclude it
'trajectory': [path_element_from_observation(ob) for ob in obs[:-1]]
}
for obs in all_obs
]
class Seq2SeqAgent(BaseAgent):
''' An agent based on an LSTM seq2seq model with attention. '''
# For now, the agent can't pick which forward move to make - just the one in the middle
# env_actions = FOLLOWER_ENV_ACTIONS
# start_index = START_ACTION_INDEX
# ignore_index = IGNORE_ACTION_INDEX
# forward_index = FORWARD_ACTION_INDEX
# end_index = END_ACTION_INDEX
feedback_options = ['teacher', 'argmax', 'sample']
def __init__(self, env, results_path, encoder, decoder, episode_len=10, beam_size=1, reverse_instruction=True, max_instruction_length=80):
super(Seq2SeqAgent, self).__init__(env, results_path)
self.encoder = encoder
self.decoder = decoder
self.episode_len = episode_len
self.losses = []
self.criterion = nn.CrossEntropyLoss(ignore_index=-1)
self.beam_size = beam_size
self.reverse_instruction = reverse_instruction
self.max_instruction_length = max_instruction_length
# @staticmethod
# def n_inputs():
# return len(FOLLOWER_MODEL_ACTIONS)
#
# @staticmethod
# def n_outputs():
# return len(FOLLOWER_MODEL_ACTIONS)-2 # Model doesn't output start or ignore
def _feature_variables(self, obs, beamed=False):
''' Extract precomputed features into variable. '''
feature_lists = list(zip(*[ob['feature'] for ob in (flatten(obs) if beamed else obs)]))
assert len(feature_lists) == len(self.env.image_features_list)
batched = []
for featurizer, feature_list in zip(self.env.image_features_list, feature_lists):
batched.append(featurizer.batch_features(feature_list))
return batched
def _action_variable(self, obs):
        # get the maximum number of actions over all samples in this batch
max_num_a = -1
for i, ob in enumerate(obs):
max_num_a = max(max_num_a, len(ob['adj_loc_list']))
is_valid = np.zeros((len(obs), max_num_a), np.float32)
action_embedding_dim = obs[0]['action_embedding'].shape[-1]
action_embeddings = np.zeros(
(len(obs), max_num_a, action_embedding_dim),
dtype=np.float32)
for i, ob in enumerate(obs):
adj_loc_list = ob['adj_loc_list']
num_a = len(adj_loc_list)
is_valid[i, 0:num_a] = 1.
for n_a, adj_dict in enumerate(adj_loc_list):
action_embeddings[i, :num_a, :] = ob['action_embedding']
return (
Variable(torch.from_numpy(action_embeddings), requires_grad=False).cuda(),
Variable(torch.from_numpy(is_valid), requires_grad=False).cuda(),
is_valid)
def _teacher_action(self, obs, ended):
''' Extract teacher actions into variable. '''
a = torch.LongTensor(len(obs))
for i,ob in enumerate(obs):
# Supervised teacher only moves one axis at a time
a[i] = ob['teacher'] if not ended[i] else -1
return try_cuda(Variable(a, requires_grad=False))
def _proc_batch(self, obs, beamed=False):
encoded_instructions = [ob['instr_encoding'] for ob in (flatten(obs) if beamed else obs)]
return batch_instructions_from_encoded(encoded_instructions, self.max_instruction_length, reverse=self.reverse_instruction)
def rollout(self):
if self.beam_size == 1:
return self._rollout_with_loss()
else:
assert self.beam_size >= 1
beams, _, _ = self.beam_search(self.beam_size)
return [beam[0] for beam in beams]
def _score_obs_actions_and_instructions(self, path_obs, path_actions, encoded_instructions):
batch_size = len(path_obs)
assert len(path_actions) == batch_size
assert len(encoded_instructions) == batch_size
for path_o, path_a in zip(path_obs, path_actions):
assert len(path_o) == len(path_a) + 1
seq, seq_mask, seq_lengths, perm_indices = \
batch_instructions_from_encoded(
encoded_instructions, self.max_instruction_length,
reverse=True, sort=True)#self.reverse_instruction
loss = 0
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
u_t_prev = self.decoder.u_begin.expand(batch_size, -1) # init action
ended = np.array([False] * batch_size)
sequence_scores = try_cuda(torch.zeros(batch_size))
traj = [{
'instr_id': path_o[0]['instr_id'],
'trajectory': [path_element_from_observation(path_o[0])],
'actions': [],
'scores': [],
'observations': [path_o[0]],
'instr_encoding': path_o[0]['instr_encoding']
} for path_o in path_obs]
obs = None
for t in range(self.episode_len):
next_obs = []
next_target_list = []
for perm_index, src_index in enumerate(perm_indices):
path_o = path_obs[src_index]
path_a = path_actions[src_index]
if t < len(path_a):
next_target_list.append(path_a[t])
next_obs.append(path_o[t])
else:
next_target_list.append(-1)
next_obs.append(obs[perm_index])
obs = next_obs
target = try_cuda(Variable(torch.LongTensor(next_target_list), requires_grad=False))
f_t_list = self._feature_variables(obs) # Image features from obs
all_u_t, is_valid, _ = self._action_variable(obs)
            assert len(f_t_list) == 1, 'for now, only works with MeanPooled feature'
h_t, c_t, alpha, logit, alpha_v = self.decoder(
u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx, seq_mask)
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
# Supervised training
loss += self.criterion(logit, target)
# Determine next model inputs
a_t = torch.clamp(target, min=0) # teacher forcing
# update the previous action
u_t_prev = all_u_t[np.arange(batch_size), a_t, :].detach()
action_scores = -F.cross_entropy(logit, target, ignore_index=-1, reduce=False).data
sequence_scores += action_scores
# Save trajectory output
for perm_index, src_index in enumerate(perm_indices):
ob = obs[perm_index]
if not ended[perm_index]:
traj[src_index]['trajectory'].append(path_element_from_observation(ob))
traj[src_index]['score'] = float(sequence_scores[perm_index])
traj[src_index]['scores'].append(action_scores[perm_index])
traj[src_index]['actions'].append(a_t.data[perm_index])
# traj[src_index]['observations'].append(ob)
# Update ended list
for i in range(batch_size):
action_idx = a_t[i].item()
if action_idx == 0:
ended[i] = True
# Early exit if all ended
if ended.all():
break
return traj, loss
def _rollout_with_loss(self):
initial_world_states = self.env.reset(sort=True)
initial_obs = self.env.observe(initial_world_states)
initial_obs = np.array(initial_obs)
batch_size = len(initial_obs)
# get mask and lengths
seq, seq_mask, seq_lengths = self._proc_batch(initial_obs)
# Forward through encoder, giving initial hidden state and memory cell for decoder
# TODO consider not feeding this into the decoder, and just using attention
self.loss = 0
feedback = self.feedback
ctx,h_t,c_t = self.encoder(seq, seq_lengths)
# Record starting point
traj = [{
'instr_id': ob['instr_id'],
'trajectory': [path_element_from_observation(ob)],
'actions': [],
'scores': [],
'observations': [ob],
'instr_encoding': ob['instr_encoding']
} for ob in initial_obs]
obs = initial_obs
world_states = initial_world_states
# Initial action
u_t_prev = self.decoder.u_begin.expand(batch_size, -1) # init action
        ended = np.array([False] * batch_size) # Indices match permutation of the model, not env
# Do a sequence rollout and calculate the loss
env_action = [None] * batch_size
sequence_scores = try_cuda(torch.zeros(batch_size))
for t in range(self.episode_len):
f_t_list = self._feature_variables(obs) # Image features from obs
all_u_t, is_valid, _ = self._action_variable(obs)
            assert len(f_t_list) == 1, 'for now, only works with MeanPooled feature'
h_t, c_t, alpha, logit, alpha_v = self.decoder(
u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx, seq_mask)
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
# Supervised training
target = self._teacher_action(obs, ended)
self.loss += self.criterion(logit, target)
# Determine next model inputs
if feedback == 'teacher':
# turn -1 (ignore) to 0 (stop) so that the action is executable
a_t = torch.clamp(target, min=0)
elif feedback == 'argmax':
_,a_t = logit.max(1) # student forcing - argmax
a_t = a_t.detach()
elif feedback == 'sample':
probs = F.softmax(logit, dim=1) # sampling an action from model
# Further mask probs where agent can't move forward
# Note input to `D.Categorical` does not have to sum up to 1
# http://pytorch.org/docs/stable/torch.html#torch.multinomial
probs[is_valid == 0] = 0.
m = D.Categorical(probs)
a_t = m.sample()
else:
sys.exit('Invalid feedback option')
# update the previous action
u_t_prev = all_u_t[np.arange(batch_size), a_t, :].detach()
action_scores = -F.cross_entropy(logit, a_t, ignore_index=-1, reduce=False).data
sequence_scores += action_scores
            # dfried: I changed this so that the ended list is updated afterward; this causes <end> to be added
            # as the last action, along with its score, and the final world state will be duplicated
            # (to more closely match beam search)
# Make environment action
for i in range(batch_size):
action_idx = a_t[i].item()
env_action[i] = action_idx
world_states = self.env.step(world_states, env_action, obs)
obs = self.env.observe(world_states)
# print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, world_states[0], a_t.item(), sequence_scores[0]))
# Save trajectory output
for i,ob in enumerate(obs):
if not ended[i]:
traj[i]['trajectory'].append(path_element_from_observation(ob))
traj[i]['score'] = sequence_scores[i]
traj[i]['scores'].append(action_scores[i])
traj[i]['actions'].append(a_t.data[i])
traj[i]['observations'].append(ob)
# Update ended list
for i in range(batch_size):
action_idx = a_t[i].item()
if action_idx == 0:
ended[i] = True
# Early exit if all ended
if ended.all():
break
#self.losses.append(self.loss.item() / self.episode_len)
# shouldn't divide by the episode length because of masking
self.losses.append(self.loss.item())
return traj
def beam_search(self, beam_size, load_next_minibatch=True, mask_undo=False):
assert self.env.beam_size >= beam_size
world_states = self.env.reset(sort=True, beamed=True, load_next_minibatch=load_next_minibatch)
obs = self.env.observe(world_states, beamed=True)
batch_size = len(world_states)
# get mask and lengths
seq, seq_mask, seq_lengths = self._proc_batch(obs, beamed=True)
# Forward through encoder, giving initial hidden state and memory cell for decoder
ctx,h_t,c_t = self.encoder(seq, seq_lengths)
completed = []
for _ in range(batch_size):
completed.append([])
beams = [
[InferenceState(prev_inference_state=None,
world_state=ws[0],
observation=o[0],
flat_index=i,
last_action=-1,
last_action_embedding=self.decoder.u_begin.view(-1),
action_count=0,
score=0.0, h_t=None, c_t=None, last_alpha=None)]
for i, (ws, o) in enumerate(zip(world_states, obs))
]
# Do a sequence rollout and calculate the loss
for t in range(self.episode_len):
flat_indices = []
beam_indices = []
u_t_list = []
for beam_index, beam in enumerate(beams):
for inf_state in beam:
beam_indices.append(beam_index)
flat_indices.append(inf_state.flat_index)
u_t_list.append(inf_state.last_action_embedding)
u_t_prev = torch.stack(u_t_list, dim=0)
assert len(u_t_prev.shape) == 2
flat_obs = flatten(obs)
f_t_list = self._feature_variables(flat_obs) # Image features from obs
all_u_t, is_valid, is_valid_numpy = self._action_variable(flat_obs)
            assert len(f_t_list) == 1, 'for now, only works with MeanPooled feature'
h_t, c_t, alpha, logit, alpha_v = self.decoder(
u_t_prev, all_u_t, f_t_list[0], h_t[flat_indices], c_t[flat_indices], ctx[beam_indices], seq_mask[beam_indices])
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
# # Mask outputs where agent can't move forward
# no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]
if mask_undo:
masked_logit = logit.clone()
else:
masked_logit = logit
log_probs = F.log_softmax(logit, dim=1).data
# force ending if we've reached the max time steps
# if t == self.episode_len - 1:
# action_scores = log_probs[:,self.end_index].unsqueeze(-1)
# action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
# else:
#action_scores, action_indices = log_probs.topk(min(beam_size, logit.size()[1]), dim=1)
_, action_indices = masked_logit.data.topk(min(beam_size, logit.size()[1]), dim=1)
action_scores = log_probs.gather(1, action_indices)
assert action_scores.size() == action_indices.size()
start_index = 0
new_beams = []
assert len(beams) == len(world_states)
all_successors = []
for beam_index, (beam, beam_world_states, beam_obs) in enumerate(zip(beams, world_states, obs)):
successors = []
end_index = start_index + len(beam)
assert len(beam_world_states) == len(beam)
assert len(beam_obs) == len(beam)
if beam:
for inf_index, (inf_state, world_state, ob, action_score_row, action_index_row) in \
enumerate(zip(beam, beam_world_states, beam_obs, action_scores[start_index:end_index], action_indices[start_index:end_index])):
flat_index = start_index + inf_index
for action_score, action_index in zip(action_score_row, action_index_row):
if is_valid_numpy[flat_index, action_index] == 0:
continue
successors.append(
InferenceState(prev_inference_state=inf_state,
world_state=world_state, # will be updated later after successors are pruned
observation=ob, # will be updated later after successors are pruned
flat_index=flat_index,
last_action=action_index,
last_action_embedding=all_u_t[flat_index, action_index].detach(),
action_count=inf_state.action_count + 1,
score=float(inf_state.score + action_score), h_t=None, c_t=None,
last_alpha=alpha[flat_index].data)
)
start_index = end_index
successors = sorted(successors, key=lambda t: t.score, reverse=True)[:beam_size]
all_successors.append(successors)
successor_world_states = [
[inf_state.world_state for inf_state in successors]
for successors in all_successors
]
successor_env_actions = [
[inf_state.last_action for inf_state in successors]
for successors in all_successors
]
successor_last_obs = [
[inf_state.observation for inf_state in successors]
for successors in all_successors
]
successor_world_states = self.env.step(successor_world_states, successor_env_actions, successor_last_obs, beamed=True)
successor_obs = self.env.observe(successor_world_states, beamed=True)
all_successors = structured_map(lambda inf_state, world_state, obs: inf_state._replace(world_state=world_state, observation=obs),
all_successors, successor_world_states, successor_obs, nested=True)
# if all_successors[0]:
# print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, all_successors[0][0].world_state, all_successors[0][0].last_action, all_successors[0][0].score))
for beam_index, successors in enumerate(all_successors):
new_beam = []
for successor in successors:
if successor.last_action == 0 or t == self.episode_len - 1:
completed[beam_index].append(successor)
else:
new_beam.append(successor)
if len(completed[beam_index]) >= beam_size:
new_beam = []
new_beams.append(new_beam)
beams = new_beams
world_states = [
[inf_state.world_state for inf_state in beam]
for beam in beams
]
obs = [
[inf_state.observation for inf_state in beam]
for beam in beams
]
# Early exit if all ended
if not any(beam for beam in beams):
break
trajs = []
for this_completed in completed:
assert this_completed
this_trajs = []
for inf_state in sorted(this_completed, key=lambda t: t.score, reverse=True)[:beam_size]:
path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
# this will have messed-up headings for (at least some) starting locations because of
# discretization, so read from the observations instead
## path = [(obs.viewpointId, state.heading, state.elevation)
## for state in path_states]
trajectory = [path_element_from_observation(ob) for ob in path_observations]
this_trajs.append({
'instr_id': path_observations[0]['instr_id'],
'instr_encoding': path_observations[0]['instr_encoding'],
'trajectory': trajectory,
'observations': path_observations,
'actions': path_actions,
'score': inf_state.score,
'scores': path_scores,
'attentions': path_attentions
})
trajs.append(this_trajs)
traversed_lists = None # todo
return trajs, completed, traversed_lists
def state_factored_search(self, completion_size, successor_size, load_next_minibatch=True, mask_undo=False, first_n_ws_key=4):
assert self.env.beam_size >= successor_size
world_states = self.env.reset(sort=True, beamed=True, load_next_minibatch=load_next_minibatch)
initial_obs = self.env.observe(world_states, beamed=True)
batch_size = len(world_states)
# get mask and lengths
seq, seq_mask, seq_lengths = self._proc_batch(initial_obs, beamed=True)
# Forward through encoder, giving initial hidden state and memory cell for decoder
ctx,h_t,c_t = self.encoder(seq, seq_lengths)
completed = []
completed_holding = []
for _ in range(batch_size):
completed.append({})
completed_holding.append({})
state_cache = [
{ws[0][0:first_n_ws_key]: (InferenceState(prev_inference_state=None,
world_state=ws[0],
observation=o[0],
flat_index=None,
last_action=-1,
last_action_embedding=self.decoder.u_begin.view(-1),
action_count=0,
score=0.0, h_t=h_t[i], c_t=c_t[i], last_alpha=None), True)}
for i, (ws, o) in enumerate(zip(world_states, initial_obs))
]
beams = [[inf_state for world_state, (inf_state, expanded) in sorted(instance_cache.items())]
for instance_cache in state_cache] # sorting is a noop here since each instance_cache should only contain one
# traversed_lists = None
# list of inference states containing states in order of the states being expanded
last_expanded_list = []
traversed_lists = []
for beam in beams:
assert len(beam) == 1
first_state = beam[0]
last_expanded_list.append(first_state)
traversed_lists.append([first_state])
def update_traversed_lists(new_visited_inf_states):
assert len(new_visited_inf_states) == len(last_expanded_list)
assert len(new_visited_inf_states) == len(traversed_lists)
for instance_index, instance_states in enumerate(new_visited_inf_states):
last_expanded = last_expanded_list[instance_index]
# todo: if this passes, shouldn't need traversed_lists
assert last_expanded.world_state.viewpointId == traversed_lists[instance_index][-1].world_state.viewpointId
for inf_state in instance_states:
path_from_last_to_next = least_common_viewpoint_path(last_expanded, inf_state)
# path_from_last should include last_expanded's world state as the first element, so check and drop that
assert path_from_last_to_next[0].world_state.viewpointId == last_expanded.world_state.viewpointId
assert path_from_last_to_next[-1].world_state.viewpointId == inf_state.world_state.viewpointId
traversed_lists[instance_index].extend(path_from_last_to_next[1:])
last_expanded = inf_state
last_expanded_list[instance_index] = last_expanded
# Do a sequence rollout and calculate the loss
while any(len(comp) < completion_size for comp in completed):
beam_indices = []
u_t_list = []
h_t_list = []
c_t_list = []
flat_obs = []
for beam_index, beam in enumerate(beams):
for inf_state in beam:
beam_indices.append(beam_index)
u_t_list.append(inf_state.last_action_embedding)
h_t_list.append(inf_state.h_t.unsqueeze(0))
c_t_list.append(inf_state.c_t.unsqueeze(0))
flat_obs.append(inf_state.observation)
u_t_prev = torch.stack(u_t_list, dim=0)
assert len(u_t_prev.shape) == 2
f_t_list = self._feature_variables(flat_obs) # Image features from obs
all_u_t, is_valid, is_valid_numpy = self._action_variable(flat_obs)
h_t = torch.cat(h_t_list, dim=0)
c_t = torch.cat(c_t_list, dim=0)
            assert len(f_t_list) == 1, 'for now, only works with MeanPooled feature'
h_t, c_t, alpha, logit, alpha_v = self.decoder(
u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx[beam_indices], seq_mask[beam_indices])
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
# # Mask outputs where agent can't move forward
# no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]
if mask_undo:
masked_logit = logit.clone()
else:
masked_logit = logit
log_probs = F.log_softmax(logit, dim=1).data
# force ending if we've reached the max time steps
# if t == self.episode_len - 1:
# action_scores = log_probs[:,self.end_index].unsqueeze(-1)
# action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
# else:
#_, action_indices = masked_logit.data.topk(min(successor_size, logit.size()[1]), dim=1)
_, action_indices = masked_logit.data.topk(logit.size()[1], dim=1) # todo: fix this
action_scores = log_probs.gather(1, action_indices)
assert action_scores.size() == action_indices.size()
start_index = 0
assert len(beams) == len(world_states)
all_successors = []
for beam_index, (beam, beam_world_states) in enumerate(zip(beams, world_states)):
successors = []
end_index = start_index + len(beam)
assert len(beam_world_states) == len(beam)
if beam:
for inf_index, (inf_state, world_state, action_score_row) in \
enumerate(zip(beam, beam_world_states, log_probs[start_index:end_index])):
flat_index = start_index + inf_index
for action_index, action_score in enumerate(action_score_row):
if is_valid_numpy[flat_index, action_index] == 0:
continue
successors.append(
InferenceState(prev_inference_state=inf_state,
world_state=world_state, # will be updated later after successors are pruned
observation=flat_obs[flat_index], # will be updated later after successors are pruned
flat_index=None,
last_action=action_index,
last_action_embedding=all_u_t[flat_index, action_index].detach(),
action_count=inf_state.action_count + 1,
score=inf_state.score + action_score,
h_t=h_t[flat_index], c_t=c_t[flat_index],
last_alpha=alpha[flat_index].data)
)
start_index = end_index
successors = sorted(successors, key=lambda t: t.score, reverse=True)
all_successors.append(successors)
successor_world_states = [
[inf_state.world_state for inf_state in successors]
for successors in all_successors
]
successor_env_actions = [
[inf_state.last_action for inf_state in successors]
for successors in all_successors
]
successor_last_obs = [
[inf_state.observation for inf_state in successors]
for successors in all_successors
]
successor_world_states = self.env.step(successor_world_states, successor_env_actions, successor_last_obs, beamed=True)
all_successors = structured_map(lambda inf_state, world_state: inf_state._replace(world_state=world_state),
all_successors, successor_world_states, nested=True)
# if all_successors[0]:
# print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, all_successors[0][0].world_state, all_successors[0][0].last_action, all_successors[0][0].score))
assert len(all_successors) == len(state_cache)
new_beams = []
for beam_index, (successors, instance_cache) in enumerate(zip(all_successors, state_cache)):
# early stop if we've already built a sizable completion list
instance_completed = completed[beam_index]
instance_completed_holding = completed_holding[beam_index]
if len(instance_completed) >= completion_size:
new_beams.append([])
continue
for successor in successors:
ws_keys = successor.world_state[0:first_n_ws_key]
if successor.last_action == 0 or successor.action_count == self.episode_len:
if ws_keys not in instance_completed_holding or instance_completed_holding[ws_keys][0].score < successor.score:
instance_completed_holding[ws_keys] = (successor, False)
else:
if ws_keys not in instance_cache or instance_cache[ws_keys][0].score < successor.score:
instance_cache[ws_keys] = (successor, False)
# third value: did this come from completed_holding?
uncompleted_to_consider = ((ws_keys, inf_state, False) for (ws_keys, (inf_state, expanded)) in instance_cache.items() if not expanded)
completed_to_consider = ((ws_keys, inf_state, True) for (ws_keys, (inf_state, expanded)) in instance_completed_holding.items() if not expanded)
import itertools
import heapq
to_consider = itertools.chain(uncompleted_to_consider, completed_to_consider)
ws_keys_and_inf_states = heapq.nlargest(successor_size, to_consider, key=lambda pair: pair[1].score)
new_beam = []
for ws_keys, inf_state, is_completed in ws_keys_and_inf_states:
if is_completed:
assert instance_completed_holding[ws_keys] == (inf_state, False)
instance_completed_holding[ws_keys] = (inf_state, True)
if ws_keys not in instance_completed or instance_completed[ws_keys].score < inf_state.score:
instance_completed[ws_keys] = inf_state
else:
instance_cache[ws_keys] = (inf_state, True)
new_beam.append(inf_state)
if len(instance_completed) >= completion_size:
new_beams.append([])
else:
new_beams.append(new_beam)
beams = new_beams
# Early exit if all ended
if not any(beam for beam in beams):
break
world_states = [
[inf_state.world_state for inf_state in beam]
for beam in beams
]
successor_obs = self.env.observe(world_states, beamed=True)
beams = structured_map(lambda inf_state, obs: inf_state._replace(observation=obs),
beams, successor_obs, nested=True)
update_traversed_lists(beams)
completed_list = []
for this_completed in completed:
completed_list.append(sorted(this_completed.values(), key=lambda t: t.score, reverse=True)[:completion_size])
completed_ws = [
[inf_state.world_state for inf_state in comp_l]
for comp_l in completed_list
]
completed_obs = self.env.observe(completed_ws, beamed=True)
completed_list = structured_map(lambda inf_state, obs: inf_state._replace(observation=obs),
completed_list, completed_obs, nested=True)
# TODO: consider moving observations and this update earlier so that we don't have to traverse as far back
update_traversed_lists(completed_list)
# TODO: sanity check the traversed lists here
trajs = []
for this_completed in completed_list:
assert this_completed
this_trajs = []
for inf_state in this_completed:
path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
# this will have messed-up headings for (at least some) starting locations because of
# discretization, so read from the observations instead
## path = [(obs.viewpointId, state.heading, state.elevation)
## for state in path_states]
trajectory = [path_element_from_observation(ob) for ob in path_observations]
this_trajs.append({
'instr_id': path_observations[0]['instr_id'],
'instr_encoding': path_observations[0]['instr_encoding'],
'trajectory': trajectory,
'observations': path_observations,
'actions': path_actions,
'score': inf_state.score,
'scores': path_scores,
'attentions': path_attentions
})
trajs.append(this_trajs)
# completed_list: list of lists of final inference states corresponding to the candidates, one list per instance
# traversed_lists: list of "physical states" that the robot has explored, one per instance
return trajs, completed_list, traversed_lists
def set_beam_size(self, beam_size):
if self.env.beam_size < beam_size:
self.env.set_beam_size(beam_size)
self.beam_size = beam_size
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, beam_size=1):
''' Evaluate once on each instruction in the current environment '''
if not allow_cheat: # permitted for purpose of calculating validation loss only
assert feedback in ['argmax', 'sample'] # no cheating by using teacher at test time!
self.feedback = feedback
if use_dropout:
self.encoder.train()
self.decoder.train()
else:
self.encoder.eval()
self.decoder.eval()
self.set_beam_size(beam_size)
return super(Seq2SeqAgent, self).test()
def train(self, encoder_optimizer, decoder_optimizer, n_iters, feedback='teacher'):
''' Train for a given number of iterations '''
assert all(f in self.feedback_options for f in feedback.split("+"))
self.feedback = feedback
self.encoder.train()
self.decoder.train()
self.losses = []
it = range(1, n_iters + 1)
try:
import tqdm
it = tqdm.tqdm(it)
        except ImportError:  # tqdm is optional; fall back to a plain range
pass
for _ in it:
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
self._rollout_with_loss()
self.loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
def _encoder_and_decoder_paths(self, base_path):
return base_path + "_enc", base_path + "_dec"
def save(self, path):
''' Snapshot models '''
encoder_path, decoder_path = self._encoder_and_decoder_paths(path)
torch.save(self.encoder.state_dict(), encoder_path)
torch.save(self.decoder.state_dict(), decoder_path)
def load(self, path, **kwargs):
''' Loads parameters (but not training state) '''
encoder_path, decoder_path = self._encoder_and_decoder_paths(path)
self.encoder.load_state_dict(torch.load(encoder_path, **kwargs))
self.decoder.load_state_dict(torch.load(decoder_path, **kwargs))
| avg_line_length: 46.218147 | max_line_length: 234 | alphanum_fraction: 0.586337 |

| hexsha: 6e03a2b93203864e2d2de2fb166f756912e6a3fa | size: 9,173 | ext: py | lang: Python |
| max_stars_repo_path: test/acceptance/src/integration/cli-sd/cli_instances_management/steps.py | max_stars_repo_name: ealogar/servicedirectory | max_stars_repo_head_hexsha: fb4f4bfa8b499b93c03af589ef2f34c08a830b17 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | stars_event_min_datetime: null | stars_event_max_datetime: null |
| max_issues_repo_path: test/acceptance/src/integration/cli-sd/cli_instances_management/steps.py | max_issues_repo_name: ealogar/servicedirectory | max_issues_repo_head_hexsha: fb4f4bfa8b499b93c03af589ef2f34c08a830b17 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | issues_event_min_datetime: null | issues_event_max_datetime: null |
| max_forks_repo_path: test/acceptance/src/integration/cli-sd/cli_instances_management/steps.py | max_forks_repo_name: ealogar/servicedirectory | max_forks_repo_head_hexsha: fb4f4bfa8b499b93c03af589ef2f34c08a830b17 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | forks_event_min_datetime: null | forks_event_max_datetime: null |
# -*- coding: utf-8 -*-
'''
(c) Copyright 2013 Telefonica, I+D. Printed in Spain (Europe). All Rights
Reserved.
The copyright to the software program(s) is property of Telefonica I+D.
The program(s) may be used and or copied only with the express written
consent of Telefonica I+D or in accordance with the terms and conditions
stipulated in the agreement/contract under which the program(s) have
been supplied.
'''
from lettuce import step
from common.mongo_utils import MongoUtils
from common.cli_utils import CLIUtils
from component.common.mock_utils import SDMockUtils
mongo_utils = MongoUtils()
cli_utils = CLIUtils()
mock_utils = SDMockUtils()
#****************** CLI configuration steps ***************************************
@step(u'the CLI is installed and ready to be executed')
def the_CLI_is_installed_and_ready_to_be_executed(step):
cli_utils.back_up_config_file()
cli_utils.start_cli()
@step(u'the CLI is configured with the configuration (\d+):')
def the_CLI_is_configured_with_the_configuration(step, cliconfig_index):
cli_utils.the_CLI_is_configured_with_the_configuration(step, cliconfig_index)
@step(u'the CLI config file has been removed')
def the_CLI_config_file_has_been_removed(step):
cli_utils.delete_cli_config()
@step(u'the CLI config file has lost the property (\d+):')
def the_CLI_config_file_has_lost_the_property(step, property_index):
cli_utils.erase_property(step.hashes[int(property_index)]["property"])
@step(u'the config file is restored')
def the_config_file_is_restored(step):
cli_utils.restore_config_file()
#****************** CLI operations ***************************************
@step(u'I request the info')
def I_request_the_info(step):
cli_utils.send_cli("info")
@step(u'I request the operation (\d+):')
def I_request_the_operation(step, operation_index):
cli_utils.send_cli((step.hashes[int(operation_index)]["options"] + step.hashes[int(operation_index)]["operation"] +\
" " + step.hashes[int(operation_index)]["arguments"]))
@step(u'a class is created with the CLI and operation values in (\d+):')
def a_class_is_created_with_the_CLI_and_operation_values(step, operation_index):
I_request_the_operation(step, operation_index)
@step(u'an instance is created with the CLI and operation values in (\d+):')
def an_instance_is_created_with_the_CLI_and_operation_values(step, operation_index):
I_request_the_operation(step, operation_index)
#****************** Mock configurations steps ***************************************
@step(u'the SD is ready to return the version info:')
def the_SD_is_i_ready_to_return_the_version_info(step):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/info", 200, 0)
@step(u'the SD is ready to return an invalid data for resources in (\d+)')
def the_SD_is_i_ready_to_return_an_invalid_format(step, invalid_index):
mock_utils.set_invalid_data(step.hashes[int(invalid_index)]["data"], step.hashes[int(invalid_index)]["resource"], 200, 0)
@step(u'the SD is ready to return the class:')
def the_SD_is_i_ready_to_return_the_class(step):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes", 201, 0)
@step(u'the SD is ready to return a specific class:')
def the_SD_is_i_ready_to_return_a_specific_class(step):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/test", 201, 0)
@step(u'the SD is ready to return the updated class:')
def the_SD_is_i_ready_to_return_the_updated_class(step):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/test", 200, 0)
@step(u'the SD is ready to return the classes:')
def the_SD_is_i_ready_to_return_the_classes(step):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes", 200)
@step(u'the SD is ready to return the following instances of class "([^"]*)":')
def the_SD_is_i_ready_to_return_the_following_instances(step, class_name):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/" + class_name + "/instances", 200)
@step(u'the SD is ready to return no instances of class "([^"]*)"')
def the_SD_is_i_ready_to_return_no_instances(step, class_name):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/" + class_name + "/instances", 200)
@step(u'the SD is ready to return the instance:')
def the_SD_is_i_ready_to_return_the_instance(step):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/test/instances", 201,0)
@step(u'the SD is ready to return the instance (\d+):')
def the_SD_is_i_ready_to_return_the_instance_configured(step, sd_config_index):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/test/instances", 201,sd_config_index)
@step(u'the SD is ready to return no classes')
def the_SD_is_i_ready_to_return_no_classes(step):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes", 200)
@step(u'the SD is ready to accept the deletion of resource (\d+):')
def the_SD_is_i_ready_to_accept_the_deletion_of_resource(step, sd_config_index):
resource = step.hashes[int(sd_config_index)]["resource"]
del step.hashes[int(sd_config_index)]["resource"]
mock_utils.get_request_and_send_response_of_type_with_data(step, resource, 204, 0)
@step(u'the SD is ready to return the class with timeout (\d+):')
def the_SD_is_i_ready_to_return_the_class_with_timeout(step, timeout):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes", 200, None, timeout)
@step(u'the SD is ready to return an error (\d+):')
def the_SD_is_i_ready_to_return_an_error(step, error_code):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes", error_code, 0)
@step(u'the SD is ready to return an error (\d+) for instance creation:')
def the_SD_is_i_ready_to_return_an_error_for_instance_creation(step, error_code):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/test/instances", error_code, 0)
@step(u'the SD is ready to return an error (\d+) in the class:')
def the_SD_is_i_ready_to_return_an_error_the_class(step, error_code):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/test", error_code, 0)
@step(u'the SD is ready to return an error (\d+) in the class (.*):')
def the_SD_is_i_ready_to_return_an_error_the_class_class_name(step, error_code, class_name):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/" + class_name, error_code, 0)
@step(u'the SD is ready to return an error (\d+) in the instance (.*) of class (.*):')
def the_SD_is_i_ready_to_return_an_error_the_instance(step, error_code, instance_id, class_name):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/" + \
class_name + "/instances/" + instance_id, error_code, 0)
@step(u'the SD is ready to return the instance (.*) of class "(.*)":')
def the_SD_is_i_ready_to_return_the_instance_of_class(step, instance_id, class_name):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/" + \
class_name + "/instances/" + instance_id, 200, 0)
@step(u'the SD is ready to return an empty instance (.*) of class "(.*)":')
def the_SD_is_i_ready_to_return_an_empty_instance_of_class(step, instance_id, class_name):
mock_utils.get_request_and_send_response_of_type_with_data(step, "sd/v1/classes/" + \
class_name + "/instances/" + instance_id, 200, 0)
#****************** Results validation steps ***************************************
@step(u'the result set contains the data (\d+):')
def the_result_set_contains_the_data(step, data_index):
cli_utils.validate_data(step, data_index)
@step(u'the result set contains the item (\d+):')
def the_result_set_contains_the_item(step, data_index):
cli_utils.validate_item(step, data_index)
@step(u'the result set contains:')
def the_result_set_contains(step):
cli_utils.validate_collection(step)
@step(u'the result set contains the help info')
def the_result_set_contains_the_help_info(step):
cli_utils.validate_help(step)
@step(u'the configuration contains the data (\d+):')
def the_configuration_contains_the_data(step, data_index):
cli_utils.validate_configuration(step, data_index)
@step(u'the error contains the data (\d+):')
def the_error_contains_the_data(step, data_index):
cli_utils.validate_error(step, data_index)
@step(u'the SD in instances collection of class "(.*)" has received the params (\d+):')
def the_SD_in_instances_collection_of_class_has_received_the_params(step, class_name, param_index):
mock_utils.validate_stored_request("sd/v1/classes/" + class_name + "/instances", step.hashes[int(param_index)])
@step(u'the SD in instances collection of class "(.*)" has received the body (\d+):')
def the_SD_in_instances_collection_of_class_has_received_the_body(step, class_name, body_index):
mock_utils.validate_stored_request("sd/v1/classes/" + class_name + "/instances", None , step.hashes[int(body_index)])
| avg_line_length: 41.31982 | max_line_length: 125 | alphanum_fraction: 0.76082 |

| hexsha: b0b83422e1444fe0320542b01790d1ed7165b284 | size: 1,508 | ext: py | lang: Python |
| max_stars_repo_path: catalog/bindings/wfs/transaction_response_type.py | max_stars_repo_name: NIVANorge/s-enda-playground | max_stars_repo_head_hexsha: 56ae0a8978f0ba8a5546330786c882c31e17757a | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | stars_event_min_datetime: null | stars_event_max_datetime: null |
| max_issues_repo_path: catalog/bindings/wfs/transaction_response_type.py | max_issues_repo_name: NIVANorge/s-enda-playground | max_issues_repo_head_hexsha: 56ae0a8978f0ba8a5546330786c882c31e17757a | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | issues_event_min_datetime: null | issues_event_max_datetime: null |
| max_forks_repo_path: catalog/bindings/wfs/transaction_response_type.py | max_forks_repo_name: NIVANorge/s-enda-playground | max_forks_repo_head_hexsha: 56ae0a8978f0ba8a5546330786c882c31e17757a | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | forks_event_min_datetime: null | forks_event_max_datetime: null |
from dataclasses import dataclass, field
from typing import Optional
from bindings.wfs.action_results_type import ActionResultsType
from bindings.wfs.transaction_summary_type import TransactionSummaryType
__NAMESPACE__ = "http://www.opengis.net/wfs/2.0"
@dataclass
class TransactionResponseType:
transaction_summary: Optional[TransactionSummaryType] = field(
default=None,
metadata={
"name": "TransactionSummary",
"type": "Element",
"namespace": "http://www.opengis.net/wfs/2.0",
"required": True,
},
)
insert_results: Optional[ActionResultsType] = field(
default=None,
metadata={
"name": "InsertResults",
"type": "Element",
"namespace": "http://www.opengis.net/wfs/2.0",
},
)
update_results: Optional[ActionResultsType] = field(
default=None,
metadata={
"name": "UpdateResults",
"type": "Element",
"namespace": "http://www.opengis.net/wfs/2.0",
},
)
replace_results: Optional[ActionResultsType] = field(
default=None,
metadata={
"name": "ReplaceResults",
"type": "Element",
"namespace": "http://www.opengis.net/wfs/2.0",
},
)
version: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
"pattern": r"2\.0\.\d+",
},
)
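# Rough sketch (an assumption, not taken from the source) of the WFS 2.0 XML this
# dataclass is meant to bind, using only the element names and namespace declared in
# the field metadata above:
#
#   <wfs:TransactionResponse xmlns:wfs="http://www.opengis.net/wfs/2.0" version="2.0.0">
#     <wfs:TransactionSummary>...</wfs:TransactionSummary>
#     <wfs:InsertResults>...</wfs:InsertResults>
#     <wfs:UpdateResults>...</wfs:UpdateResults>
#     <wfs:ReplaceResults>...</wfs:ReplaceResults>
#   </wfs:TransactionResponse>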
| avg_line_length: 29 | max_line_length: 72 | alphanum_fraction: 0.56366 |

| hexsha: d5d9150e4e8515f71c862ec54e9ddd782cce19e5 | size: 19,378 | ext: py | lang: Python |
| max_stars_repo_path: homeassistant/components/sensor/fitbit.py | max_stars_repo_name: shanbs/home-assistant | max_stars_repo_head_hexsha: 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 2 | stars_event_min_datetime: 2017-10-26T19:43:55.000Z | stars_event_max_datetime: 2017-12-30T23:29:00.000Z |
| max_issues_repo_path: homeassistant/components/sensor/fitbit.py | max_issues_repo_name: shanbs/home-assistant | max_issues_repo_head_hexsha: 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 3 | issues_event_min_datetime: 2021-09-08T03:29:36.000Z | issues_event_max_datetime: 2022-03-12T00:59:48.000Z |
| max_forks_repo_path: homeassistant/components/sensor/fitbit.py | max_forks_repo_name: shanbs/home-assistant | max_forks_repo_head_hexsha: 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | forks_event_min_datetime: 2019-12-18T20:06:33.000Z | forks_event_max_datetime: 2019-12-18T20:06:33.000Z |
"""
Support for the Fitbit API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fitbit/
"""
import os
import logging
import datetime
import time
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.const import CONF_UNIT_SYSTEM
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
REQUIREMENTS = ['fitbit==0.3.0']
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
ATTR_ACCESS_TOKEN = 'access_token'
ATTR_REFRESH_TOKEN = 'refresh_token'
ATTR_CLIENT_ID = 'client_id'
ATTR_CLIENT_SECRET = 'client_secret'
ATTR_LAST_SAVED_AT = 'last_saved_at'
CONF_MONITORED_RESOURCES = 'monitored_resources'
CONF_CLOCK_FORMAT = 'clock_format'
ATTRIBUTION = 'Data provided by Fitbit.com'
DEPENDENCIES = ['http']
FITBIT_AUTH_CALLBACK_PATH = '/api/fitbit/callback'
FITBIT_AUTH_START = '/api/fitbit'
FITBIT_CONFIG_FILE = 'fitbit.conf'
FITBIT_DEFAULT_RESOURCES = ['activities/steps']
SCAN_INTERVAL = datetime.timedelta(minutes=30)
DEFAULT_CONFIG = {
'client_id': 'CLIENT_ID_HERE',
'client_secret': 'CLIENT_SECRET_HERE'
}
FITBIT_RESOURCES_LIST = {
'activities/activityCalories': ['Activity Calories', 'cal', 'fire'],
'activities/calories': ['Calories', 'cal', 'fire'],
'activities/caloriesBMR': ['Calories BMR', 'cal', 'fire'],
'activities/distance': ['Distance', '', 'map-marker'],
'activities/elevation': ['Elevation', '', 'walk'],
'activities/floors': ['Floors', 'floors', 'walk'],
'activities/heart': ['Resting Heart Rate', 'bpm', 'heart-pulse'],
'activities/minutesFairlyActive':
['Minutes Fairly Active', 'minutes', 'walk'],
'activities/minutesLightlyActive':
['Minutes Lightly Active', 'minutes', 'walk'],
'activities/minutesSedentary':
['Minutes Sedentary', 'minutes', 'seat-recline-normal'],
'activities/minutesVeryActive': ['Minutes Very Active', 'minutes', 'run'],
'activities/steps': ['Steps', 'steps', 'walk'],
'activities/tracker/activityCalories':
['Tracker Activity Calories', 'cal', 'fire'],
'activities/tracker/calories': ['Tracker Calories', 'cal', 'fire'],
'activities/tracker/distance': ['Tracker Distance', '', 'map-marker'],
'activities/tracker/elevation': ['Tracker Elevation', '', 'walk'],
'activities/tracker/floors': ['Tracker Floors', 'floors', 'walk'],
'activities/tracker/minutesFairlyActive':
['Tracker Minutes Fairly Active', 'minutes', 'walk'],
'activities/tracker/minutesLightlyActive':
['Tracker Minutes Lightly Active', 'minutes', 'walk'],
'activities/tracker/minutesSedentary':
['Tracker Minutes Sedentary', 'minutes', 'seat-recline-normal'],
'activities/tracker/minutesVeryActive':
['Tracker Minutes Very Active', 'minutes', 'run'],
'activities/tracker/steps': ['Tracker Steps', 'steps', 'walk'],
'body/bmi': ['BMI', 'BMI', 'human'],
'body/fat': ['Body Fat', '%', 'human'],
'body/weight': ['Weight', '', 'human'],
'devices/battery': ['Battery', None, None],
'sleep/awakeningsCount':
['Awakenings Count', 'times awaken', 'sleep'],
'sleep/efficiency': ['Sleep Efficiency', '%', 'sleep'],
'sleep/minutesAfterWakeup': ['Minutes After Wakeup', 'minutes', 'sleep'],
'sleep/minutesAsleep': ['Sleep Minutes Asleep', 'minutes', 'sleep'],
'sleep/minutesAwake': ['Sleep Minutes Awake', 'minutes', 'sleep'],
'sleep/minutesToFallAsleep':
['Sleep Minutes to Fall Asleep', 'minutes', 'sleep'],
'sleep/startTime': ['Sleep Start Time', None, 'clock'],
'sleep/timeInBed': ['Sleep Time in Bed', 'minutes', 'hotel']
}
FITBIT_MEASUREMENTS = {
'en_US': {
'duration': 'ms',
'distance': 'mi',
'elevation': 'ft',
'height': 'in',
'weight': 'lbs',
'body': 'in',
'liquids': 'fl. oz.',
'blood glucose': 'mg/dL',
'battery': '',
},
'en_GB': {
'duration': 'milliseconds',
'distance': 'kilometers',
'elevation': 'meters',
'height': 'centimeters',
'weight': 'stone',
'body': 'centimeters',
'liquids': 'milliliters',
'blood glucose': 'mmol/L',
'battery': '',
},
'metric': {
'duration': 'milliseconds',
'distance': 'kilometers',
'elevation': 'meters',
'height': 'centimeters',
'weight': 'kilograms',
'body': 'centimeters',
'liquids': 'milliliters',
'blood glucose': 'mmol/L',
'battery': '',
}
}
BATTERY_LEVELS = {
'High': 100,
'Medium': 50,
'Low': 20,
'Empty': 0
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_RESOURCES, default=FITBIT_DEFAULT_RESOURCES):
vol.All(cv.ensure_list, [vol.In(FITBIT_RESOURCES_LIST)]),
vol.Optional(CONF_CLOCK_FORMAT, default='24H'):
vol.In(['12H', '24H']),
vol.Optional(CONF_UNIT_SYSTEM, default='default'):
vol.In(['en_GB', 'en_US', 'metric', 'default'])
})
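# Illustrative configuration.yaml snippet (an assumption based on PLATFORM_SCHEMA above,
# not taken from the original source); the optional keys and their allowed values map
# directly onto the voluptuous schema:
#
#   sensor:
#     - platform: fitbit
#       clock_format: "12H"            # or "24H" (default)
#       unit_system: metric            # en_GB, en_US, metric or default
#       monitored_resources:
#         - activities/steps           # default resource
#         - sleep/timeInBed
#         - devices/battery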
def request_app_setup(hass, config, add_entities, config_path,
discovery_info=None):
"""Assist user with configuring the Fitbit dev application."""
configurator = hass.components.configurator
def fitbit_configuration_callback(callback_data):
"""Handle configuration updates."""
config_path = hass.config.path(FITBIT_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
error_msg = ("You didn't correctly modify fitbit.conf",
" please try again")
configurator.notify_errors(_CONFIGURING['fitbit'],
error_msg)
else:
setup_platform(hass, config, add_entities, discovery_info)
else:
setup_platform(hass, config, add_entities, discovery_info)
start_url = "{}{}".format(hass.config.api.base_url,
FITBIT_AUTH_CALLBACK_PATH)
description = """Please create a Fitbit developer app at
https://dev.fitbit.com/apps/new.
For the OAuth 2.0 Application Type choose Personal.
Set the Callback URL to {}.
They will provide you a Client ID and secret.
These need to be saved into the file located at: {}.
Then come back here and hit the below button.
""".format(start_url, config_path)
submit = "I have saved my Client ID and Client Secret into fitbit.conf."
_CONFIGURING['fitbit'] = configurator.request_config(
'Fitbit', fitbit_configuration_callback,
description=description, submit_caption=submit,
description_image="/static/images/config_fitbit_app.png"
)
def request_oauth_completion(hass):
"""Request user complete Fitbit OAuth2 flow."""
configurator = hass.components.configurator
if "fitbit" in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING['fitbit'], "Failed to register, please try again.")
return
def fitbit_configuration_callback(callback_data):
"""Handle configuration updates."""
start_url = '{}{}'.format(hass.config.api.base_url, FITBIT_AUTH_START)
description = "Please authorize Fitbit by visiting {}".format(start_url)
_CONFIGURING['fitbit'] = configurator.request_config(
'Fitbit', fitbit_configuration_callback,
description=description,
submit_caption="I have authorized Fitbit."
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Fitbit sensor."""
config_path = hass.config.path(FITBIT_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
request_app_setup(
hass, config, add_entities, config_path, discovery_info=None)
return False
else:
save_json(config_path, DEFAULT_CONFIG)
request_app_setup(
hass, config, add_entities, config_path, discovery_info=None)
return False
if "fitbit" in _CONFIGURING:
hass.components.configurator.request_done(_CONFIGURING.pop("fitbit"))
import fitbit
access_token = config_file.get(ATTR_ACCESS_TOKEN)
refresh_token = config_file.get(ATTR_REFRESH_TOKEN)
expires_at = config_file.get(ATTR_LAST_SAVED_AT)
if None not in (access_token, refresh_token):
authd_client = fitbit.Fitbit(config_file.get(ATTR_CLIENT_ID),
config_file.get(ATTR_CLIENT_SECRET),
access_token=access_token,
refresh_token=refresh_token,
expires_at=expires_at,
refresh_cb=lambda x: None)
if int(time.time()) - expires_at > 3600:
authd_client.client.refresh_token()
unit_system = config.get(CONF_UNIT_SYSTEM)
if unit_system == 'default':
authd_client.system = authd_client. \
user_profile_get()["user"]["locale"]
if authd_client.system != 'en_GB':
if hass.config.units.is_metric:
authd_client.system = 'metric'
else:
authd_client.system = 'en_US'
else:
authd_client.system = unit_system
dev = []
registered_devs = authd_client.get_devices()
clock_format = config.get(CONF_CLOCK_FORMAT)
for resource in config.get(CONF_MONITORED_RESOURCES):
# monitor battery for all linked FitBit devices
if resource == 'devices/battery':
for dev_extra in registered_devs:
dev.append(FitbitSensor(
authd_client, config_path, resource,
hass.config.units.is_metric, clock_format, dev_extra))
else:
dev.append(FitbitSensor(
authd_client, config_path, resource,
hass.config.units.is_metric, clock_format))
add_entities(dev, True)
else:
oauth = fitbit.api.FitbitOauth2Client(
config_file.get(ATTR_CLIENT_ID),
config_file.get(ATTR_CLIENT_SECRET))
redirect_uri = '{}{}'.format(hass.config.api.base_url,
FITBIT_AUTH_CALLBACK_PATH)
fitbit_auth_start_url, _ = oauth.authorize_token_url(
redirect_uri=redirect_uri,
scope=['activity', 'heartrate', 'nutrition', 'profile',
'settings', 'sleep', 'weight'])
hass.http.register_redirect(FITBIT_AUTH_START, fitbit_auth_start_url)
hass.http.register_view(FitbitAuthCallbackView(
config, add_entities, oauth))
request_oauth_completion(hass)
class FitbitAuthCallbackView(HomeAssistantView):
"""Handle OAuth finish callback requests."""
requires_auth = False
url = FITBIT_AUTH_CALLBACK_PATH
name = 'api:fitbit:callback'
def __init__(self, config, add_entities, oauth):
"""Initialize the OAuth callback view."""
self.config = config
self.add_entities = add_entities
self.oauth = oauth
@callback
def get(self, request):
"""Finish OAuth callback request."""
from oauthlib.oauth2.rfc6749.errors import MismatchingStateError
from oauthlib.oauth2.rfc6749.errors import MissingTokenError
hass = request.app['hass']
data = request.query
response_message = """Fitbit has been successfully authorized!
You can close this window now!"""
result = None
if data.get('code') is not None:
redirect_uri = '{}{}'.format(
hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH)
try:
result = self.oauth.fetch_access_token(data.get('code'),
redirect_uri)
except MissingTokenError as error:
_LOGGER.error("Missing token: %s", error)
response_message = """Something went wrong when
attempting authenticating with Fitbit. The error
encountered was {}. Please try again!""".format(error)
except MismatchingStateError as error:
_LOGGER.error("Mismatched state, CSRF error: %s", error)
response_message = """Something went wrong when
attempting authenticating with Fitbit. The error
encountered was {}. Please try again!""".format(error)
else:
_LOGGER.error("Unknown error when authing")
response_message = """Something went wrong when
attempting authenticating with Fitbit.
An unknown error occurred. Please try again!
"""
if result is None:
_LOGGER.error("Unknown error when authing")
response_message = """Something went wrong when
attempting authenticating with Fitbit.
An unknown error occurred. Please try again!
"""
html_response = """<html><head><title>Fitbit Auth</title></head>
<body><h1>{}</h1></body></html>""".format(response_message)
if result:
config_contents = {
ATTR_ACCESS_TOKEN: result.get('access_token'),
ATTR_REFRESH_TOKEN: result.get('refresh_token'),
ATTR_CLIENT_ID: self.oauth.client_id,
ATTR_CLIENT_SECRET: self.oauth.client_secret,
ATTR_LAST_SAVED_AT: int(time.time())
}
save_json(hass.config.path(FITBIT_CONFIG_FILE), config_contents)
hass.async_add_job(setup_platform, hass, self.config,
self.add_entities)
return html_response
class FitbitSensor(Entity):
"""Implementation of a Fitbit sensor."""
def __init__(self, client, config_path, resource_type,
is_metric, clock_format, extra=None):
"""Initialize the Fitbit sensor."""
self.client = client
self.config_path = config_path
self.resource_type = resource_type
self.is_metric = is_metric
self.clock_format = clock_format
self.extra = extra
self._name = FITBIT_RESOURCES_LIST[self.resource_type][0]
if self.extra:
self._name = '{0} Battery'.format(self.extra.get('deviceVersion'))
unit_type = FITBIT_RESOURCES_LIST[self.resource_type][1]
if unit_type == "":
split_resource = self.resource_type.split('/')
try:
measurement_system = FITBIT_MEASUREMENTS[self.client.system]
except KeyError:
if self.is_metric:
measurement_system = FITBIT_MEASUREMENTS['metric']
else:
measurement_system = FITBIT_MEASUREMENTS['en_US']
unit_type = measurement_system[split_resource[-1]]
self._unit_of_measurement = unit_type
self._state = 0
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self.resource_type == 'devices/battery' and self.extra:
battery_level = BATTERY_LEVELS[self.extra.get('battery')]
return icon_for_battery_level(
battery_level=battery_level, charging=None)
return 'mdi:{}'.format(FITBIT_RESOURCES_LIST[self.resource_type][2])
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
if self.extra:
attrs['model'] = self.extra.get('deviceVersion')
attrs['type'] = self.extra.get('type').lower()
return attrs
def update(self):
"""Get the latest data from the Fitbit API and update the states."""
if self.resource_type == 'devices/battery' and self.extra:
self._state = self.extra.get('battery')
else:
container = self.resource_type.replace("/", "-")
response = self.client.time_series(self.resource_type, period='7d')
raw_state = response[container][-1].get('value')
if self.resource_type == 'activities/distance':
self._state = format(float(raw_state), '.2f')
elif self.resource_type == 'activities/tracker/distance':
self._state = format(float(raw_state), '.2f')
elif self.resource_type == 'body/bmi':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'body/fat':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'body/weight':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'sleep/startTime':
if raw_state == '':
self._state = '-'
elif self.clock_format == '12H':
hours, minutes = raw_state.split(':')
hours, minutes = int(hours), int(minutes)
setting = 'AM'
if hours > 12:
setting = 'PM'
hours -= 12
elif hours == 0:
hours = 12
self._state = '{}:{:02d} {}'.format(hours, minutes,
setting)
else:
self._state = raw_state
else:
if self.is_metric:
self._state = raw_state
else:
try:
self._state = '{0:,}'.format(int(raw_state))
except TypeError:
self._state = raw_state
if self.resource_type == 'activities/heart':
self._state = response[container][-1]. \
get('value').get('restingHeartRate')
token = self.client.client.session.token
config_contents = {
ATTR_ACCESS_TOKEN: token.get('access_token'),
ATTR_REFRESH_TOKEN: token.get('refresh_token'),
ATTR_CLIENT_ID: self.client.client.client_id,
ATTR_CLIENT_SECRET: self.client.client.client_secret,
ATTR_LAST_SAVED_AT: int(time.time())
}
save_json(self.config_path, config_contents)
| avg_line_length: 38.833667 | max_line_length: 79 | alphanum_fraction: 0.602694 |

| hexsha: a176f9765da303d5f4504d2148f683bb096f3205 | size: 2,398 | ext: py | lang: Python |
| max_stars_repo_path: src/vendor/worker/workerdb.py | max_stars_repo_name: radomirklacza/C-BAS | max_stars_repo_head_hexsha: 5005cf43f57302dc0f58b9d1b9cf7e4e3ab70e32 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | stars_event_min_datetime: null | stars_event_max_datetime: null |
| max_issues_repo_path: src/vendor/worker/workerdb.py | max_issues_repo_name: radomirklacza/C-BAS | max_issues_repo_head_hexsha: 5005cf43f57302dc0f58b9d1b9cf7e4e3ab70e32 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | issues_event_min_datetime: null | issues_event_max_datetime: null |
| max_forks_repo_path: src/vendor/worker/workerdb.py | max_forks_repo_name: radomirklacza/C-BAS | max_forks_repo_head_hexsha: 5005cf43f57302dc0f58b9d1b9cf7e4e3ab70e32 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 2 | forks_event_min_datetime: 2017-08-07T15:24:05.000Z | forks_event_max_datetime: 2018-10-11T10:53:23.000Z |
import os.path
from datetime import datetime
from sqlalchemy import Table, Column, MetaData, ForeignKey, PickleType, DateTime, String, Integer, Text, create_engine, select, and_, or_, not_, event
from sqlalchemy.orm import scoped_session, sessionmaker, mapper
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from sqlalchemy.ext.declarative import declarative_base
import eisoil.core.pluginmanager as pm
import eisoil.core.log
logger=eisoil.core.log.getLogger('worker')
from eisoil.config import expand_eisoil_path
WORKERDB_PATH = expand_eisoil_path(pm.getService('config').get('worker.dbpath'))
WORKERDB_ENGINE = "sqlite:///%s" % (WORKERDB_PATH,)
# initialize sqlalchemy
db_engine = create_engine(WORKERDB_ENGINE, pool_recycle=6000) # please see the wiki for more info
db_session_factory = sessionmaker(autoflush=True, bind=db_engine, expire_on_commit=False) # the class which can create sessions (factory pattern)
db_session = scoped_session(db_session_factory) # still a session creator, but it will create _one_ session per thread and delegate all method calls to it
# we could limit the session's scope (lifetime) to one request, but for this plugin it is not necessary
Base = declarative_base() # get the base class for the ORM, which includes the metadata object (collection of table descriptions)
class JobDBEntry(Base):
__tablename__ = 'worker_jobs'
id = Column(Integer, primary_key=True)
service_name = Column(String)
callable_attr_str = Column(String)
params = Column(PickleType)
recurring_interval = Column(Integer)
next_execution = Column(DateTime)
Base.metadata.create_all(db_engine) # create the tables if they are not there yet
def getAllJobs():
"""Do not change the values of the records retrieved with this function. You might accedently change them in the database too. Unless you call updateJob"""
records = db_session.query(JobDBEntry).all()
return records
def addJob(job_db_entry):
"""Creates a config item, if it does not exist. If it already exists this function does not change anything."""
job_db_entry.id = None
db_session.add(job_db_entry)
db_session.commit()
def commit():
"""Commits the changes to objects in the session (e.g. a changed attribute in an object)."""
db_session.commit()
def delJob(job_db_entry):
db_session.delete(job_db_entry)
db_session.commit()
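# Minimal usage sketch (illustrative only; the service name, attribute string and
# parameter values are made up): persist a job and read it back through the helpers
# defined above.
#
#   from datetime import datetime
#   entry = JobDBEntry(service_name='worker', callable_attr_str='do_cleanup',
#                      params={'max_age': 3600}, recurring_interval=600,
#                      next_execution=datetime.utcnow())
#   addJob(entry)
#   for job in getAllJobs():
#       print(job.service_name, job.next_execution)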
| avg_line_length: 44.407407 | max_line_length: 159 | alphanum_fraction: 0.773561 |

| hexsha: b548913d71986fbeae20270b8e3f18f3bd295147 | size: 5,608 | ext: py | lang: Python |
| max_stars_repo_path: roundup/dist/command/build_scripts.py | max_stars_repo_name: jerrykan/herder | max_stars_repo_head_hexsha: 381f51198f21d232cc05d7c458638d78c0f25366 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_event_min_datetime: null | stars_event_max_datetime: null |
| max_issues_repo_path: roundup/dist/command/build_scripts.py | max_issues_repo_name: jerrykan/herder | max_issues_repo_head_hexsha: 381f51198f21d232cc05d7c458638d78c0f25366 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event_min_datetime: null | issues_event_max_datetime: null |
| max_forks_repo_path: roundup/dist/command/build_scripts.py | max_forks_repo_name: jerrykan/herder | max_forks_repo_head_hexsha: 381f51198f21d232cc05d7c458638d78c0f25366 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_event_min_datetime: null | forks_event_max_datetime: null |
#
# Copyright (C) 2009 Stefan Seefeld
# All rights reserved.
# For license terms see the file COPYING.txt.
#
from distutils.command.build_scripts import build_scripts as base
import sys, os, string
class build_scripts(base):
""" Overload the build_scripts command and create the scripts
from scratch, depending on the target platform.
You have to define the name of your package in an inherited
class (due to the delayed instantiation of command classes
in distutils, this cannot be passed to __init__).
        The scripts are created in a uniform scheme: they start the
        run() function in the module
        <packagename>.scripts.<mangled_scriptname>
        The mangling of script names replaces '-' and '/' characters
        with '_' and '.', so that they are valid module paths.
If the target platform is win32, create .bat files instead of
*nix shell scripts. Target platform is set to "win32" if main
command is 'bdist_wininst' or if the command is 'bdist' and
it has the list of formats (from command line or config file)
and the first item on that list is wininst. Otherwise
target platform is set to current (build) platform.
"""
package_name = 'roundup'
def initialize_options(self):
base.initialize_options(self)
self.script_preamble = None
self.target_platform = None
self.python_executable = None
def finalize_options(self):
base.finalize_options(self)
cmdopt=self.distribution.command_options
# find the target platform
if self.target_platform:
# TODO? allow explicit setting from command line
target = self.target_platform
if cmdopt.has_key("bdist_wininst"):
target = "win32"
elif cmdopt.get("bdist", {}).has_key("formats"):
formats = cmdopt["bdist"]["formats"][1].split(",")
if formats[0] == "wininst":
target = "win32"
else:
target = sys.platform
if len(formats) > 1:
self.warn(
"Scripts are built for %s only (requested formats: %s)"
% (target, ",".join(formats)))
else:
# default to current platform
target = sys.platform
        self.target_platform = target
# for native builds, use current python executable path;
# for cross-platform builds, use default executable name
if self.python_executable:
# TODO? allow command-line option
pass
if target == sys.platform:
self.python_executable = os.path.normpath(sys.executable)
else:
self.python_executable = "python"
# for windows builds, add ".bat" extension
if target == "win32":
# *nix-like scripts may be useful also on win32 (cygwin)
# to build both script versions, use:
#self.scripts = list(self.scripts) + [script + ".bat"
# for script in self.scripts]
self.scripts = [script + ".bat" for script in self.scripts]
# tweak python path for installations outside main python library
if cmdopt.get("install", {}).has_key("prefix"):
prefix = os.path.expanduser(cmdopt['install']['prefix'][1])
version = '%d.%d'%sys.version_info[:2]
self.script_preamble = """
import sys
sys.path.insert(1, "%s/lib/python%s/site-packages")
"""%(prefix, version)
else:
self.script_preamble = ''
def copy_scripts(self):
""" Create each script listed in 'self.scripts'
"""
to_module = string.maketrans('-/', '_.')
self.mkpath(self.build_dir)
for script in self.scripts:
outfile = os.path.join(self.build_dir, os.path.basename(script))
#if not self.force and not newer(script, outfile):
# self.announce("not copying %s (up-to-date)" % script)
# continue
if self.dry_run:
self.announce("would create %s" % outfile)
continue
module = os.path.splitext(os.path.basename(script))[0]
module = string.translate(module, to_module)
script_vars = {
'python': self.python_executable,
'package': self.package_name,
'module': module,
'prefix': self.script_preamble,
}
self.announce("creating %s" % outfile)
file = open(outfile, 'w')
try:
# could just check self.target_platform,
# but looking at the script extension
# makes it possible to build both *nix-like
# and windows-like scripts on win32.
# may be useful for cygwin.
if os.path.splitext(outfile)[1] == ".bat":
file.write('@echo off\n'
'if NOT "%%_4ver%%" == "" "%(python)s" -c "from %(package)s.scripts.%(module)s import run; run()" %%$\n'
'if "%%_4ver%%" == "" "%(python)s" -c "from %(package)s.scripts.%(module)s import run; run()" %%*\n'
% script_vars)
else:
file.write('#! %(python)s\n%(prefix)s'
'from %(package)s.scripts.%(module)s import run\n'
'run()\n'
% script_vars)
finally:
file.close()
os.chmod(outfile, 0755)
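# Hedged illustration (not part of the original command class): with the name
# mangling in copy_scripts(), a script named "roundup-admin" maps to the module
# path "roundup.scripts.roundup_admin", so the generated *nix wrapper looks
# roughly like:
#
#   #! /usr/bin/python
#   <optional sys.path preamble>
#   from roundup.scripts.roundup_admin import run
#   run()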
| 39.216783
| 128
| 0.562589
|
23ddc3397ce44c9ccc78ecfbba6033f5a844f92f
| 17,244
|
py
|
Python
|
discord/enums.py
|
alexyy802/discord.io
|
99d6ec71aeb121f6887ff266d36d9d1851abe4db
|
[
"MIT"
] | null | null | null |
discord/enums.py
|
alexyy802/discord.io
|
99d6ec71aeb121f6887ff266d36d9d1851abe4db
|
[
"MIT"
] | null | null | null |
discord/enums.py
|
alexyy802/discord.io
|
99d6ec71aeb121f6887ff266d36d9d1851abe4db
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2021-present VincentRPS
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import types
from collections import namedtuple
from typing import Any, ClassVar, Dict, List, Optional, TYPE_CHECKING, Type, TypeVar
__all__ = (
'Enum',
'ChannelType',
'MessageType',
'VoiceRegion',
'SpeakingState',
'VerificationLevel',
'ContentFilter',
'Status',
'DefaultAvatar',
'AuditLogAction',
'AuditLogActionCategory',
'UserFlags',
'ActivityType',
'NotificationLevel',
'TeamMembershipState',
'WebhookType',
'ExpireBehaviour',
'ExpireBehavior',
'StickerType',
'StickerFormatType',
'InviteTarget',
'VideoQualityMode',
'ComponentType',
'ButtonStyle',
'StagePrivacyLevel',
'InteractionType',
'InteractionResponseType',
'NSFWLevel',
)
def _create_value_cls(name, comparable):
cls = namedtuple('_EnumValue_' + name, 'name value')
cls.__repr__ = lambda self: f'<{name}.{self.name}: {self.value!r}>'
cls.__str__ = lambda self: f'{name}.{self.name}'
if comparable:
cls.__le__ = lambda self, other: isinstance(other, self.__class__) and self.value <= other.value
cls.__ge__ = lambda self, other: isinstance(other, self.__class__) and self.value >= other.value
cls.__lt__ = lambda self, other: isinstance(other, self.__class__) and self.value < other.value
cls.__gt__ = lambda self, other: isinstance(other, self.__class__) and self.value > other.value
return cls
def _is_descriptor(obj):
return hasattr(obj, '__get__') or hasattr(obj, '__set__') or hasattr(obj, '__delete__')
class EnumMeta(type):
if TYPE_CHECKING:
__name__: ClassVar[str]
_enum_member_names_: ClassVar[List[str]]
_enum_member_map_: ClassVar[Dict[str, Any]]
_enum_value_map_: ClassVar[Dict[Any, Any]]
def __new__(cls, name, bases, attrs, *, comparable: bool = False):
value_mapping = {}
member_mapping = {}
member_names = []
value_cls = _create_value_cls(name, comparable)
for key, value in list(attrs.items()):
is_descriptor = _is_descriptor(value)
if key[0] == '_' and not is_descriptor:
continue
# Special case classmethod to just pass through
if isinstance(value, classmethod):
continue
if is_descriptor:
setattr(value_cls, key, value)
del attrs[key]
continue
try:
new_value = value_mapping[value]
except KeyError:
new_value = value_cls(name=key, value=value)
value_mapping[value] = new_value
member_names.append(key)
member_mapping[key] = new_value
attrs[key] = new_value
attrs['_enum_value_map_'] = value_mapping
attrs['_enum_member_map_'] = member_mapping
attrs['_enum_member_names_'] = member_names
attrs['_enum_value_cls_'] = value_cls
actual_cls = super().__new__(cls, name, bases, attrs)
value_cls._actual_enum_cls_ = actual_cls # type: ignore
return actual_cls
def __iter__(cls):
return (cls._enum_member_map_[name] for name in cls._enum_member_names_)
def __reversed__(cls):
return (cls._enum_member_map_[name] for name in reversed(cls._enum_member_names_))
def __len__(cls):
return len(cls._enum_member_names_)
def __repr__(cls):
return f'<enum {cls.__name__}>'
@property
def __members__(cls):
return types.MappingProxyType(cls._enum_member_map_)
def __call__(cls, value):
try:
return cls._enum_value_map_[value]
except (KeyError, TypeError):
raise ValueError(f"{value!r} is not a valid {cls.__name__}")
def __getitem__(cls, key):
return cls._enum_member_map_[key]
def __setattr__(cls, name, value):
raise TypeError('Enums are immutable.')
def __delattr__(cls, attr):
raise TypeError('Enums are immutable')
def __instancecheck__(self, instance):
# isinstance(x, Y)
# -> __instancecheck__(Y, x)
try:
return instance._actual_enum_cls_ is self
except AttributeError:
return False
if TYPE_CHECKING:
from enum import Enum
else:
class Enum(metaclass=EnumMeta):
@classmethod
def try_value(cls, value):
try:
return cls._enum_value_map_[value]
except (KeyError, TypeError):
return value
class ChannelType(Enum):
text = 0
private = 1
voice = 2
group = 3
category = 4
news = 5
store = 6
news_thread = 10
public_thread = 11
private_thread = 12
stage_voice = 13
def __str__(self):
return self.name
class MessageType(Enum):
default = 0
recipient_add = 1
recipient_remove = 2
call = 3
channel_name_change = 4
channel_icon_change = 5
pins_add = 6
new_member = 7
premium_guild_subscription = 8
premium_guild_tier_1 = 9
premium_guild_tier_2 = 10
premium_guild_tier_3 = 11
channel_follow_add = 12
guild_stream = 13
guild_discovery_disqualified = 14
guild_discovery_requalified = 15
guild_discovery_grace_period_initial_warning = 16
guild_discovery_grace_period_final_warning = 17
thread_created = 18
reply = 19
application_command = 20
thread_starter_message = 21
guild_invite_reminder = 22
class VoiceRegion(Enum):
us_west = 'us-west'
us_east = 'us-east'
us_south = 'us-south'
us_central = 'us-central'
eu_west = 'eu-west'
eu_central = 'eu-central'
singapore = 'singapore'
london = 'london'
sydney = 'sydney'
amsterdam = 'amsterdam'
frankfurt = 'frankfurt'
brazil = 'brazil'
hongkong = 'hongkong'
russia = 'russia'
japan = 'japan'
southafrica = 'southafrica'
south_korea = 'south-korea'
india = 'india'
europe = 'europe'
dubai = 'dubai'
vip_us_east = 'vip-us-east'
vip_us_west = 'vip-us-west'
vip_amsterdam = 'vip-amsterdam'
def __str__(self):
return self.value
class SpeakingState(Enum):
none = 0
voice = 1
soundshare = 2
priority = 4
def __str__(self):
return self.name
def __int__(self):
return self.value
class VerificationLevel(Enum, comparable=True):
none = 0
low = 1
medium = 2
high = 3
highest = 4
def __str__(self):
return self.name
class ContentFilter(Enum, comparable=True):
disabled = 0
no_role = 1
all_members = 2
def __str__(self):
return self.name
class Status(Enum):
online = 'online'
offline = 'offline'
idle = 'idle'
dnd = 'dnd'
do_not_disturb = 'dnd'
invisible = 'invisible'
def __str__(self):
return self.value
class DefaultAvatar(Enum):
blurple = 0
grey = 1
gray = 1
green = 2
orange = 3
red = 4
def __str__(self):
return self.name
class NotificationLevel(Enum, comparable=True):
all_messages = 0
only_mentions = 1
class AuditLogActionCategory(Enum):
create = 1
delete = 2
update = 3
class AuditLogAction(Enum):
# fmt: off
guild_update = 1
channel_create = 10
channel_update = 11
channel_delete = 12
overwrite_create = 13
overwrite_update = 14
overwrite_delete = 15
kick = 20
member_prune = 21
ban = 22
unban = 23
member_update = 24
member_role_update = 25
member_move = 26
member_disconnect = 27
bot_add = 28
role_create = 30
role_update = 31
role_delete = 32
invite_create = 40
invite_update = 41
invite_delete = 42
webhook_create = 50
webhook_update = 51
webhook_delete = 52
emoji_create = 60
emoji_update = 61
emoji_delete = 62
message_delete = 72
message_bulk_delete = 73
message_pin = 74
message_unpin = 75
integration_create = 80
integration_update = 81
integration_delete = 82
stage_instance_create = 83
stage_instance_update = 84
stage_instance_delete = 85
sticker_create = 90
sticker_update = 91
sticker_delete = 92
thread_create = 110
thread_update = 111
thread_delete = 112
# fmt: on
@property
def category(self) -> Optional[AuditLogActionCategory]:
# fmt: off
lookup: Dict[AuditLogAction, Optional[AuditLogActionCategory]] = {
AuditLogAction.guild_update: AuditLogActionCategory.update,
AuditLogAction.channel_create: AuditLogActionCategory.create,
AuditLogAction.channel_update: AuditLogActionCategory.update,
AuditLogAction.channel_delete: AuditLogActionCategory.delete,
AuditLogAction.overwrite_create: AuditLogActionCategory.create,
AuditLogAction.overwrite_update: AuditLogActionCategory.update,
AuditLogAction.overwrite_delete: AuditLogActionCategory.delete,
AuditLogAction.kick: None,
AuditLogAction.member_prune: None,
AuditLogAction.ban: None,
AuditLogAction.unban: None,
AuditLogAction.member_update: AuditLogActionCategory.update,
AuditLogAction.member_role_update: AuditLogActionCategory.update,
AuditLogAction.member_move: None,
AuditLogAction.member_disconnect: None,
AuditLogAction.bot_add: None,
AuditLogAction.role_create: AuditLogActionCategory.create,
AuditLogAction.role_update: AuditLogActionCategory.update,
AuditLogAction.role_delete: AuditLogActionCategory.delete,
AuditLogAction.invite_create: AuditLogActionCategory.create,
AuditLogAction.invite_update: AuditLogActionCategory.update,
AuditLogAction.invite_delete: AuditLogActionCategory.delete,
AuditLogAction.webhook_create: AuditLogActionCategory.create,
AuditLogAction.webhook_update: AuditLogActionCategory.update,
AuditLogAction.webhook_delete: AuditLogActionCategory.delete,
AuditLogAction.emoji_create: AuditLogActionCategory.create,
AuditLogAction.emoji_update: AuditLogActionCategory.update,
AuditLogAction.emoji_delete: AuditLogActionCategory.delete,
AuditLogAction.message_delete: AuditLogActionCategory.delete,
AuditLogAction.message_bulk_delete: AuditLogActionCategory.delete,
AuditLogAction.message_pin: None,
AuditLogAction.message_unpin: None,
AuditLogAction.integration_create: AuditLogActionCategory.create,
AuditLogAction.integration_update: AuditLogActionCategory.update,
AuditLogAction.integration_delete: AuditLogActionCategory.delete,
AuditLogAction.stage_instance_create: AuditLogActionCategory.create,
AuditLogAction.stage_instance_update: AuditLogActionCategory.update,
AuditLogAction.stage_instance_delete: AuditLogActionCategory.delete,
AuditLogAction.sticker_create: AuditLogActionCategory.create,
AuditLogAction.sticker_update: AuditLogActionCategory.update,
AuditLogAction.sticker_delete: AuditLogActionCategory.delete,
AuditLogAction.thread_create: AuditLogActionCategory.create,
AuditLogAction.thread_update: AuditLogActionCategory.update,
AuditLogAction.thread_delete: AuditLogActionCategory.delete,
}
# fmt: on
return lookup[self]
@property
def target_type(self) -> Optional[str]:
v = self.value
if v == -1:
return 'all'
elif v < 10:
return 'guild'
elif v < 20:
return 'channel'
elif v < 30:
return 'user'
elif v < 40:
return 'role'
elif v < 50:
return 'invite'
elif v < 60:
return 'webhook'
elif v < 70:
return 'emoji'
elif v == 73:
return 'channel'
elif v < 80:
return 'message'
elif v < 83:
return 'integration'
elif v < 90:
return 'stage_instance'
elif v < 93:
return 'sticker'
elif v < 113:
return 'thread'
class UserFlags(Enum):
staff = 1
partner = 2
hypesquad = 4
bug_hunter = 8
mfa_sms = 16
premium_promo_dismissed = 32
hypesquad_bravery = 64
hypesquad_brilliance = 128
hypesquad_balance = 256
early_supporter = 512
team_user = 1024
system = 4096
has_unread_urgent_messages = 8192
bug_hunter_level_2 = 16384
verified_bot = 65536
verified_bot_developer = 131072
discord_certified_moderator = 262144
class ActivityType(Enum):
unknown = -1
playing = 0
streaming = 1
listening = 2
watching = 3
custom = 4
competing = 5
def __int__(self):
return self.value
class TeamMembershipState(Enum):
invited = 1
accepted = 2
class WebhookType(Enum):
incoming = 1
channel_follower = 2
application = 3
class ExpireBehaviour(Enum):
remove_role = 0
kick = 1
ExpireBehavior = ExpireBehaviour
class StickerType(Enum):
standard = 1
guild = 2
class StickerFormatType(Enum):
png = 1
apng = 2
lottie = 3
@property
def file_extension(self) -> str:
# fmt: off
lookup: Dict[StickerFormatType, str] = {
StickerFormatType.png: 'png',
StickerFormatType.apng: 'png',
StickerFormatType.lottie: 'json',
}
# fmt: on
return lookup[self]
class InviteTarget(Enum):
unknown = 0
stream = 1
embedded_application = 2
class InteractionType(Enum):
ping = 1
application_command = 2
component = 3
class InteractionResponseType(Enum):
pong = 1
# ack = 2 (deprecated)
# channel_message = 3 (deprecated)
channel_message = 4 # (with source)
deferred_channel_message = 5 # (with source)
deferred_message_update = 6 # for components
message_update = 7 # for components
class VideoQualityMode(Enum):
auto = 1
full = 2
def __int__(self):
return self.value
class ComponentType(Enum):
action_row = 1
button = 2
select = 3
def __int__(self):
return self.value
class ButtonStyle(Enum):
primary = 1
secondary = 2
success = 3
danger = 4
link = 5
# Aliases
blurple = 1
grey = 2
gray = 2
green = 3
red = 4
url = 5
def __int__(self):
return self.value
class StagePrivacyLevel(Enum):
public = 1
closed = 2
guild_only = 2
class NSFWLevel(Enum, comparable=True):
default = 0
explicit = 1
safe = 2
age_restricted = 3
T = TypeVar('T')
def create_unknown_value(cls: Type[T], val: Any) -> T:
value_cls = cls._enum_value_cls_ # type: ignore
name = f'unknown_{val}'
return value_cls(name=name, value=val)
def try_enum(cls: Type[T], val: Any) -> T:
"""A function that tries to turn the value into enum ``cls``.
If it fails it returns a proxy invalid value instead.
"""
try:
return cls._enum_value_map_[val] # type: ignore
except (KeyError, TypeError, AttributeError):
return create_unknown_value(cls, val)
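# Hedged usage sketch (not part of the original module): try_enum() degrades
# gracefully for values the library does not know yet, returning a proxy member
# instead of raising the way EnumMeta.__call__ does.
def _try_enum_example():
    known = try_enum(ChannelType, 0)      # -> ChannelType.text
    unknown = try_enum(ChannelType, 999)  # -> proxy value, repr <ChannelType.unknown_999: 999>
    try:
        ChannelType(999)                  # the plain lookup raises instead
    except ValueError:
        pass
    return known, unknown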
| 28.222586
| 104
| 0.6201
|
a642159109b054ea2fc0ad08633229b778480794
| 4,813
|
py
|
Python
|
monster-classification/main.py
|
michalovsky/knowlegde-based-ai-mini-projects
|
8cfc00eb6b1c03d60bbd72e95a8ec1e50c294774
|
[
"MIT"
] | null | null | null |
monster-classification/main.py
|
michalovsky/knowlegde-based-ai-mini-projects
|
8cfc00eb6b1c03d60bbd72e95a8ec1e50c294774
|
[
"MIT"
] | null | null | null |
monster-classification/main.py
|
michalovsky/knowlegde-based-ai-mini-projects
|
8cfc00eb6b1c03d60bbd72e95a8ec1e50c294774
|
[
"MIT"
] | null | null | null |
from MonsterClassificationAgent import MonsterClassificationAgent
def test():
    # This will run your agent on four new monsters, using the ten known positive/negative examples as training data.
test_agent = MonsterClassificationAgent()
known_positive_1 = {'size': 'huge', 'color': 'black', 'covering': 'fur', 'foot-type': 'paw', 'leg-count': 2,
'arm-count': 4, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': True, 'has-wings': True,
'has_gills': True, 'has-tail': True}
known_positive_2 = {'size': 'large', 'color': 'white', 'covering': 'fur', 'foot-type': 'paw', 'leg-count': 2,
'arm-count': 4, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': True, 'has-wings': True,
'has_gills': True, 'has-tail': False}
known_positive_3 = {'size': 'huge', 'color': 'white', 'covering': 'fur', 'foot-type': 'paw', 'leg-count': 2,
'arm-count': 4, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': True, 'has-wings': True,
'has_gills': False, 'has-tail': True}
known_positive_4 = {'size': 'large', 'color': 'black', 'covering': 'fur', 'foot-type': 'paw', 'leg-count': 1,
'arm-count': 3, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': True, 'has-wings': True,
'has_gills': True, 'has-tail': True}
known_positive_5 = {'size': 'large', 'color': 'white', 'covering': 'fur', 'foot-type': 'foot', 'leg-count': 2,
'arm-count': 4, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': True, 'has-wings': True,
'has_gills': False, 'has-tail': False}
known_negative_1 = {'size': 'large', 'color': 'blue', 'covering': 'fur', 'foot-type': 'paw', 'leg-count': 2,
'arm-count': 4, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': True, 'has-wings': True,
'has_gills': True, 'has-tail': True}
known_negative_2 = {'size': 'tiny', 'color': 'red', 'covering': 'scales', 'foot-type': 'none', 'leg-count': 0,
'arm-count': 8, 'eye-count': 8, 'horn-count': 2, 'lays-eggs': False, 'has-wings': False,
'has_gills': False, 'has-tail': False}
known_negative_3 = {'size': 'medium', 'color': 'gray', 'covering': 'fur', 'foot-type': 'foot', 'leg-count': 2,
'arm-count': 6, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': True, 'has-wings': False,
'has_gills': False, 'has-tail': False}
known_negative_4 = {'size': 'huge', 'color': 'black', 'covering': 'fur', 'foot-type': 'paw', 'leg-count': 2,
'arm-count': 6, 'eye-count': 2, 'horn-count': 2, 'lays-eggs': True, 'has-wings': True,
'has_gills': False, 'has-tail': False}
known_negative_5 = {'size': 'medium', 'color': 'purple', 'covering': 'fur', 'foot-type': 'talon', 'leg-count': 2,
'arm-count': 4, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': False, 'has-wings': False,
'has_gills': True, 'has-tail': False}
monster_list = [(known_positive_1, True), (known_positive_2, True), (known_positive_3, True),
(known_positive_4, True), (known_positive_5, True),
(known_negative_1, False), (known_negative_2, False), (known_negative_3, False),
(known_negative_4, False), (known_negative_5, False)]
new_monster_1 = {'size': 'large', 'color': 'black', 'covering': 'fur', 'foot-type': 'paw', 'leg-count': 1,
'arm-count': 3, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': True, 'has-wings': True,
'has_gills': True, 'has-tail': True}
new_monster_2 = {'size': 'tiny', 'color': 'red', 'covering': 'scales', 'foot-type': 'none', 'leg-count': 0,
'arm-count': 8, 'eye-count': 8, 'horn-count': 2, 'lays-eggs': False, 'has-wings': False,
'has_gills': False, 'has-tail': False}
new_monster_3 = {'size': 'large', 'color': 'gray', 'covering': 'fur', 'foot-type': 'foot', 'leg-count': 1,
'arm-count': 3, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': True, 'has-wings': True,
'has_gills': False, 'has-tail': False}
new_monster_4 = {'size': 'small', 'color': 'black', 'covering': 'scales', 'foot-type': 'paw', 'leg-count': 2,
'arm-count': 4, 'eye-count': 2, 'horn-count': 0, 'lays-eggs': True, 'has-wings': True,
'has_gills': False, 'has-tail': False}
print(test_agent.solve(monster_list, new_monster_1))
print(test_agent.solve(monster_list, new_monster_2))
print(test_agent.solve(monster_list, new_monster_3))
print(test_agent.solve(monster_list, new_monster_4))
if __name__ == "__main__":
test()
| 74.046154
| 117
| 0.536672
|
171e5d1eaa3e74753badb100c7403d322871124b
| 3,805
|
py
|
Python
|
nekos/nekos.py
|
ltzmax/maxcogs
|
6a3f8498eac22113aefca00b873c43910a1fad91
|
[
"MIT"
] | 5
|
2022-01-31T16:16:19.000Z
|
2022-03-19T23:34:11.000Z
|
nekos/nekos.py
|
ltzmax/maxcogs
|
6a3f8498eac22113aefca00b873c43910a1fad91
|
[
"MIT"
] | 4
|
2021-12-27T17:45:26.000Z
|
2022-01-27T13:12:04.000Z
|
nekos/nekos.py
|
ltzmax/maxcogs
|
6a3f8498eac22113aefca00b873c43910a1fad91
|
[
"MIT"
] | 3
|
2022-01-05T11:53:27.000Z
|
2022-03-17T00:05:22.000Z
|
"""
MIT License
Copyright (c) 2022-present ltzmax
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import logging
import aiohttp
import discord
from redbot.core import commands
log = logging.getLogger("red.maxcogs.nekos")
NEKOS_API = "https://nekos.best/api/v2/"
ICON = "https://cdn.discordapp.com/icons/850825316766842881/070d7465948cdcf9004630fa8629627b.webp?size=1024"
class Nekos(commands.Cog):
"""Sending nekos images from nekos.best."""
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
def cog_unload(self):
asyncio.create_task(self.session.close())
__version__ = "0.1.13"
__author__ = "MAX"
def format_help_for_context(self, ctx: commands.Context) -> str:
"""Thanks Sinbad!"""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\n\nAuthor: {self.__author__}\nCog Version: {self.__version__}"
async def red_delete_data_for_user(self, **kwargs):
"""Nothing to delete."""
return
@commands.command()
@commands.cooldown(1, 3, commands.BucketType.guild)
@commands.max_concurrency(1, commands.BucketType.guild)
@commands.bot_has_permissions(embed_links=True, send_messages=True)
async def nekos(self, ctx):
"""Send a random neko image."""
async with self.session.get(NEKOS_API + "neko") as response:
if response.status != 200:
return await ctx.send(
"Something went wrong while trying to contact API."
)
url = await response.json()
artist_name = url["results"][0]["artist_name"]
source_url = url["results"][0]["source_url"]
artist_href = url["results"][0]["artist_href"]
emb = discord.Embed(
title="Here's a pic of neko",
description=f"**Artist:** [{artist_name}]({artist_href})\n**Source:** {source_url}",
)
emb.colour = await ctx.embed_color()
emb.set_image(url=url["results"][0]["url"])
emb.set_footer(text="Powered by nekos.best", icon_url=ICON)
try:
await ctx.send(embed=emb)
except discord.HTTPException as e:
await ctx.send(
"I was unable to send image, check logs for more details."
)
log.error(e)
@commands.command(hidden=True)
@commands.bot_has_permissions(embed_links=True)
async def nekoversion(self, ctx):
"""Shows the cog version."""
em = discord.Embed(
title="Cog Version:",
description=f"Author: {self.__author__}\nVersion: {self.__version__}",
colour=await ctx.embed_color(),
)
await ctx.send(embed=em)
| 37.673267
| 108
| 0.662286
|
0be080e4f3e2be772025f829825e96302d5832e5
| 222
|
py
|
Python
|
run_train.py
|
DunZhang/GPT2SourceCode
|
d598dbae278c93f88469d45ec025da4cfa7d69ee
|
[
"MIT"
] | 1
|
2021-06-25T02:21:27.000Z
|
2021-06-25T02:21:27.000Z
|
run_train.py
|
DunZhang/GPT2SourceCode
|
d598dbae278c93f88469d45ec025da4cfa7d69ee
|
[
"MIT"
] | null | null | null |
run_train.py
|
DunZhang/GPT2SourceCode
|
d598dbae278c93f88469d45ec025da4cfa7d69ee
|
[
"MIT"
] | null | null | null |
import logging
from Train import train
from utils import create_logger
from GPT2ChatbotConf import GPT2ChatbotConf
if __name__ == "__main__":
conf = GPT2ChatbotConf()
logger = create_logger(conf)
train(conf)
| 20.181818
| 43
| 0.765766
|
95573dd65a08e9d3ff89de0f625c674d992e630e
| 5,723
|
py
|
Python
|
nova/openstack/common/notifier/api.py
|
linets/nova
|
936d0a49594e04e3ec08c7a2115784d072e61dee
|
[
"Apache-2.0"
] | null | null | null |
nova/openstack/common/notifier/api.py
|
linets/nova
|
936d0a49594e04e3ec08c7a2115784d072e61dee
|
[
"Apache-2.0"
] | null | null | null |
nova/openstack/common/notifier/api.py
|
linets/nova
|
936d0a49594e04e3ec08c7a2115784d072e61dee
|
[
"Apache-2.0"
] | 1
|
2020-07-24T08:31:57.000Z
|
2020-07-24T08:31:57.000Z
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from nova.openstack.common import cfg
from nova.openstack.common import context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
notifier_opts = [
cfg.MultiStrOpt('notification_driver',
default=[],
deprecated_name='list_notifier_drivers',
help='Driver or drivers to handle sending notifications'),
cfg.StrOpt('default_notification_level',
default='INFO',
help='Default notification level for outgoing notifications'),
cfg.StrOpt('default_publisher_id',
default='$host',
help='Default publisher_id for outgoing notifications'),
]
CONF = cfg.CONF
CONF.register_opts(notifier_opts)
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
class BadPriorityException(Exception):
pass
def notify_decorator(name, fn):
""" decorator for notify which is used from utils.monkey_patch()
:param name: name of the function
    :param fn: object of the function
:returns: function -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
notify(ctxt,
CONF.default_publisher_id,
name,
CONF.default_notification_level,
body)
return fn(*args, **kwarg)
return wrapped_func
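# Hedged usage sketch (illustrative only; the wrapped function is made up):
# utils.monkey_patch() normally applies notify_decorator automatically, but the
# same wrapping can be done by hand, so every call first emits a notification
# describing the args and kwargs before the original function runs.
def _notify_decorator_example():
    def create_instance(ctxt, instance_id):
        return instance_id
    create_instance = notify_decorator('compute.create_instance', create_instance)
    return create_instance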
def publisher_id(service, host=None):
if not host:
host = CONF.host
return "%s.%s" % (service, host)
def notify(context, publisher_id, event_type, priority, payload):
"""Sends a notification using the specified driver
:param publisher_id: the source worker_type.host of the message
:param event_type: the literal type of event (ex. Instance Creation)
:param priority: patterned after the enumeration of Python logging
levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
:param payload: A python dictionary of attributes
Outgoing message format includes the above parameters, and appends the
following:
message_id
a UUID representing the id for this notification
timestamp
the GMT timestamp the notification was sent at
The composite message will be constructed as a dictionary of the above
attributes, which will then be sent via the transport mechanism defined
by the driver.
Message example::
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': timeutils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
"""
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities') % priority)
# Ensure everything is JSON serializable.
payload = jsonutils.to_primitive(payload, convert_instances=True)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
event_type=event_type,
priority=priority,
payload=payload,
timestamp=str(timeutils.utcnow()))
for driver in _get_drivers():
try:
driver.notify(context, msg)
except Exception, e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. Payload=%(payload)s") %
locals())
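# Hedged usage sketch (not part of the module; ctxt is assumed to be a request
# context): a direct call mirrors the composite message example shown in the
# notify() docstring above.
def _notify_example(ctxt):
    notify(ctxt, publisher_id('compute', 'host1'), 'compute.create_instance',
           WARN, {'instance_id': 12})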
_drivers = None
def _get_drivers():
"""Instantiate, cache, and return drivers based on the CONF."""
global _drivers
if _drivers is None:
_drivers = {}
for notification_driver in CONF.notification_driver:
add_driver(notification_driver)
return _drivers.values()
def add_driver(notification_driver):
"""Add a notification driver at runtime."""
# Make sure the driver list is initialized.
_get_drivers()
if isinstance(notification_driver, basestring):
# Load and add
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError as e:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
else:
# Driver is already loaded; just add the object.
_drivers[notification_driver] = notification_driver
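# Hedged usage sketch (the module path is hypothetical): drivers can be
# registered at runtime either by dotted import path, loaded via importutils,
# or as an already-imported module object.
def _add_driver_example():
    add_driver('mycompany.notifier.custom_driver')  # hypothetical import path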
def _reset_drivers():
"""Used by unit tests to reset the drivers."""
global _drivers
_drivers = None
| 31.445055
| 79
| 0.646514
|
437d11a4e8115660062bc604bd804b850895b603
| 15,341
|
py
|
Python
|
examples/gfxdrawing.py
|
zmarvel/py-sdl2
|
fa91007e6eebcbf5838f08cfe8d0b9d5cdf3ab83
|
[
"CC0-1.0"
] | null | null | null |
examples/gfxdrawing.py
|
zmarvel/py-sdl2
|
fa91007e6eebcbf5838f08cfe8d0b9d5cdf3ab83
|
[
"CC0-1.0"
] | null | null | null |
examples/gfxdrawing.py
|
zmarvel/py-sdl2
|
fa91007e6eebcbf5838f08cfe8d0b9d5cdf3ab83
|
[
"CC0-1.0"
] | null | null | null |
"""2D drawing examples utilising the SDL2_gfx functions."""
import sys
import ctypes
from random import randint
import sdl2
import sdl2.sdlgfx
import sdl2.ext
# Draws random lines using the passed rendering context
def draw_lines(context, width, height):
# Reset the visible area with a black color.
context.clear(0)
# Split the visible area
whalf = width // 2 - 2
hhalf = height // 2 - 2
lw = 5
x0, x1 = whalf, whalf
y0, y1 = 0, height
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, x0, y0, x1, y1, lw,
0xFFFFFFFF)
x0, x1 = 0, width
y0, y1 = hhalf, hhalf
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, x0, y0, x1, y1, lw,
0xFFFFFFFF)
for x in range(15):
# In the first quadrant, draw normal lines
color = randint(0, 0xFFFFFFFF)
x0, x1 = randint(0, whalf), randint(0, whalf)
y0, y1 = randint(0, hhalf), randint(0, hhalf)
sdl2.sdlgfx.lineColor(context.sdlrenderer, x0, y0, x1, y1, color)
# In the second quadrant, draw aa lines
color = randint(0, 0xFFFFFFFF)
x0, x1 = randint(whalf + lw, width), randint(whalf + lw, width)
y0, y1 = randint(0, hhalf), randint(0, hhalf)
sdl2.sdlgfx.aalineColor(context.sdlrenderer, x0, y0, x1, y1, color)
# In the third quadrant, draw horizontal lines
color = randint(0, 0xFFFFFFFF)
x0, x1 = randint(0, whalf), randint(0, whalf)
y0 = randint(hhalf + lw, height)
sdl2.sdlgfx.hlineColor(context.sdlrenderer, x0, x1, y0, color)
# In the fourth quadrant, draw vertical lines
color = randint(0, 0xFFFFFFFF)
x0 = randint(whalf + lw, width)
y0, y1 = randint(hhalf + lw, height), randint(hhalf + lw, height)
sdl2.sdlgfx.vlineColor(context.sdlrenderer, x0, y0, y1, color)
# Draws random circles using the passed rendering context
def draw_circles(context, width, height):
# Reset the visible area with a black color.
context.clear(0)
# Split the visible area
wthird = width // 3 - 1
lw = 3
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, wthird, 0, wthird, height,
lw, 0xFFFFFFFF)
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, (2 * wthird + lw), 0,
(2 * wthird + lw), height, lw, 0xFFFFFFFF)
for x in range(15):
# In the first part, draw circles
color = randint(0, 0xFFFFFFFF)
x, y = randint(0, wthird), randint(0, height)
r = randint(1, max(min(x, wthird - x), 2))
sdl2.sdlgfx.circleColor(context.sdlrenderer, x, y, r, color)
# In the second part, draw aa circles
color = randint(0, 0xFFFFFFFF)
x, y = randint(0, wthird), randint(0, height)
r = randint(1, max(min(x, wthird - x), 2))
sdl2.sdlgfx.aacircleColor(context.sdlrenderer, x + wthird + lw, y, r,
color)
# In the third part, draw filled circles
color = randint(0, 0xFFFFFFFF)
x, y = randint(0, wthird), randint(0, height)
r = randint(1, max(min(x, wthird - x), 2))
sdl2.sdlgfx.filledCircleColor(context.sdlrenderer, x + 2 * (wthird + lw),
y, r, color)
# Draws random ellipses using the passed rendering context
def draw_ellipsis(context, width, height):
# Reset the visible area with a black color.
context.clear(0)
# Split the visible area
wthird = width // 3 - 1
eheight = height // 4
lw = 3
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, wthird, 0, wthird, height,
lw, 0xFFFFFFFF)
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, (2 * wthird + lw), 0,
(2 * wthird + lw), height, lw, 0xFFFFFFFF)
for x in range(15):
        # In the first part, draw ellipses
color = randint(0, 0xFFFFFFFF)
x, y = randint(0, wthird), randint(0, height)
rx, ry = randint(1, max(min(x, wthird - x), 2)), randint(0, eheight)
sdl2.sdlgfx.ellipseColor(context.sdlrenderer, x, y, rx, ry, color)
        # In the second part, draw aa ellipses
color = randint(0, 0xFFFFFFFF)
x, y = randint(0, wthird), randint(0, height)
rx, ry = randint(1, max(min(x, wthird - x), 2)), randint(0, eheight)
sdl2.sdlgfx.aaellipseColor(context.sdlrenderer, x + wthird + lw, y,
rx, ry, color)
        # In the third part, draw filled ellipses
color = randint(0, 0xFFFFFFFF)
x, y = randint(0, wthird), randint(0, height)
rx, ry = randint(1, max(min(x, wthird - x), 2)), randint(0, eheight)
sdl2.sdlgfx.filledEllipseColor(context.sdlrenderer,
x + 2 * (wthird + lw), y, rx, ry,
color)
# Draws random rectangles using the passed rendering context
def draw_rects(context, width, height):
# Reset the visible area with a black color.
context.clear(0)
# Split the visible area
whalf = width // 2 - 2
hhalf = height // 2 - 2
lw = 5
x0, x1 = whalf, whalf
y0, y1 = 0, height
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, x0, y0, x1, y1, lw,
0xFFFFFFFF)
x0, x1 = 0, width
y0, y1 = hhalf, hhalf
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, x0, y0, x1, y1, lw,
0xFFFFFFFF)
for x in range(15):
# In the first quadrant, draw normal rectangles
color = randint(0, 0xFFFFFFFF)
x0, x1 = randint(0, whalf), randint(0, whalf)
y0, y1 = randint(0, hhalf), randint(0, hhalf)
sdl2.sdlgfx.rectangleColor(context.sdlrenderer, x0, y0, x1, y1, color)
# In the second quadrant, draw rounded rectangles
color = randint(0, 0xFFFFFFFF)
x0, x1 = randint(whalf + lw, width), randint(whalf + lw, width)
y0, y1 = randint(0, hhalf), randint(0, hhalf)
r = randint(0, max(x1 - x0, x0 - x1))
sdl2.sdlgfx.roundedRectangleColor(context.sdlrenderer, x0, y0, x1, y1, r,
color)
# In the third quadrant, draw horizontal lines
color = randint(0, 0xFFFFFFFF)
x0, x1 = randint(0, whalf), randint(0, whalf)
y0, y1 = randint(hhalf + lw, height), randint(hhalf + lw, height)
sdl2.sdlgfx.boxColor(context.sdlrenderer, x0, y0, x1, y1, color)
# In the fourth quadrant, draw vertical lines
color = randint(0, 0xFFFFFFFF)
x0, x1 = randint(whalf + lw, width), randint(whalf + lw, width)
y0, y1 = randint(hhalf + lw, height), randint(hhalf + lw, height)
r = randint(1, max(x1 - x0, x0 - x1))
sdl2.sdlgfx.roundedBoxColor(context.sdlrenderer, x0, y0, x1, y1, r,
color)
# Draws random triangles using the passed rendering context
def draw_trigons(context, width, height):
# Reset the visible area with a black color.
context.clear(0)
# Split the visible area
wthird = width // 3 - 1
lw = 3
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, wthird, 0, wthird, height,
lw, 0xFFFFFFFF)
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, (2 * wthird + lw), 0,
(2 * wthird + lw), height, lw, 0xFFFFFFFF)
for x in range(15):
# In the first part, draw triangles
color = randint(0, 0xFFFFFFFF)
x0, y0 = randint(0, wthird), randint(0, height)
x1, y1 = randint(0, wthird), randint(0, height)
x2, y2 = randint(0, wthird), randint(0, height)
sdl2.sdlgfx.trigonColor(context.sdlrenderer, x0, y0, x1, y1, x2, y2,
color)
# In the second part, draw aa triangles
color = randint(0, 0xFFFFFFFF)
x0, y0 = randint(0, wthird) + wthird + lw, randint(0, height)
x1, y1 = randint(0, wthird) + wthird + lw, randint(0, height)
x2, y2 = randint(0, wthird) + wthird + lw, randint(0, height)
sdl2.sdlgfx.aatrigonColor(context.sdlrenderer, x0, y0, x1, y1, x2, y2,
color)
# In the third part, draw filled triangles
color = randint(0, 0xFFFFFFFF)
x0, y0 = randint(0, wthird) + 2 * (wthird + lw), randint(0, height)
x1, y1 = randint(0, wthird) + 2 * (wthird + lw), randint(0, height)
x2, y2 = randint(0, wthird) + 2 * (wthird + lw), randint(0, height)
sdl2.sdlgfx.filledTrigonColor(context.sdlrenderer, x0, y0, x1, y1,
x2, y2, color)
# Draws random polygons using the passed rendering context
def draw_polygons(context, width, height):
# Reset the visible area with a black color.
context.clear(0)
# Split the visible area
wthird = width // 3 - 1
lw = 3
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, wthird, 0, wthird, height,
lw, 0xFFFFFFFF)
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, (2 * wthird + lw), 0,
(2 * wthird + lw), height, lw, 0xFFFFFFFF)
for x in range(5):
# In the first part, draw polygons
color = randint(0, 0xFFFFFFFF)
ptcount = randint(3, 10)
xlist, ylist = (sdl2.Sint16 * ptcount)(), (sdl2.Sint16 * ptcount)()
for k in range(ptcount):
xlist[k] = randint(0, wthird)
ylist[k] = randint(0, height)
xptr = ctypes.cast(xlist, ctypes.POINTER(sdl2.Sint16))
yptr = ctypes.cast(ylist, ctypes.POINTER(sdl2.Sint16))
sdl2.sdlgfx.polygonColor(context.sdlrenderer, xptr, yptr, ptcount, color)
# In the second part, draw aa polygons
color = randint(0, 0xFFFFFFFF)
ptcount = randint(3, 10)
xlist, ylist = (sdl2.Sint16 * ptcount)(), (sdl2.Sint16 * ptcount)()
for k in range(ptcount):
xlist[k] = randint(0, wthird) + wthird + lw
ylist[k] = randint(0, height)
xptr = ctypes.cast(xlist, ctypes.POINTER(sdl2.Sint16))
yptr = ctypes.cast(ylist, ctypes.POINTER(sdl2.Sint16))
sdl2.sdlgfx.aapolygonColor(context.sdlrenderer, xptr, yptr, ptcount,
color)
# In the third part, draw filled polygons
color = randint(0, 0xFFFFFFFF)
ptcount = randint(3, 10)
xlist, ylist = (sdl2.Sint16 * ptcount)(), (sdl2.Sint16 * ptcount)()
for k in range(ptcount):
xlist[k] = randint(0, wthird) + 2 * (wthird + lw)
ylist[k] = randint(0, height)
xptr = ctypes.cast(xlist, ctypes.POINTER(sdl2.Sint16))
yptr = ctypes.cast(ylist, ctypes.POINTER(sdl2.Sint16))
sdl2.sdlgfx.filledPolygonColor(context.sdlrenderer, xptr, yptr, ptcount,
color)
# Draw random elements using the passed rendering context
def draw_mixed(context, width, height):
# Reset the visible area with a black color.
context.clear(0)
# Split the visible area
whalf = width // 2 - 2
hhalf = height // 2 - 2
lw = 5
x0, x1 = whalf, whalf
y0, y1 = 0, height
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, x0, y0, x1, y1, lw,
0xFFFFFFFF)
x0, x1 = 0, width
y0, y1 = hhalf, hhalf
sdl2.sdlgfx.thickLineColor(context.sdlrenderer, x0, y0, x1, y1, lw,
0xFFFFFFFF)
for x in range(15):
# In the first quadrant, draw arcs
color = randint(0, 0xFFFFFFFF)
x0, y0 = randint(0, whalf), randint(0, hhalf)
rad = randint(0, min(whalf - x0, hhalf - y0))
start, end = randint(0, 360), randint(0, 360)
sdl2.sdlgfx.arcColor(context.sdlrenderer, x0, y0, rad, start, end, color)
# In the second quadrant, draw bezier curves
color = randint(0, 0xFFFFFFFF)
ptcount = randint(3, 10)
xlist, ylist = (sdl2.Sint16 * ptcount)(), (sdl2.Sint16 * ptcount)()
for k in range(ptcount):
xlist[k] = randint(whalf, width)
ylist[k] = randint(0, hhalf)
steps = randint(2, 10)
xptr = ctypes.cast(xlist, ctypes.POINTER(sdl2.Sint16))
yptr = ctypes.cast(ylist, ctypes.POINTER(sdl2.Sint16))
sdl2.sdlgfx.bezierColor(context.sdlrenderer, xptr, yptr, ptcount, steps,
color)
# In the third quadrant, draw pies
color = randint(0, 0xFFFFFFFF)
x0, y0 = randint(0, whalf), randint(hhalf + lw, height)
rad = randint(0, min(whalf - x0, y0 - (hhalf + lw)))
start, end = randint(0, 360), randint(0, 360)
sdl2.sdlgfx.pieColor(context.sdlrenderer, x0, y0, rad, start, end, color)
# In the fourth quadrant, draw filled pies
color = randint(0, 0xFFFFFFFF)
x0, y0 = randint(whalf + lw, width), randint(hhalf + lw, height)
rad = randint(0, min(x0 - (whalf + lw), y0 - (hhalf + lw)))
start, end = randint(0, 360), randint(0, 360)
sdl2.sdlgfx.filledPieColor(context.sdlrenderer, x0, y0, rad, start, end,
color)
def run():
# You know those from the helloworld.py example.
# Initialize the video subsystem, create a window and make it visible.
sdl2.ext.init()
window = sdl2.ext.Window("sdlgfx drawing examples", size=(800, 600))
window.show()
# Create a rendering context for the window. The sdlgfx module requires it.
context = sdl2.ext.Renderer(window)
# We implement the functionality as it was done in colorpalettes.py and
# utilise a mapping table to look up the function to be executed, together
# with the arguments they should receive
functions = ((draw_lines, (context, 800, 600)),
(draw_circles, (context, 800, 600)),
(draw_ellipsis, (context, 800, 600)),
(draw_rects, (context, 800, 600)),
(draw_trigons, (context, 800, 600)),
(draw_polygons, (context, 800, 600)),
(draw_mixed, (context, 800, 600))
)
# A storage variable for the function we are currently on, so that we know
# which function to execute next.
curindex = 0
draw_lines(context, 800, 600)
# The event loop is nearly the same as we used in colorpalettes.py. If you
# do not know, what happens here, take a look at colorpalettes.py for a
# detailed description.
running = True
while running:
events = sdl2.ext.get_events()
for event in events:
if event.type == sdl2.SDL_QUIT:
running = False
break
if event.type == sdl2.SDL_MOUSEBUTTONDOWN:
curindex += 1
if curindex >= len(functions):
curindex = 0
# In contrast to colorpalettes.py, our mapping table consists
# of functions and their arguments. Thus, we get the currently
# requested function and argument tuple and execute the
# function with the arguments.
func, args = functions[curindex]
func(*args)
break
context.present()
sdl2.ext.quit()
return 0
if __name__ == "__main__":
sys.exit(run())
| 44.725948
| 81
| 0.581905
|
4ed1458ad29d009d1d612cf05f75116199668050
| 4,041
|
py
|
Python
|
dashboard/about/imprint/imprintView.py
|
DeFi-Analytics/DeFi-Analytics
|
25fa0588758313c6a207848080a5f2d994316a24
|
[
"MIT"
] | 11
|
2021-02-26T21:27:56.000Z
|
2022-01-03T06:19:09.000Z
|
dashboard/about/imprint/imprintView.py
|
DeFi-Analytics/DeFi-Analytics
|
25fa0588758313c6a207848080a5f2d994316a24
|
[
"MIT"
] | 216
|
2021-02-27T12:09:59.000Z
|
2022-03-28T19:44:37.000Z
|
dashboard/about/imprint/imprintView.py
|
DeFi-Analytics/DeFi-Analytics
|
25fa0588758313c6a207848080a5f2d994316a24
|
[
"MIT"
] | 2
|
2021-05-05T21:32:18.000Z
|
2022-02-21T11:54:01.000Z
|
import dash_html_components as html
import dash_bootstrap_components as dbc
class imprintViewClass:
def getImprintContent(self):
content = [dbc.Card(dbc.CardBody([dbc.Row(dbc.Col(self.createImprintContent()))]))]
return content
@staticmethod
def createImprintContent():
contentImprint = [html.H4("Imprint"),
html.P(['The website defichain-analytics.com is operated by:', html.Br(),
'Daniel Zirkel', html.Br(),
'Oeschelbronner Weg 4', html.Br(),
'75446 Wiernsheim' , html.Br(),
'Germany', html.Br()]),
html.P(['Telegram: ', html.A('https://t.me/DanielZirkel', href='https://t.me/DanielZirkel', target='_blank', className='defiLink'), html.Br(),
'e-mail: cakereviewdashboard@gmail.com']),
html.P(['Responsible for the contents in the sense of § 5 TMG § 18 Abs. 2 MStV:',html.Br(),
'Daniel Zirkel']),
html.P(['1. limitation of liability', html.Br(),
'The contents of this website are created with the greatest possible care. However, the provider does not guarantee the accuracy, completeness and '
                                 'timeliness of the content provided. The use of the contents of the website is at the user\'s own risk. Contributions identified by name reflect the '
                                 'opinion of the respective author and not always the opinion of the provider. The mere use of the provider\'s website does not constitute any '
'contractual relationship between the user and the provider.']),
html.P(['2. external links', html.Br(),
'This website contains links to third-party websites ("external links"). These websites are subject to the liability of the respective operators. '
                                 'When the external links were first created, the provider checked the external content for any legal violations. At that time, no legal violations '
'were apparent. The provider has no influence on the current and future design and content of the linked pages. The inclusion of external links '
'does not imply that the provider adopts the content behind the reference or link as its own. It is not reasonable for the provider to constantly '
'monitor the external links without concrete evidence of legal violations. However, such external links will be deleted immediately if legal '
                                 'violations become known.']),
html.P(['3. copyrights and ancillary copyrights', html.Br(),
'The contents published on this website are subject to German copyright and ancillary copyright law. Any use not permitted by German copyright '
'and ancillary copyright law requires the prior written consent of the provider or the respective copyright holder. This applies in particular '
'to the copying, editing, translation, storage, processing or reproduction of content in databases or other electronic media and systems. '
'Contents and rights of third parties are marked as such. The unauthorized reproduction or distribution of individual content or complete pages '
'is not permitted and is punishable by law. Only the production of copies and downloads for personal, private and non-commercial use is '
'permitted. The display of this website in external frames is only permitted with written permission.']),
]
return contentImprint
| 91.840909
| 184
| 0.590943
|
80a3c91569a53f9f11f00ad98a198d76cfe7297d
| 1,859
|
py
|
Python
|
Google Drive/Learning/Python/General Python Tutorial/Training/27_Generate_Random_Numbers.py
|
mobiusworkspace/mobiuswebsite
|
73eef1bd4fc07ea318aad431de09eac10fc4da3a
|
[
"CC-BY-3.0"
] | null | null | null |
Google Drive/Learning/Python/General Python Tutorial/Training/27_Generate_Random_Numbers.py
|
mobiusworkspace/mobiuswebsite
|
73eef1bd4fc07ea318aad431de09eac10fc4da3a
|
[
"CC-BY-3.0"
] | null | null | null |
Google Drive/Learning/Python/General Python Tutorial/Training/27_Generate_Random_Numbers.py
|
mobiusworkspace/mobiuswebsite
|
73eef1bd4fc07ea318aad431de09eac10fc4da3a
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 15:42:16 2019
@author: OAdeoye
"""
import random
# Gets a random float within the range [1, 10]
value = random.uniform(1,10)
print(value)
# Gets a random integer between 0 and 2 (inclusive)
value = random.randint(0,2)
print(value)
greetings = ['Hello', 'Hi', 'Hey', 'Howdy', 'Hola']
value = random.choice(greetings)
print(value + ', Dotmons')
# random.choices returns k values chosen with replacement; k must be passed as a keyword argument
value = random.choices(greetings, k=10)
print('Random choice: ' + str(value))
#Weighting random values from the list
colors = ['Blue', 'Yellow', 'Red', 'Green']
result = random.choices(colors, k=10, weights=[18,20,2,35])
print('Random weighted choice: ' + str(result))
# Shuffle a list of the numbers 1 to 52
deck = list(range(1,53))
random.shuffle(deck)
print('1 to 52 shuffled deck: ' + str(deck))
# Draw 5 unique cards from the deck (sampling without replacement)
hand = random.sample(deck, k=5)
print('unique 5 values: ' + str(hand))
#Real life scenario:
first_names = ['John', 'Jane', 'Dotun']
last_names = ['Ade', 'Bola', 'Adeoye']
street_names = ['Davidson', 'High', 'Calerton']
fake_cities = ['Lagos', 'Ekiti', 'Ikeja']
states = ['ON', 'SK', 'YK']
email_domain = ['@dotmons.com', '@computer.com', '@waterloo.ca']
for num in range(3):
first = random.choice(first_names)
last = random.choice(last_names)
phone = f'{random.randint(100, 999)}-888-{random.randint(1000,9999)}'
street_num = random.randint(100, 999)
street = random.choice(street_names)
city = random.choice(fake_cities)
state = random.choice(states)
zip_code = random.randint(10000, 99999)
address = f'{street_num} {street} St., {city} {state} {zip_code}'
email_d = random.choice(email_domain)
email = first.lower() + last.lower() + email_d
print(f'{first} {last}\n{phone}\n{address}\n{email}\n')
| 26.183099
| 73
| 0.658419
|
0c401458957ea3467513c8558095c6ca8848b925
| 139
|
py
|
Python
|
build/lib/lrc_kit/__init__.py
|
reteps/python-lrc-search
|
1ff26a998f0c86afc07009eca3f6fd5059f81200
|
[
"MIT"
] | 6
|
2021-01-11T22:01:46.000Z
|
2022-02-04T21:37:11.000Z
|
build/lib/lrc_kit/__init__.py
|
reteps/python-lrc-search
|
1ff26a998f0c86afc07009eca3f6fd5059f81200
|
[
"MIT"
] | 5
|
2021-01-11T23:17:28.000Z
|
2022-01-25T20:14:18.000Z
|
build/lib/lrc_kit/__init__.py
|
reteps/python-lrc-search
|
1ff26a998f0c86afc07009eca3f6fd5059f81200
|
[
"MIT"
] | null | null | null |
from lrc_kit.line import LyricLine, Word
from lrc_kit.lyrics import Lyrics
from lrc_kit.parser import parse_lyrics
from .providers import *
| 34.75
| 40
| 0.841727
|
0c8413a40b992e43221feb5c66f8c057f64b6965
| 3,206
|
py
|
Python
|
tools/coda/coda/keystone.py
|
osuosl/osops
|
602d0fd4d6a29c5a7d186385a27391b39afffe86
|
[
"Apache-2.0"
] | 2
|
2021-04-23T02:29:37.000Z
|
2021-12-06T06:01:31.000Z
|
tools/coda/coda/keystone.py
|
osuosl/osops
|
602d0fd4d6a29c5a7d186385a27391b39afffe86
|
[
"Apache-2.0"
] | null | null | null |
tools/coda/coda/keystone.py
|
osuosl/osops
|
602d0fd4d6a29c5a7d186385a27391b39afffe86
|
[
"Apache-2.0"
] | 1
|
2022-03-21T18:10:30.000Z
|
2022-03-21T18:10:30.000Z
|
# Copyright [2015] Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All keystone interactions go here.
Not much else to say.
"""
from django.conf import settings
import json
import requests
JSON_HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def get_coda_token():
"""Return the auth token for the coda user."""
payload = {
"auth": {
"tenantId": settings.CODA_TENANT_ID,
"passwordCredentials": {
"username": settings.CODA_USERNAME,
"password": settings.CODA_PASSWORD
}
}
}
result = 'error'
try:
response = requests.post(
"%s/tokens" % settings.CODA_AUTH_URL,
data=json.dumps(payload),
headers=JSON_HEADERS,
verify=False)
result = json.loads(response.text)['access']['token']['id']
except Exception as ex:
print("error in get_coda_token", ex)
return result
def get_project_users(auth_token, project_id):
"""Return a map of user info for a given project."""
headers = {
"X-Auth-Token": auth_token,
"Accept": "application/json",
}
response = requests.get(
"%s/tenants/%s/users" % (settings.CODA_KEYSTONE_URL, project_id),
headers=headers,
verify=False)
return json.loads(response.text)['users']
def user_authenticate(tenant_id, username, password):
"""Get the auth token for a user of Coda."""
payload = {
"auth": {
"tenantId": tenant_id,
"passwordCredentials": {
"username": username,
"password": password
}
}
}
result = 'error'
try:
response = requests.post("%s/tokens" % settings.CODA_AUTH_URL,
data=json.dumps(payload),
headers=JSON_HEADERS,
verify=False)
result = json.loads(response.text)['access']['token']['id']
except Exception as ex:
print("error in user_authenticate", ex)
return result
def project_exists(auth_token, project_id):
"""Check if the project id is valid / exists.
Returns true with info if it does and false and empty if not.
"""
headers = {
"X-Auth-Token": auth_token,
"Accept": "application/json",
}
response = requests.get(
"%s/tenants/%s" % (settings.CODA_KEYSTONE_URL, project_id),
headers=headers,
verify=False)
if response.status_code == 200:
return True, json.loads(response.text)
else:
return False, ''
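# Hedged usage sketch (illustrative only; the project id is made up): the
# helpers above are typically chained - authenticate as the coda service user,
# confirm the project exists, then list its users.
def _keystone_example():
    token = get_coda_token()
    exists, info = project_exists(token, 'example-project-id')  # info holds the tenant document when found
    if exists:
        return get_project_users(token, 'example-project-id')
    return []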
| 27.169492
| 74
| 0.603868
|
a0bb03659f3b632c64c2443226539c3adbaabe05
| 30,687
|
py
|
Python
|
src/kusto/azext_kusto/tests/latest/example_steps.py
|
ravithanneeru/azure-cli-extensions
|
e0de87f3563ae39525370e9912589aac33e7bded
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/kusto/azext_kusto/tests/latest/example_steps.py
|
ravithanneeru/azure-cli-extensions
|
e0de87f3563ae39525370e9912589aac33e7bded
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/kusto/azext_kusto/tests/latest/example_steps.py
|
ravithanneeru/azure-cli-extensions
|
e0de87f3563ae39525370e9912589aac33e7bded
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .. import try_manual
# EXAMPLE: /AttachedDatabaseConfigurations/put/AttachedDatabaseConfigurationsCreateOrUpdate
@try_manual
def step_attached_database_configuration_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto attached-database-configuration create '
'--name "{myAttachedDatabaseConfiguration}" '
'--cluster-name "{myCluster1}" '
'--location "westus2" '
'--cluster-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Kusto/Clu'
'sters/{myCluster}" '
'--database-name "KustoDatabase" '
'--default-principals-modification-kind "Union" '
'--table-level-sharing-properties external-tables-to-exclude="ExternalTable2" '
'external-tables-to-include="ExternalTable1" materialized-views-to-exclude="MaterializedViewTable2" '
'materialized-views-to-include="MaterializedViewTable1" tables-to-exclude="Table2" '
'tables-to-include="Table1" '
'--resource-group "{rg}" ',
checks=[])
test.cmd('az kusto attached-database-configuration wait --created '
'--name "{myAttachedDatabaseConfiguration}" '
'--cluster-name "{myCluster1}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /AttachedDatabaseConfigurations/get/AttachedDatabaseConfigurationsGet
@try_manual
def step_attached_database_configuration_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto attached-database-configuration show '
'--name "{myAttachedDatabaseConfiguration}" '
'--cluster-name "{myCluster1}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /AttachedDatabaseConfigurations/get/KustoAttachedDatabaseConfigurationsListByCluster
@try_manual
def step_attached_database_configuration_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto attached-database-configuration list '
'--cluster-name "{myCluster1}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/put/KustoClustersCreateOrUpdate
@try_manual
def step_cluster_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster create '
'--cluster-name "{myCluster}" '
'--type="SystemAssigned" '
'--location "westus2" '
'--allowed-ip-range-list "0.0.0.0/0" '
'--enable-auto-stop true '
'--enable-purge true '
'--enable-streaming-ingest true '
'--key-vault-properties key-name="" key-vault-uri="" key-version="" '
'--sku name="Standard_D11_v2" capacity=2 tier="Standard" '
'--public-network-access "Enabled" '
'--resource-group "{rg}" ',
checks=[])
test.cmd('az kusto cluster wait --created '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}" ',
checks=[])
# EXAMPLE: /Clusters/put/KustoClustersCreateOrUpdate
@try_manual
def step_leader_cluster_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster create '
'--cluster-name "{myCluster1}" '
'--type="SystemAssigned" '
'--location "westus2" '
'--enable-purge true '
'--enable-streaming-ingest true '
'--sku name="Standard_D11_v2" capacity=2 tier="Standard" '
'--resource-group "{rg}" ',
checks=[])
test.cmd('az kusto cluster wait --created '
'--cluster-name "{myCluster1}" '
'--resource-group "{rg}" ',
checks=[])
# EXAMPLE: /Clusters/get/Get Kusto cluster outbound network dependencies
@try_manual
def step_cluster_list_outbound(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list-outbound-network-dependency-endpoint '
'--name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/get/KustoClustersGet
@try_manual
def step_cluster_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster show '
'--name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/get/KustoClustersList
@try_manual
def step_cluster_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list '
'-g "" ',
checks=checks)
# EXAMPLE: /Clusters/get/KustoClustersListByResourceGroup
@try_manual
def step_cluster_list_by_resource_group(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/get/KustoClustersListResourceSkus
@try_manual
def step_cluster_list_sku_by_resource_group(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list-sku '
'--name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/get/KustoClustersListSkus
@try_manual
def step_cluster_list_sku(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list-sku '
'-g "" ',
checks=checks)
# EXAMPLE: /Clusters/patch/KustoClustersUpdate
@try_manual
def step_cluster_update(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster update '
'--name "{myCluster1}" '
'--type "SystemAssigned" '
'--location "westus2" '
'--enable-auto-stop true '
'--enable-purge true '
'--enable-streaming-ingest true '
'--engine-type "V3" '
'--restrict-outbound-network-access "Disabled" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterAddLanguageExtensions
@try_manual
def step_cluster_add_language_extension(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster add-language-extension '
'--name "{myCluster}" '
'--value language-extension-name="PYTHON" '
'--value language-extension-name="R" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterDetachFollowerDatabases
@try_manual
def step_cluster_detach_follower_database(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster detach-follower-database '
'--name "{myCluster}" '
'--attached-database-configuration-name "{myAttachedDatabaseConfiguration}" '
'--cluster-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Kusto/clu'
'sters/{myCluster1}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterDiagnoseVirtualNetwork
@try_manual
def step_cluster_diagnose_virtual_network(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster diagnose-virtual-network '
'--name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterListFollowerDatabases
@try_manual
def step_cluster_list_follower_database(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list-follower-database '
'--name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterListLanguageExtensions
@try_manual
def step_cluster_list_language_extension(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster list-language-extension '
'--name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClusterRemoveLanguageExtensions
@try_manual
def step_cluster_remove_language_extension(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster remove-language-extension '
'--name "{myCluster}" '
'--value language-extension-name="PYTHON" '
'--value language-extension-name="R" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClustersStart
@try_manual
def step_cluster_start(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster start '
'--name "{myCluster1}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/post/KustoClustersStop
@try_manual
def step_cluster_stop(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster stop '
'--name "{myCluster1}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /ClusterPrincipalAssignments/put/KustoClusterPrincipalAssignmentsCreateOrUpdate
@try_manual
def step_cluster_principal_assignment_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster-principal-assignment create '
'--cluster-name "{myCluster}" '
'--principal-assignment-name "testcli1" '
'--principal-type "App" '
'--principal-id "ad064aa1-8b51-41ec-9c64-0d3037577d63" '
'--role "AllDatabasesAdmin" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /ClusterPrincipalAssignments/get/KustoClusterPrincipalAssignmentsGet
@try_manual
def step_cluster_principal_assignment_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster-principal-assignment show '
'--cluster-name "{myCluster}" '
'--principal-assignment-name "testcli1" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /ClusterPrincipalAssignments/get/KustoPrincipalAssignmentsList
@try_manual
def step_cluster_principal_assignment_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster-principal-assignment list '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /DatabasePrincipalAssignments/put/KustoDatabasePrincipalAssignmentsCreateOrUpdate
@try_manual
def step_database_principal_assignment_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database-principal-assignment create '
'--cluster-name "{myCluster}" '
'--database-name "kustoDatabase" '
'--role "Admin" '
'--principal-type "App" '
'--principal-id "ad064aa1-8b51-41ec-9c64-0d3037577d63" '
'--principal-assignment-name "testcli1" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /DatabasePrincipalAssignments/get/KustoDatabasePrincipalAssignmentsGet
@try_manual
def step_database_principal_assignment_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database-principal-assignment show '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--principal-assignment-name "testcli1" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /DatabasePrincipalAssignments/get/KustoPrincipalAssignmentsList
@try_manual
def step_database_principal_assignment_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database-principal-assignment list '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /DatabasePrincipalAssignments/delete/KustoDatabasePrincipalAssignmentsDelete
@try_manual
def step_database_principal_assignment_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database-principal-assignment delete -y '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--principal-assignment-name "testcli1" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Databases/put/Kusto ReadWrite database create or update
@try_manual
def step_database_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database create '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--read-write-database location="westus2" soft-delete-period="P1D" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Databases/get/KustoDatabasesGet
@try_manual
def step_database_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database show '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Databases/get/KustoDatabasesListByCluster
@try_manual
def step_database_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database list '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Databases/patch/KustoDatabasesUpdate
@try_manual
def step_database_update(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database update '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--read-write-database hot-cache-period="P1D" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Databases/post/KustoDatabaseAddPrincipals
@try_manual
def step_database_add_principal(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database add-principal '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--value name="clitest" type="App" app-id="7bd74dc8-dd5f-4cee-8e64-866138abcf89" role="Admin" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Databases/post/KustoDatabaseListPrincipals
@try_manual
def step_database_list_principal(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database list-principal '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Databases/post/KustoDatabaseRemovePrincipals
@try_manual
def step_database_remove_principal(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database remove-principal '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--value name="clitest" type="App" app-id="7bd74dc8-dd5f-4cee-8e64-866138abcf89" role="Admin" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Databases/delete/KustoDatabasesDelete
@try_manual
def step_database_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto database delete -y '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /DataConnections/put/KustoDataConnectionsCreateOrUpdate
@try_manual
def step_data_connection_event_hub_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection event-hub create '
'--cluster-name "{myCluster}" '
'--name "{myDataConnection}" '
'--database-name "KustoDatabase" '
'--location "westus2" '
'--consumer-group "$Default" '
'--event-hub-resource-id "/subscriptions/{subscription_id}/resourceGroups/testrg/providers/Microsoft.EventHu'
'b/namespaces/testcli/eventhubs/eventhubTest1" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /DataConnections/get/KustoDatabasesListByCluster
@try_manual
def step_data_connection_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection list '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /DataConnections/get/KustoDataConnectionsGet
@try_manual
def step_data_connection_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection show '
'--cluster-name "{myCluster}" '
'--name "{myDataConnection}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /DataConnections/patch/KustoDataConnectionsUpdate
@try_manual
def step_data_connection_event_hub_update(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection event-hub update '
'--cluster-name "{myCluster}" '
'--name "{myDataConnection}" '
'--database-name "KustoDatabase" '
'--location "westus2" '
'--resource-group "{rg}" '
'--consumer-group "$Default" '
'--event-hub-resource-id "/subscriptions/{subscription_id}/resourceGroups/testrg/providers/Microsoft.EventHu'
'b/namespaces/testcli/eventhubs/eventhubTest1" ',
checks=checks)
# EXAMPLE: /DataConnections/post/KustoDataConnectionValidation
@try_manual
def step_data_connection_validation(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection event-hub data-connection-validation '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--name "{myDataConnection}" '
'--consumer-group "$Default" '
'--event-hub-resource-id "/subscriptions/{subscription_id}/resourceGroups/testrg/providers/Microsoft.EventHu'
'b/namespaces/testcli/eventhubs/eventhubTest1" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /DataConnections/delete/KustoDataConnectionsDelete
@try_manual
def step_data_connection_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto data-connection delete -y '
'--cluster-name "{myCluster}" '
'--name "{myDataConnection}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /ManagedPrivateEndpoints/put/KustoManagedPrivateEndpointsCreateOrUpdate
@try_manual
def step_managed_private_endpoint_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto managed-private-endpoint create '
'--cluster-name "{myCluster}" '
'--name "{myManagedPrivateEndpoint}" '
'--group-id "blob" '
'--private-link-resource-id "/subscriptions/{subscription_id}/resourceGroups/testrg/providers/Microsoft.Stor'
'age/storageAccounts/clisatest" '
'--request-message "Please Approve." '
'--resource-group "{rg}" ',
checks=[])
test.cmd('az kusto managed-private-endpoint wait --created '
'--cluster-name "{myCluster}" '
'--name "{myManagedPrivateEndpoint}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /ManagedPrivateEndpoints/get/KustoManagedPrivateEndpointsGet
@try_manual
def step_managed_private_endpoint_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto managed-private-endpoint show '
'--cluster-name "{myCluster}" '
'--name "{myManagedPrivateEndpoint}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /ManagedPrivateEndpoints/get/KustoManagedPrivateEndpointsList
@try_manual
def step_managed_private_endpoint_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto managed-private-endpoint list '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /ManagedPrivateEndpoints/patch/KustoManagedPrivateEndpointsUpdate
@try_manual
def step_managed_private_endpoint_update(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto managed-private-endpoint update '
'--cluster-name "{myCluster}" '
'--name "{myManagedPrivateEndpoint}" '
'--group-id "blob" '
'--private-link-resource-id "/subscriptions/{subscription_id}/resourceGroups/testrg/providers/Microsoft.Stor'
'age/storageAccounts/clisatest" '
'--request-message "Please Approve." '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /ManagedPrivateEndpoints/delete/ManagedPrivateEndpointsDelete
@try_manual
def step_managed_private_endpoint_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto managed-private-endpoint delete -y '
'--cluster-name "{myCluster}" '
'--name "{myManagedPrivateEndpoint}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Clusters/delete/KustoClustersDelete
@try_manual
def step_cluster_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster delete -y '
'--name "{myCluster1}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /AttachedDatabaseConfigurations/delete/AttachedDatabaseConfigurationsDelete
@try_manual
def step_attached_database_configuration_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto attached-database-configuration delete -y '
'--name "{myAttachedDatabaseConfiguration}" '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /ClusterPrincipalAssignments/delete/KustoClusterPrincipalAssignmentsDelete
@try_manual
def step_cluster_principal_assignment_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto cluster-principal-assignment delete -y '
'--cluster-name "{myCluster}" '
'--principal-assignment-name "testcli1" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /OperationsResults/get/KustoOperationResultsGet
@try_manual
def step_operation_result_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto operation-result show '
'--operation-id "30972f1b-b61d-4fd8-bd34-3dcfa24670f3" '
'--location "westus2" ',
checks=checks)
# EXAMPLE: /PrivateEndpointConnections/put/Approve or reject a private endpoint connection with a given name.
@try_manual
def step_private_endpoint_connection_create(test, checks=None):
test.cmd('az network private-endpoint create '
'-n "{myPrivateEndpoint}" '
'-g "testrg" '
'--group-id "cluster" '
'--manual-request true '
'--subnet "/subscriptions/{subscription_id}/resourceGroups/testrg/providers/Microsoft.Network/virtualNetworks/MyVnet/subnets/MySubnet" '
'--private-connection-resource-id "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Kusto/Clusters/{myCluster}" '
'--connection-name "test"',
checks=checks)
step_private_endpoint_connection_list(test)
test.cmd('az kusto private-endpoint-connection create '
'--cluster-name "{myCluster}" '
'--private-link-service-connection-state description="Approved by test" status="Approved" '
'--name "{myPrivateEndpointConnection}" '
'--resource-group "{rg}"',
checks=[])
test.cmd('az kusto private-endpoint-connection wait --created '
'--cluster-name "{myCluster}" '
'--name "{myPrivateEndpointConnection}" '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /PrivateEndpointConnections/get/Gets private endpoint connection.
@try_manual
def step_private_endpoint_connection_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto private-endpoint-connection show '
'--cluster-name "{myCluster}" '
'--name "{myPrivateEndpointConnection}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /PrivateEndpointConnections/get/KustoPrivateEndpointConnectionsList
@try_manual
def step_private_endpoint_connection_list(test, checks=None):
if checks is None:
checks = []
myPrivateEndpointConnectionRes = test.cmd('az kusto private-endpoint-connection list '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks).get_output_in_json()
print(myPrivateEndpointConnectionRes[0]["name"])
test.kwargs.update({
'myPrivateEndpointConnection': myPrivateEndpointConnectionRes[0]["name"]
})
# EXAMPLE: /PrivateEndpointConnections/delete/Deletes a private endpoint connection with a given name.
@try_manual
def step_private_endpoint_connection_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto private-endpoint-connection delete -y '
'--cluster-name "{myCluster}" '
'--name "{myPrivateEndpointConnection}" '
'--resource-group "{rg}" ',
checks=checks)
test.cmd('az network private-endpoint delete '
'-n "{myPrivateEndpoint}" '
'-g "testrg" ',
checks=checks)
# EXAMPLE: /PrivateLinkResources/get/Gets private endpoint connection.
@try_manual
def step_private_link_resource_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto private-link-resource show '
'--cluster-name "{myCluster}" '
'--name "cluster" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /PrivateLinkResources/get/Gets private endpoint connections.
@try_manual
def step_private_link_resource_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto private-link-resource list '
'--cluster-name "{myCluster}" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Scripts/put/KustoScriptsCreateOrUpdate
@try_manual
def step_script_create(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto script create '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--continue-on-errors true '
'--script-url "https://clisatest.blob.core.windows.net/kustosa/kustoscript.txt" '
'--script-url-sas-token "sp=r&st=2021-10-10T13:27:37Z&se=2024-09-06T21:27:37Z&sv=2020-08-04&sr=b&sig=bQv3g3VpesJ3nv9Fbs%2Bc0V0PQby9AY%2BFLzQBxk5a9us%3D" '
'--resource-group "{rg}" '
'--name "{myScript}" ',
checks=[])
test.cmd('az kusto script wait --created '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" '
'--name "{myScript}" ',
checks=checks)
# EXAMPLE: /Scripts/get/KustoScriptsGet
@try_manual
def step_script_show(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto script show '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" '
'--name "{myScript}" ',
checks=checks)
# EXAMPLE: /Scripts/get/KustoScriptsList
@try_manual
def step_script_list(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto script list '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" ',
checks=checks)
# EXAMPLE: /Scripts/patch/KustoScriptsUpdate
@try_manual
def step_script_update(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto script update '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--continue-on-errors true '
'--script-url "https://clisatest.blob.core.windows.net/kustosa/kustoscript.txt" '
'--script-url-sas-token "sp=r&st=2021-10-10T13:27:37Z&se=2024-09-06T21:27:37Z&sv=2020-08-04&sr=b&sig=bQv3g3VpesJ3nv9Fbs%2Bc0V0PQby9AY%2BFLzQBxk5a9us%3D" '
'--resource-group "{rg}" '
'--name "{myScript}" ',
checks=checks)
# EXAMPLE: /Scripts/delete/KustoScriptsDelete
@try_manual
def step_script_delete(test, checks=None):
if checks is None:
checks = []
test.cmd('az kusto script delete -y '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" '
'--name "{myScript}" ',
checks=[])
test.cmd('az kusto script wait --deleted '
'--cluster-name "{myCluster}" '
'--database-name "KustoDatabase" '
'--resource-group "{rg}" '
'--name "{myScript}" ',
checks=checks)
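# ---------------------------------------------------------------------------
# Hedged illustration (added, not part of the generated module): every step
# above takes a ScenarioTest-style `test` object and formats placeholders such
# as {rg}, {subscription_id}, {myCluster}, {myCluster1} and
# {myAttachedDatabaseConfiguration} out of `test.kwargs`, which the calling
# scenario is assumed to have populated. The sketch below shows one plausible
# way to chain a few of the steps end to end; the ordering is an assumption
# derived from the resource dependencies visible in the commands themselves.
def _example_flow_sketch(test):
    """Hypothetical driver chaining a subset of the generated steps."""
    step_cluster_create(test, checks=[])
    step_leader_cluster_create(test, checks=[])
    step_database_create(test, checks=[])
    step_attached_database_configuration_create(test, checks=[])
    step_attached_database_configuration_show(test, checks=[])
    step_attached_database_configuration_delete(test, checks=[])
    step_database_delete(test, checks=[])
    step_cluster_delete(test, checks=[])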
| 36.102353
| 167
| 0.611138
|
78c3383883a9d05f1f129fbf5e14820cd98861d5
| 10,224
|
py
|
Python
|
bcs-ui/backend/uniapps/application/all_views/views.py
|
laodiu/bk-bcs
|
2a956a42101ff6487ff521fb3ef429805bfa7e26
|
[
"Apache-2.0"
] | null | null | null |
bcs-ui/backend/uniapps/application/all_views/views.py
|
laodiu/bk-bcs
|
2a956a42101ff6487ff521fb3ef429805bfa7e26
|
[
"Apache-2.0"
] | null | null | null |
bcs-ui/backend/uniapps/application/all_views/views.py
|
laodiu/bk-bcs
|
2a956a42101ff6487ff521fb3ef429805bfa7e26
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import copy
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from backend.components import paas_cc
from backend.container_service.clusters.base.utils import get_cluster_type
from backend.container_service.clusters.constants import ClusterType
from backend.utils.errcodes import ErrorCode
from ..base_views import error_codes
from ..constants import CATEGORY_MAP
from ..filters.base_metrics import BaseNamespaceMetric
from ..utils import APIResponse, cluster_env, exclude_records
from . import k8s_views
CLUSTER_ENV_MAP = settings.CLUSTER_ENV_FOR_FRONT
class GetProjectNamespace(BaseNamespaceMetric):
def get_namespace(self, request, project_id):
"""获取namespace"""
resp = paas_cc.get_namespace_list(request.user.token.access_token, project_id, desire_all_data=True)
if resp.get("code") != ErrorCode.NoError:
raise error_codes.APIError.f(resp.get("message"))
data = resp.get("data") or {}
return data.get("results") or []
def get_cluster_list(self, request, project_id, cluster_ids):
"""根据cluster_id获取集群信息"""
resp = paas_cc.get_cluster_list(request.user.token.access_token, project_id, cluster_ids)
if resp.get("code") != ErrorCode.NoError:
raise error_codes.APIError.f(resp.get("message"))
data = resp.get("data") or []
        # if not data:
        #     raise error_codes.APIError.f("cluster info query returned empty")
return data
def compose_data(
self, ns_map, cluster_env, ns_app, exist_app, ns_inst_error_count, create_error, app_status, all_ns_inst_count
):
"""组装返回数据"""
ns_map_copy = copy.deepcopy(ns_map)
for key, val in ns_map_copy.items():
cluster_id = val["cluster_id"]
error_num = (ns_inst_error_count.get((cluster_id, key[1])) or 0) + (create_error.get(int(key[0])) or 0)
ns_map[key]["total_num"] = all_ns_inst_count.get((cluster_id, key[1])) or ns_app.get(str(key[0])) or 0
if app_status in [1, "1"]:
ns_map[key]["total_num"] = (ns_map[key]["total_num"]) - error_num
if error_num > 0 and ns_app.get(str(key[0])) == error_num:
ns_map.pop(key)
continue
if app_status in [2, "2"]:
ns_map[key]["total_num"] = error_num
if error_num == 0:
ns_map.pop(key)
continue
ns_map[key]["env_type"] = cluster_env.get(cluster_id, {}).get('env_type')
ns_map[key]["error_num"] = error_num
ns_map[key]["cluster_name"] = cluster_env.get(cluster_id, {}).get('name')
if exist_app == "1":
if not ns_map[key]["total_num"]:
ns_map.pop(key, None)
def get_cluster_ns(self, ns_list, cluster_type, ns_id, cluster_env_map, request_cluster_id):
"""组装集群、命名空间等信息"""
ns_map = {}
ns_id_list = []
cluster_id_list = []
ns_name_list = []
for info in ns_list:
if exclude_records(
request_cluster_id,
info["cluster_id"],
cluster_type,
cluster_env_map.get(info["cluster_id"], {}).get("cluster_env"),
):
continue
if ns_id and str(info["id"]) != str(ns_id):
continue
ns_map[(info["id"], info["name"])] = {
"cluster_id": info["cluster_id"],
"id": info["id"],
"name": info["name"],
"project_id": info["project_id"],
}
ns_name_list.append(info["name"])
            # id and cluster_id are guaranteed to exist
ns_id_list.append(info["id"])
cluster_id_list.append(info["cluster_id"])
return ns_map, ns_id_list, cluster_id_list, ns_name_list
def get_cluster_id_env(self, request, project_id):
data = self.get_project_cluster_info(request, project_id)
if not data.get("results"):
return {}, {}
cluster_results = data.get("results") or []
cluster_env_map = {
info["cluster_id"]: {
"cluster_name": info["name"],
"cluster_env": cluster_env(info["environment"]),
"cluster_env_str": cluster_env(info["environment"], ret_num_flag=False),
}
for info in cluster_results
if not info["disabled"]
}
return cluster_results, cluster_env_map
def get(self, request, project_id):
"""获取项目下的所有命名空间"""
# 获取过滤参数
cluster_type, app_status, app_id, ns_id, request_cluster_id = self.get_filter_params(request, project_id)
exist_app = request.GET.get("exist_app")
        # Get the project kind
project_kind = self.project_kind(request)
        # Get the clusters and their environments under the project
cluster_list, cluster_env_map = self.get_cluster_id_env(request, project_id)
if not cluster_list:
return APIResponse({"data": []})
        # Get the namespaces under the project
ns_list = self.get_namespace(request, project_id)
if not ns_list:
return APIResponse({"data": []})
        # Assemble namespace data, namespace IDs, and cluster info under the project
ns_map, ns_id_list, cluster_id_list, ns_name_list = self.get_cluster_ns(
ns_list, cluster_type, ns_id, cluster_env_map, request_cluster_id
)
        # Map each cluster to its environment type and name
cluster_env = {
info["cluster_id"]: {'env_type': CLUSTER_ENV_MAP.get(info["environment"], "stag"), 'name': info['name']}
for info in cluster_list
}
inst_name = None
if app_id:
inst_name = self.get_inst_name(app_id)
category = request.GET.get("category")
if not category or category not in CATEGORY_MAP.keys():
raise error_codes.CheckFailed(_("类型不正确"))
client = k8s_views.GetNamespace()
ns_app, ns_inst_error_count, create_error, all_ns_inst_count = client.get(
request,
ns_id_list,
category,
ns_map,
project_id,
project_kind,
self.get_app_deploy_with_post,
inst_name,
ns_name_list,
cluster_id_list,
)
        # Compose the response data
self.compose_data(
ns_map, cluster_env, ns_app, exist_app, ns_inst_error_count, create_error, app_status, all_ns_inst_count
)
ret_data = ns_map.values()
return APIResponse({"data": ret_data})
class GetInstances(BaseNamespaceMetric):
def check_ns_with_project(self, request, project_id, ns_id, cluster_type, cluster_env_map):
"""判断命名空间属于项目"""
resp = paas_cc.get_namespace_list(request.user.token.access_token, project_id, desire_all_data=True)
if resp.get("code") != ErrorCode.NoError:
raise error_codes.APIError.f(resp.get("message"))
data = resp.get("data") or {}
if not data.get("results"):
raise error_codes.APIError(_("查询命名空间为空"))
ns_list = data["results"]
cluster_id = None
ns_name = None
        for info in ns_list:
            if str(info["id"]) == str(ns_id):
                cluster_id = info["cluster_id"]
                ns_name = info["name"]
        return cluster_id, ns_name
def get(self, request, project_id, ns_id):
cluster_type, app_status, app_id, filter_ns_id, request_cluster_id = self.get_filter_params(
request, project_id
)
if filter_ns_id and str(ns_id) != str(filter_ns_id):
return APIResponse({"data": {}})
        # Get the cluster environments under the project
cluster_env_map = self.get_cluster_id_env(request, project_id)
        # Check that the namespace belongs to the project
cluster_id, ns_name = self.check_ns_with_project(request, project_id, ns_id, cluster_type, cluster_env_map)
        # Shared clusters are not allowed to query applications through this API
if get_cluster_type(cluster_id) == ClusterType.SHARED:
return APIResponse({"data": {}})
inst_name = None
if app_id:
inst_name = self.get_inst_name(app_id)
ns_name_id = self.get_namespace_name_id(request, project_id)
        # Filter the data by category
category = request.GET.get("category")
if not category or category not in CATEGORY_MAP.keys():
raise error_codes.CheckFailed(_("类型不正确"))
client = k8s_views.GetInstances()
ret_data = client.get(
request,
project_id,
ns_id,
category,
inst_name,
app_status,
cluster_env_map,
cluster_id,
ns_name,
ns_name_id,
)
        # Split instances into those needing permission handling and those with default permissions
auth_instance_list = []
default_auth_instance_list = []
for info in ret_data["instance_list"]:
if info["from_platform"]:
auth_instance_list.append(info)
else:
# info["permissions"] = {"create": True, "delete": True, "view": True, "edit": True, "use": True}
default_auth_instance_list.append(info)
        # Attach permissions
ret_instance_list = self.bcs_perm_handler(request, project_id, auth_instance_list)
        # Permission resolution for instances not created from template sets
default_ret_instance_list = self.bcs_perm_handler(
request, project_id, default_auth_instance_list, tmpl_view=False
)
ret_instance_list.extend(default_ret_instance_list)
ret_data["instance_list"] = ret_instance_list
return APIResponse({"data": ret_data})
| 41.225806
| 118
| 0.61473
|
9a78de7731f035a7e8e46b3cab89fbdd80da0610
| 595
|
py
|
Python
|
Searching and Sorting/SearchingInMatrix.py
|
haaris272k/Problem-Solving-Collection
|
5c8d0c36aff0d525ffec880115f5e123d0f3092b
|
[
"MIT"
] | 1
|
2022-02-28T06:49:25.000Z
|
2022-02-28T06:49:25.000Z
|
Searching and Sorting/SearchingInMatrix.py
|
haaris272k/Problem-Solving-Collection
|
5c8d0c36aff0d525ffec880115f5e123d0f3092b
|
[
"MIT"
] | null | null | null |
Searching and Sorting/SearchingInMatrix.py
|
haaris272k/Problem-Solving-Collection
|
5c8d0c36aff0d525ffec880115f5e123d0f3092b
|
[
"MIT"
] | null | null | null |
"""Given a matrix of size n x m, where every row and column is sorted in increasing order, and a number x. Find whether element x is present in the matrix or not.
Example 1:
Input:
n = 3, m = 3, x = 62
matrix[][] = {{ 3, 30, 38},
              {36, 43, 60},
              {40, 51, 69}}
Output: 0
Explanation:
62 is not present in the matrix,
so output is 0."""
def SearchIn2Dmatrix(matrix, x):
for i in range(len(matrix)):
if x in matrix[i]:
return True
return False
matrix = [[3, 30, 38], [36, 43, 60], [40, 51, 69]]
x = 69
print(SearchIn2Dmatrix(matrix, x))
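# The helper above checks every row with `in`, so it runs in O(n*m) time and
# never exploits the sorted rows and columns. A common alternative (a sketch
# added here, not part of the original snippet) is the "staircase" search:
# start at the top-right corner, move left when the current value is too
# large and down when it is too small, which takes O(n + m) time.
def search_sorted_matrix(matrix, x):
    """Staircase search in a row- and column-sorted matrix."""
    if not matrix or not matrix[0]:
        return False
    row, col = 0, len(matrix[0]) - 1  # start at the top-right corner
    while row < len(matrix) and col >= 0:
        current = matrix[row][col]
        if current == x:
            return True
        if current > x:
            col -= 1  # everything below in this column is >= current, so x cannot be here
        else:
            row += 1  # everything to the left in this row is <= current, so x cannot be here
    return False
print(search_sorted_matrix(matrix, 62))  # False, matching the example in the docstring
print(search_sorted_matrix(matrix, x))   # True for x = 69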
| 22.884615
| 162
| 0.593277
|
8da812bc26eeeae6434651c9fad127a4a411873d
| 45,029
|
py
|
Python
|
ivy_tests/test_ivy/test_functional/test_core/test_general.py
|
saeedashrraf/ivy
|
ad57c359a11459d68965d9044a76ea30e175bf16
|
[
"Apache-2.0"
] | 1
|
2022-03-10T00:43:53.000Z
|
2022-03-10T00:43:53.000Z
|
ivy_tests/test_ivy/test_functional/test_core/test_general.py
|
Ahmed-Yahia-cs/ivy
|
33f1ab8e63a99bebd583ab8903dc0a2c4a660861
|
[
"Apache-2.0"
] | null | null | null |
ivy_tests/test_ivy/test_functional/test_core/test_general.py
|
Ahmed-Yahia-cs/ivy
|
33f1ab8e63a99bebd583ab8903dc0a2c4a660861
|
[
"Apache-2.0"
] | 1
|
2022-03-17T00:22:36.000Z
|
2022-03-17T00:22:36.000Z
|
"""Collection of tests for unified general functions."""
# global
import einops
import pytest
from hypothesis import given, strategies as st
import numpy as np
from numbers import Number
from collections.abc import Sequence
import torch.multiprocessing as multiprocessing
# local
import ivy
import ivy.functional.backends.numpy
import ivy.functional.backends.jax
import ivy.functional.backends.tensorflow
import ivy.functional.backends.torch
import ivy.functional.backends.mxnet
import ivy_tests.test_ivy.helpers as helpers
import ivy.functional.backends.numpy as ivy_np
# Helpers #
# --------#
def _get_shape_of_list(lst, shape=()):
if not lst:
return []
if not isinstance(lst, Sequence):
return shape
if isinstance(lst[0], Sequence):
length = len(lst[0])
if not all(len(item) == length for item in lst):
msg = "not all lists have the same length"
raise ValueError(msg)
shape += (len(lst),)
shape = _get_shape_of_list(lst[0], shape)
return shape
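# A quick illustration (added, not part of the original test module) of what
# the recursive helper above returns: it infers the shape of a uniformly
# nested list and raises ValueError when sibling lists are ragged.
def _shape_of_list_sketch():
    assert _get_shape_of_list([[1, 2, 3], [4, 5, 6]]) == (2, 3)
    assert _get_shape_of_list([7, 8]) == (2,)
    assert _get_shape_of_list([]) == []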
# Tests #
# ------#
# set_framework
@given(fw_str=st.sampled_from(["numpy", "jax", "torch", "mxnet"]))
def test_set_framework(fw_str, device, call):
ivy.set_framework(fw_str)
ivy.unset_framework()
# use_framework
def test_use_within_use_framework(device, call):
with ivy.functional.backends.numpy.use:
pass
with ivy.functional.backends.jax.use:
pass
with ivy.functional.backends.tensorflow.use:
pass
with ivy.functional.backends.torch.use:
pass
with ivy.functional.backends.mxnet.use:
pass
@given(allow_duplicates=st.booleans())
def test_match_kwargs(allow_duplicates):
def func_a(a, b, c=2):
pass
def func_b(a, d, e=5):
return None
class ClassA:
def __init__(self, c, f, g=3):
pass
kwargs = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6}
kwfa, kwfb, kwca = ivy.match_kwargs(
kwargs, func_a, func_b, ClassA, allow_duplicates=allow_duplicates
)
if allow_duplicates:
assert kwfa == {"a": 0, "b": 1, "c": 2}
assert kwfb == {"a": 0, "d": 3, "e": 4}
assert kwca == {"c": 2, "f": 5, "g": 6}
else:
assert kwfa == {"a": 0, "b": 1, "c": 2}
assert kwfb == {"d": 3, "e": 4}
assert kwca == {"f": 5, "g": 6}
# def test_get_referrers_recursive(device, call):
#
# class SomeClass:
# def __init__(self):
# self.x = [1, 2]
# self.y = [self.x]
#
# some_obj = SomeClass()
# refs = ivy.get_referrers_recursive(some_obj.x)
# ref_keys = refs.keys()
# assert len(ref_keys) == 3
# assert 'repr' in ref_keys
# assert refs['repr'] == '[1,2]'
# y_id = str(id(some_obj.y))
# y_refs = refs[y_id]
# assert y_refs['repr'] == '[[1,2]]'
# some_obj_dict_id = str(id(some_obj.__dict__))
# assert y_refs[some_obj_dict_id] == 'tracked'
# dict_refs = refs[some_obj_dict_id]
# assert dict_refs['repr'] == "{'x':[1,2],'y':[[1,2]]}"
# some_obj_id = str(id(some_obj))
# some_obj_refs = dict_refs[some_obj_id]
# assert some_obj_refs['repr'] == str(some_obj).replace(' ', '')
# assert len(some_obj_refs) == 1
# copy array
@given(dtype_and_x=helpers.dtype_and_values(ivy_np.valid_dtype_strs))
def test_copy_array(dtype_and_x, device, call, fw):
dtype, x = dtype_and_x
if fw == "torch" and dtype in ["uint16", "uint32", "uint64"]:
return
if call in [helpers.mx_call] and dtype == "int16":
# mxnet does not support int16
return
# smoke test
x = ivy.array(x, dtype, device)
ret = ivy.copy_array(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
helpers.assert_all_close(ivy.to_numpy(ret), ivy.to_numpy(x))
assert id(x) != id(ret)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
# array_equal
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtype_strs, n_arrays=2))
def test_array_equal(x0_n_x1_n_res, device, call, fw):
dtype0, x0 = x0_n_x1_n_res[0][0], x0_n_x1_n_res[1][0]
dtype1, x1 = x0_n_x1_n_res[0][1], x0_n_x1_n_res[1][1]
if fw == "torch" and (
dtype0 in ["uint16", "uint32", "uint64"]
or dtype1 in ["uint16", "uint32", "uint64"]
):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and (
dtype0 in ["int16", "bool"] or dtype1 in ["int16", "bool"]
):
# mxnet does not support int16, and does not support
        # bool for the broadcast_equal method used
return
# smoke test
x0 = ivy.array(x0, dtype=dtype0, device=device)
x1 = ivy.array(x1, dtype=dtype1, device=device)
res = ivy.array_equal(x0, x1)
# type test
assert ivy.is_ivy_array(x0)
assert ivy.is_ivy_array(x1)
assert isinstance(res, bool) or ivy.is_ivy_array(res)
# value test
assert res == np.array_equal(np.array(x0, dtype=dtype0), np.array(x1, dtype=dtype1))
# arrays_equal
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtype_strs, n_arrays=3))
def test_arrays_equal(x0_n_x1_n_res, device, call, fw):
dtype0, x0 = x0_n_x1_n_res[0][0], x0_n_x1_n_res[1][0]
dtype1, x1 = x0_n_x1_n_res[0][1], x0_n_x1_n_res[1][1]
dtype2, x2 = x0_n_x1_n_res[0][2], x0_n_x1_n_res[1][2]
if fw == "torch" and (
dtype0 in ["uint16", "uint32", "uint64"]
or dtype1 in ["uint16", "uint32", "uint64"]
or dtype2 in ["uint16", "uint32", "uint64"]
):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and (
dtype0 in ["int16", "bool"] or dtype1 in ["int16", "bool"]
):
# mxnet does not support int16, and does not support bool
        # for the broadcast_equal method used
return
# smoke test
x0 = ivy.array(x0, dtype0, device)
x1 = ivy.array(x1, dtype1, device)
x2 = ivy.array(x2, dtype2, device)
res = ivy.arrays_equal([x0, x1, x2])
# type test
assert ivy.is_ivy_array(x0)
assert ivy.is_ivy_array(x1)
assert ivy.is_ivy_array(x2)
assert isinstance(res, bool) or ivy.is_ivy_array(res)
# value test
true_res = (
np.array_equal(ivy.to_numpy(x0), ivy.to_numpy(x1))
and np.array_equal(ivy.to_numpy(x0), ivy.to_numpy(x2))
and np.array_equal(ivy.to_numpy(x1), ivy.to_numpy(x2))
)
assert res == true_res
# to_numpy
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtype_strs))
def test_to_numpy(x0_n_x1_n_res, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if fw == "torch" and (dtype in ["uint16", "uint32", "uint64"]):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and dtype == "int16":
# mxnet does not support int16
return
if call in [helpers.tf_graph_call]:
# to_numpy() requires eager execution
return
# smoke test
ret = ivy.to_numpy(ivy.array(object_in, dtype, device))
# type test
assert isinstance(ret, np.ndarray)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
helpers.assert_all_close(ret, np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
# to_scalar
@given(
object_in=st.sampled_from([[0.0], [[[1]]], [True], [[1.0]]]),
dtype=st.sampled_from(ivy_np.valid_dtype_strs),
)
def test_to_scalar(object_in, dtype, device, call, fw):
if fw == "torch" and (dtype in ["uint16", "uint32", "uint64"]):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and dtype == "int16":
# mxnet does not support int16
return
if call in [helpers.tf_graph_call]:
# to_scalar() requires eager execution
return
# smoke test
ret = ivy.to_scalar(ivy.array(object_in, dtype, device))
true_val = ivy.to_numpy(ivy.array(object_in, dtype=dtype)).item()
# type test
assert isinstance(ret, type(true_val))
# value test
assert ivy.to_scalar(ivy.array(object_in, dtype, device)) == true_val
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support scalar conversion
return
# to_list
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtype_strs))
def test_to_list(x0_n_x1_n_res, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if fw == "torch" and (dtype in ["uint16", "uint32", "uint64"]):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and dtype == "int16":
# mxnet does not support int16
return
if call in [helpers.tf_graph_call]:
# to_list() requires eager execution
return
# smoke test
ret = ivy.to_list(ivy.array(object_in, dtype, device))
# type test
assert isinstance(ret, list)
# cardinality test
assert _get_shape_of_list(ret) == _get_shape_of_list(object_in)
# value test
assert np.allclose(
np.nan_to_num(
np.asarray(ivy.to_list(ivy.array(object_in, dtype, device))),
posinf=np.inf,
neginf=-np.inf,
),
np.nan_to_num(np.array(object_in).astype(dtype), posinf=np.inf, neginf=-np.inf),
)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support list conversion
return
# shape
@given(
x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtype_strs),
as_tensor=st.booleans(),
tensor_fn=st.sampled_from([ivy.array, helpers.var_fn]),
)
def test_shape(x0_n_x1_n_res, as_tensor, tensor_fn, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if fw == "torch" and (
dtype in ["uint16", "uint32", "uint64"]
or (dtype not in ivy_np.valid_float_dtypes and tensor_fn == helpers.var_fn)
):
# torch does not support those dtypes
return
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.shape(tensor_fn(object_in, dtype, device), as_tensor)
# type test
if as_tensor:
assert ivy.is_ivy_array(ret)
else:
assert isinstance(ret, tuple)
ret = ivy.array(ret)
# cardinality test
assert ret.shape[0] == len(np.asarray(object_in).shape)
# value test
assert np.array_equal(
ivy.to_numpy(ret), np.asarray(np.asarray(object_in).shape, np.int32)
)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# get_num_dims
@given(
x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtype_strs),
as_tensor=st.booleans(),
tensor_fn=st.sampled_from([ivy.array, helpers.var_fn]),
)
def test_get_num_dims(x0_n_x1_n_res, as_tensor, tensor_fn, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if fw == "torch" and (
dtype in ["uint16", "uint32", "uint64"]
or (dtype not in ivy_np.valid_float_dtypes and tensor_fn == helpers.var_fn)
):
# torch does not support those dtypes
return
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
return
ret = ivy.get_num_dims(tensor_fn(object_in, dtype, device), as_tensor)
# type test
if as_tensor:
assert ivy.is_ivy_array(ret)
else:
assert isinstance(ret, int)
ret = ivy.array(ret)
# cardinality test
assert list(ret.shape) == []
# value test
assert np.array_equal(
ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32)
)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# clip_vector_norm
@pytest.mark.parametrize(
"x_max_norm_n_p_val_clipped",
[
(-0.5, 0.4, 2.0, -0.4),
([1.7], 1.5, 3.0, [1.5]),
(
[[0.8, 2.2], [1.5, 0.2]],
4.0,
1.0,
[[0.6808511, 1.8723406], [1.2765958, 0.17021278]],
),
(
[[0.8, 2.2], [1.5, 0.2]],
2.5,
2.0,
[[0.71749604, 1.9731141], [1.345305, 0.17937401]],
),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_clip_vector_norm(
x_max_norm_n_p_val_clipped, dtype, with_out, tensor_fn, device, call
):
# smoke test
if call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_max_norm_n_p_val_clipped[0], dtype, device)
max_norm = x_max_norm_n_p_val_clipped[1]
p_val = x_max_norm_n_p_val_clipped[2]
clipped = x_max_norm_n_p_val_clipped[3]
if with_out:
out = ivy.zeros(x.shape if len(x.shape) else (1,))
ret = ivy.clip_vector_norm(x, max_norm, p_val, out=out)
else:
ret = ivy.clip_vector_norm(x, max_norm, p_val)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == (x.shape if len(x.shape) else (1,))
# value test
assert np.allclose(
call(ivy.clip_vector_norm, x, max_norm, p_val), np.array(clipped)
)
if with_out:
if not ivy.current_framework_str() in ["tensorflow", "jax"]:
# these frameworks do not support native inplace updates
assert ret is out
assert ret.data is out.data
# compilation test
if call is helpers.torch_call:
# pytorch jit cannot compile global variables, in this case MIN_DENOMINATOR
return
# floormod
# @given(
# xy=helpers.dtype_and_values(ivy_np.valid_numeric_dtype_strs, n_arrays=2),
# as_variable=st.booleans(),
# with_out=st.booleans(),
# num_positional_args=st.integers(1, 2),
# native_array=st.booleans(),
# container=st.booleans(),
# instance_method=st.booleans(),
# )
# def test_floormod(
# xy,
# as_variable,
# with_out,
# num_positional_args,
# native_array,
# container,
# instance_method,
# device,
# call,
# fw,
# ):
# # smoke test
# dtype = xy[0]
# x = xy[1][0]
# divisor = np.abs(xy[1][1])
# if 0 in divisor:
# return
# if fw == "torch" and any(d in ["uint16", "uint32", "uint64"] for d in dtype):
# return
# helpers.test_array_function(
# dtype,
# as_variable,
# with_out,
# num_positional_args,
# native_array,
# container,
# instance_method,
# fw,
# "floormod",
# x=np.asarray(x, dtype=dtype[0]),
# y=np.asarray(divisor, dtype=dtype[1]),
# )
# unstack
@pytest.mark.parametrize(
"x_n_axis", [(1, -1), ([[0.0, 1.0, 2.0]], 0), ([[0.0, 1.0, 2.0]], 1)]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_unstack(x_n_axis, dtype, tensor_fn, device, call):
# smoke test
x, axis = x_n_axis
if (
isinstance(x, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, device)
ret = ivy.unstack(x, axis)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (
axis % len(x.shape)
if (axis is not None and len(x.shape) != 0)
else len(x.shape) - 1
)
if x.shape == ():
expected_shape = ()
else:
expected_shape = list(x.shape)
expected_shape.pop(axis_val)
assert ret[0].shape == tuple(expected_shape)
# value test
assert np.allclose(
call(ivy.unstack, x, axis),
np.asarray(ivy.functional.backends.numpy.unstack(ivy.to_numpy(x), axis)),
)
# fourier_encode
@pytest.mark.parametrize(
"x_n_mf_n_nb_n_gt",
[
(
[2.0],
4.0,
4,
[
[
2.0000000e00,
1.7484555e-07,
9.9805772e-01,
-5.2196848e-01,
3.4969111e-07,
1.0000000e00,
-6.2295943e-02,
-8.5296476e-01,
1.0000000e00,
]
],
),
(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[2.0, 4.0],
4,
[
[
[
1.0000000e00,
-8.7422777e-08,
-8.7422777e-08,
-8.7422777e-08,
-8.7422777e-08,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
],
[
2.0000000e00,
1.7484555e-07,
9.9805772e-01,
-5.2196848e-01,
-6.0398321e-07,
1.0000000e00,
-6.2295943e-02,
-8.5296476e-01,
1.0000000e00,
],
],
[
[
3.0000000e00,
-2.3849761e-08,
-2.3849761e-08,
-2.3849761e-08,
-2.3849761e-08,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
],
[
4.0000000e00,
3.4969111e-07,
-1.2434989e-01,
8.9044148e-01,
-1.2079664e-06,
1.0000000e00,
-9.9223840e-01,
4.5509776e-01,
1.0000000e00,
],
],
[
[
5.0000000e00,
-6.7553248e-07,
-6.7553248e-07,
-6.7553248e-07,
-6.7553248e-07,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
],
[
6.0000000e00,
4.7699523e-08,
-9.8256493e-01,
-9.9706185e-01,
-3.7192983e-06,
1.0000000e00,
1.8591987e-01,
7.6601014e-02,
1.0000000e00,
],
],
],
),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_fourier_encode(x_n_mf_n_nb_n_gt, dtype, tensor_fn, device, call):
# smoke test
x, max_freq, num_bands, ground_truth = x_n_mf_n_nb_n_gt
if (
isinstance(x, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, device)
if isinstance(max_freq, list):
max_freq = tensor_fn(max_freq, dtype, device)
ret = ivy.fourier_encode(x, max_freq, num_bands)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else list(x.shape)
expected_shape = x_shape + [1 + 2 * num_bands]
assert list(ret.shape) == expected_shape
# value test
assert np.allclose(
call(ivy.fourier_encode, x, max_freq, num_bands),
np.array(ground_truth),
atol=1e-5,
)
# indices_where
@pytest.mark.parametrize("x", [[True], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_indices_where(x, dtype, tensor_fn, device, call):
# smoke test
if (
isinstance(x, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, device)
ret = ivy.indices_where(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert len(ret.shape) == 2
assert ret.shape[-1] == len(x.shape)
# value test
assert np.allclose(
call(ivy.indices_where, x),
np.asarray(ivy.functional.backends.numpy.indices_where(ivy.to_numpy(x))),
)
# one_hot
@pytest.mark.parametrize(
"ind_n_depth", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_one_hot(ind_n_depth, dtype, tensor_fn, device, call):
# smoke test
ind, depth = ind_n_depth
if (
isinstance(ind, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
ind = ivy.array(ind, "int32", device)
ret = ivy.one_hot(ind, depth, device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == ind.shape + (depth,)
# value test
assert np.allclose(
call(ivy.one_hot, ind, depth, device),
np.asarray(ivy.functional.backends.numpy.one_hot(ivy.to_numpy(ind), depth)),
)
# cumsum
@pytest.mark.parametrize(
"x_n_axis",
[
([[0.0, 1.0, 2.0]], -1),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 0),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 1),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_cumsum(x_n_axis, dtype, with_out, tensor_fn, device, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, device)
if with_out:
if ivy.exists(axis):
out = ivy.zeros(x.shape)
ret = ivy.cumsum(x, axis, out=out)
else:
out = ivy.zeros(ivy.reshape(x, (-1,)).shape)
ret = ivy.cumsum(x, axis, out=out)
else:
ret = ivy.cumsum(x, axis)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(
call(ivy.cumsum, x, axis),
np.asarray(ivy.functional.backends.numpy.cumsum(ivy.to_numpy(x), axis)),
)
# out test
if with_out:
if not ivy.current_framework_str() in ["tensorflow", "jax"]:
# these frameworks do not support native inplace updates
assert ret is out
assert ret.data is out.data
# cumprod
@pytest.mark.parametrize(
"x_n_axis",
[
([[0.0, 1.0, 2.0]], -1),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 0),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 1),
],
)
@pytest.mark.parametrize("exclusive", [True, False])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_cumprod(x_n_axis, exclusive, dtype, with_out, tensor_fn, device, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, device)
if with_out:
if ivy.exists(axis):
out = ivy.zeros(x.shape)
ret = ivy.cumprod(x, axis, exclusive=exclusive, out=out)
else:
out = ivy.zeros(ivy.reshape(x, (-1,)).shape)
ret = ivy.cumprod(x, axis, exclusive=exclusive, out=out)
else:
ret = ivy.cumprod(x, axis, exclusive)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(
call(ivy.cumprod, x, axis, exclusive),
np.asarray(
ivy.functional.backends.numpy.cumprod(ivy.to_numpy(x), axis, exclusive)
),
)
# out test
if with_out:
if not ivy.current_framework_str() in ["tensorflow", "jax"]:
# these frameworks do not support native inplace updates
assert ret is out
assert ret.data is out.data
# scatter_flat
@pytest.mark.parametrize(
"inds_n_upd_n_size_n_tnsr_n_wdup",
[
([0, 4, 1, 2], [1, 2, 3, 4], 8, None, False),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], 8, None, True),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], None, [11, 10, 9, 8, 7, 6], True),
],
)
@pytest.mark.parametrize("red", ["sum", "min", "max", "replace"])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_flat(
inds_n_upd_n_size_n_tnsr_n_wdup, red, dtype, tensor_fn, device, call
):
# smoke test
if red in ("sum", "min", "max") and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, size, tensor, with_duplicates = inds_n_upd_n_size_n_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, "int32", device)
upd = tensor_fn(upd, dtype, device)
if tensor:
# pytorch variables do not support in-place updates
tensor = (
ivy.array(tensor, dtype, device)
if ivy.current_framework_str() == "torch"
else tensor_fn(tensor, dtype, device)
)
ret = ivy.scatter_flat(inds, upd, size, tensor, red, device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
if size:
assert ret.shape == (size,)
else:
assert ret.shape == tensor.shape
# value test
if red == "replace" and with_duplicates:
        # replace with duplicates gives non-deterministic outputs
return
assert np.allclose(
call(ivy.scatter_flat, inds, upd, size, tensor, red, device),
np.asarray(
ivy.functional.backends.numpy.scatter_flat(
ivy.to_numpy(inds),
ivy.to_numpy(upd),
size,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor,
red,
)
),
)
# scatter_nd
@pytest.mark.parametrize(
"inds_n_upd_n_shape_tnsr_n_wdup",
[
([[4], [3], [1], [7]], [9, 10, 11, 12], [8], None, False),
([[0, 1, 2]], [1], [3, 3, 3], None, False),
(
[[0], [2]],
[
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
],
[4, 4, 4],
None,
False,
),
(
[[0, 1, 2]],
[1],
None,
[
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[4, 5, 6], [7, 8, 9], [1, 2, 3]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
],
False,
),
],
)
@pytest.mark.parametrize("red", ["sum", "min", "max", "replace"])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_nd(
inds_n_upd_n_shape_tnsr_n_wdup, red, dtype, tensor_fn, device, call
):
# smoke test
if red in ("sum", "min", "max") and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, shape, tensor, with_duplicates = inds_n_upd_n_shape_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, "int32", device)
upd = tensor_fn(upd, dtype, device)
if tensor:
# pytorch variables do not support in-place updates
tensor = (
ivy.array(tensor, dtype, device)
if ivy.current_framework_str() == "torch"
else tensor_fn(tensor, dtype, device)
)
ret = ivy.scatter_nd(inds, upd, shape, tensor, red, device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
if shape:
assert tuple(ret.shape) == tuple(shape)
else:
assert tuple(ret.shape) == tuple(tensor.shape)
# value test
if red == "replace" and with_duplicates:
        # replace with duplicates gives non-deterministic outputs
return
ret = call(ivy.scatter_nd, inds, upd, shape, tensor, red, device)
true = np.asarray(
ivy.functional.backends.numpy.scatter_nd(
ivy.to_numpy(inds),
ivy.to_numpy(upd),
shape,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor,
red,
)
)
assert np.allclose(ret, true)
# gather
@pytest.mark.parametrize(
"prms_n_inds_n_axis",
[
([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [0, 4, 7], 0),
([[1, 2], [3, 4]], [[0, 0], [1, 0]], 1),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_gather(prms_n_inds_n_axis, dtype, with_out, tensor_fn, device, call):
# smoke test
prms, inds, axis = prms_n_inds_n_axis
prms = tensor_fn(prms, dtype, device)
inds = ivy.array(inds, "int32", device)
if with_out:
out = ivy.zeros(inds.shape)
ret = ivy.gather(prms, inds, axis, device, out=out)
else:
ret = ivy.gather(prms, inds, axis, device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == inds.shape
# value test
assert np.allclose(
call(ivy.gather, prms, inds, axis, device),
np.asarray(
ivy.functional.backends.numpy.gather(
ivy.to_numpy(prms), ivy.to_numpy(inds), axis
)
),
)
# out test
if with_out:
if not ivy.current_framework_str() in ["tensorflow", "jax"]:
# these frameworks do not support native inplace updates
assert ret is out
assert ret.data is out.data
# gather_nd
@pytest.mark.parametrize(
"prms_n_inds",
[
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[0, 1], [1, 0]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1]], [[1, 0]]]),
(
[[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]],
[[[0, 1, 0]], [[1, 0, 1]]],
),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_gather_nd(prms_n_inds, dtype, tensor_fn, device, call):
# smoke test
prms, inds = prms_n_inds
prms = tensor_fn(prms, dtype, device)
inds = ivy.array(inds, "int32", device)
ret = ivy.gather_nd(prms, inds, device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == inds.shape[:-1] + prms.shape[inds.shape[-1] :]
# value test
assert np.allclose(
call(ivy.gather_nd, prms, inds, device),
np.asarray(
ivy.functional.backends.numpy.gather_nd(
ivy.to_numpy(prms), ivy.to_numpy(inds)
)
),
)
# exists
@pytest.mark.parametrize("x", [[1.0], None, [[10.0, 9.0, 8.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_exists(x, dtype, tensor_fn, device, call):
# smoke test
x = tensor_fn(x, dtype, device) if x is not None else None
ret = ivy.exists(x)
# type test
assert isinstance(ret, bool)
# value test
y_true = x is not None
assert ret == y_true
# default
@pytest.mark.parametrize(
"x_n_dv", [([1.0], [2.0]), (None, [2.0]), ([[10.0, 9.0, 8.0]], [2.0])]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_default(x_n_dv, dtype, tensor_fn, device, call):
x, dv = x_n_dv
# smoke test
x = tensor_fn(x, dtype, device) if x is not None else None
dv = tensor_fn(dv, dtype, device)
ret = ivy.default(x, dv)
# type test
assert ivy.is_ivy_array(ret)
# value test
y_true = ivy.to_numpy(x if x is not None else dv)
assert np.allclose(call(ivy.default, x, dv), y_true)
def test_cache_fn(device, call):
def func():
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn()
ret0_again = cached_fn()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions
# each use the same global dict
ret0 = ivy.cache_fn(func)()
ret0_again = ivy.cache_fn(func)()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_cache_fn_with_args(device, call):
def func(_):
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn(0)
ret0_again = cached_fn(0)
ret1 = cached_fn(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions
# each use the same global dict
ret0 = ivy.cache_fn(func)(0)
ret0_again = ivy.cache_fn(func)(0)
ret1 = ivy.cache_fn(func)(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# def test_framework_setting_with_threading(device, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# def thread_fn():
# ivy.set_framework('numpy')
# x_ = np.array([0., 1., 2.])
# for _ in range(2000):
# try:
# ivy.mean(x_)
# except TypeError:
# return False
# ivy.unset_framework()
# return True
#
# # get original framework string and array
# fws = ivy.current_framework_str()
# x = ivy.array([0., 1., 2.])
#
# # start numpy loop thread
# thread = threading.Thread(target=thread_fn)
# thread.start()
#
# # start local original framework loop
# ivy.set_framework(fws)
# for _ in range(2000):
# ivy.mean(x)
# ivy.unset_framework()
#
# assert not thread.join()
def test_framework_setting_with_multiprocessing(device, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def worker_fn(out_queue):
ivy.set_framework("numpy")
x_ = np.array([0.0, 1.0, 2.0])
for _ in range(1000):
try:
ivy.mean(x_)
except TypeError:
out_queue.put(False)
return
ivy.unset_framework()
out_queue.put(True)
# get original framework string and array
fws = ivy.current_framework_str()
x = ivy.array([0.0, 1.0, 2.0])
# start numpy loop thread
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(target=worker_fn, args=(output_queue,))
worker.start()
# start local original framework loop
ivy.set_framework(fws)
for _ in range(1000):
ivy.mean(x)
ivy.unset_framework()
worker.join()
assert output_queue.get_nowait()
def test_explicit_ivy_framework_handles(device, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
# store original framework string and unset
fw_str = ivy.current_framework_str()
ivy.unset_framework()
# set with explicit handle caught
ivy_exp = ivy.get_framework(fw_str)
assert ivy_exp.current_framework_str() == fw_str
# assert backend implemented function is accessible
assert "array" in ivy_exp.__dict__
assert callable(ivy_exp.array)
# assert joint implemented function is also accessible
assert "cache_fn" in ivy_exp.__dict__
assert callable(ivy_exp.cache_fn)
# set global ivy to numpy
ivy.set_framework("numpy")
# assert the explicit handle is still unchanged
assert ivy.current_framework_str() == "numpy"
assert ivy_exp.current_framework_str() == fw_str
# unset global ivy from numpy
ivy.unset_framework()
def test_class_ivy_handles(device, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
class ArrayGen:
def __init__(self, ivyh):
self._ivy = ivyh
def get_array(self):
return self._ivy.array([0.0, 1.0, 2.0])
# create instance
ag = ArrayGen(ivy.get_framework())
# create array from array generator
x = ag.get_array()
# verify this is not a numpy array
assert not isinstance(x, np.ndarray)
# change global framework to numpy
ivy.set_framework("numpy")
# create another array from array generator
x = ag.get_array()
    # verify this is still not a numpy array
assert not isinstance(x, np.ndarray)
# einops_rearrange
@pytest.mark.parametrize(
"x_n_pattern_n_newx",
[([[0.0, 1.0, 2.0, 3.0]], "b n -> n b", [[0.0], [1.0], [2.0], [3.0]])],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_rearrange(x_n_pattern_n_newx, dtype, tensor_fn, device, call):
# smoke test
x, pattern, new_x = x_n_pattern_n_newx
x = tensor_fn(x, dtype, device)
ret = ivy.einops_rearrange(x, pattern)
true_ret = einops.rearrange(ivy.to_native(x), pattern)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_reduce
@pytest.mark.parametrize(
"x_n_pattern_n_red_n_newx", [([[0.0, 1.0, 2.0, 3.0]], "b n -> b", "mean", [1.5])]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_reduce(x_n_pattern_n_red_n_newx, dtype, tensor_fn, device, call):
# smoke test
x, pattern, reduction, new_x = x_n_pattern_n_red_n_newx
x = tensor_fn(x, dtype, device)
ret = ivy.einops_reduce(x, pattern, reduction)
true_ret = einops.reduce(ivy.to_native(x), pattern, reduction)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_repeat
@pytest.mark.parametrize(
"x_n_pattern_n_al_n_newx",
[
(
[[0.0, 1.0, 2.0, 3.0]],
"b n -> b n c",
{"c": 2},
[[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]],
)
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_repeat(x_n_pattern_n_al_n_newx, dtype, tensor_fn, device, call):
# smoke test
x, pattern, axes_lengths, new_x = x_n_pattern_n_al_n_newx
x = tensor_fn(x, dtype, device)
ret = ivy.einops_repeat(x, pattern, **axes_lengths)
true_ret = einops.repeat(ivy.to_native(x), pattern, **axes_lengths)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# container types
def test_container_types(device, call):
cont_types = ivy.container_types()
assert isinstance(cont_types, list)
for cont_type in cont_types:
assert hasattr(cont_type, "keys")
assert hasattr(cont_type, "values")
assert hasattr(cont_type, "items")
def test_inplace_arrays_supported(device, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ["numpy", "mxnet", "torch"]:
assert ivy.inplace_arrays_supported()
elif cur_fw in ["jax", "tensorflow"]:
assert not ivy.inplace_arrays_supported()
else:
raise Exception("Unrecognized framework")
def test_inplace_variables_supported(device, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ["numpy", "mxnet", "torch", "tensorflow"]:
assert ivy.inplace_variables_supported()
elif cur_fw in ["jax"]:
assert not ivy.inplace_variables_supported()
else:
raise Exception("Unrecognized framework")
@pytest.mark.parametrize("x_n_new", [([0.0, 1.0, 2.0], [2.0, 1.0, 0.0]), (0.0, 1.0)])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_update(x_n_new, tensor_fn, device, call):
x_orig, new_val = x_n_new
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, "float32", device)
new_val = tensor_fn(new_val, "float32", device)
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or (
tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()
):
x = ivy.inplace_update(x_orig, new_val)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(new_val))
return
pytest.skip()
@pytest.mark.parametrize("x_n_dec", [([0.0, 1.0, 2.0], [2.0, 1.0, 0.0]), (0.0, 1.0)])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_decrement(x_n_dec, tensor_fn, device, call):
x_orig, dec = x_n_dec
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, "float32", device)
dec = tensor_fn(dec, "float32", device)
new_val = x_orig - dec
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or (
tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()
):
x = ivy.inplace_decrement(x_orig, dec)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
return
pytest.skip()
@pytest.mark.parametrize("x_n_inc", [([0.0, 1.0, 2.0], [2.0, 1.0, 0.0]), (0.0, 1.0)])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_increment(x_n_inc, tensor_fn, device, call):
x_orig, inc = x_n_inc
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, "float32", device)
inc = tensor_fn(inc, "float32", device)
new_val = x_orig + inc
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or (
tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()
):
x = ivy.inplace_increment(x_orig, inc)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
return
pytest.skip()
# Still to Add #
# ---------------#
# is_ivy_array
# is_array
# is_ivy_container
# all_equal
# to_numpy
# clip_matrix_norm
# unstack
# value_is_nan
# has_nans
# exists
# shape_to_tuple
# try_else_none
# arg_names
# cache_fn
# current_framework_str
# get_min_denominator
# set_min_denominator
# get_min_base
# set_min_base
# stable_divide
# stable_pow
# get_all_arrays_in_memory
# num_arrays_in_memory
# print_all_arrays_in_memory
# set_queue_timeout
# queue_timeout
# tmp_dir
# set_tmp_dir
# supports_inplace
# assert_supports_inplace
| 31.777699
| 88
| 0.594395
|
e8bbe1df2c32c2e9eac436e9834f870941989f0a
| 7,029
|
py
|
Python
|
catkin_ws/devel/.private/baxter_maintenance_msgs/lib/python2.7/dist-packages/baxter_maintenance_msgs/msg/_UpdateSource.py
|
roop-pal/robotic-folding
|
a0e062ac6d23cd07fe10e3f45abc4ba50e533141
|
[
"RSA-MD"
] | null | null | null |
catkin_ws/devel/.private/baxter_maintenance_msgs/lib/python2.7/dist-packages/baxter_maintenance_msgs/msg/_UpdateSource.py
|
roop-pal/robotic-folding
|
a0e062ac6d23cd07fe10e3f45abc4ba50e533141
|
[
"RSA-MD"
] | null | null | null |
catkin_ws/devel/.private/baxter_maintenance_msgs/lib/python2.7/dist-packages/baxter_maintenance_msgs/msg/_UpdateSource.py
|
roop-pal/robotic-folding
|
a0e062ac6d23cd07fe10e3f45abc4ba50e533141
|
[
"RSA-MD"
] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from baxter_maintenance_msgs/UpdateSource.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class UpdateSource(genpy.Message):
_md5sum = "88ad69e3ed4d619e167c9d83e6d9310f"
_type = "baxter_maintenance_msgs/UpdateSource"
_has_header = False #flag to mark the presence of a Header object
_full_text = """string devname
string filename
string version
string uuid
"""
__slots__ = ['devname','filename','version','uuid']
_slot_types = ['string','string','string','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
devname,filename,version,uuid
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(UpdateSource, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.devname is None:
self.devname = ''
if self.filename is None:
self.filename = ''
if self.version is None:
self.version = ''
if self.uuid is None:
self.uuid = ''
else:
self.devname = ''
self.filename = ''
self.version = ''
self.uuid = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.devname
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.filename
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.version
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.uuid
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.devname = str[start:end].decode('utf-8')
else:
self.devname = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.filename = str[start:end].decode('utf-8')
else:
self.filename = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.version = str[start:end].decode('utf-8')
else:
self.version = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.uuid = str[start:end].decode('utf-8')
else:
self.uuid = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.devname
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.filename
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.version
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.uuid
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.devname = str[start:end].decode('utf-8')
else:
self.devname = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.filename = str[start:end].decode('utf-8')
else:
self.filename = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.version = str[start:end].decode('utf-8')
else:
self.version = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.uuid = str[start:end].decode('utf-8')
else:
self.uuid = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
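# Illustration only (not part of the genpy-generated file): a serialize/deserialize
# round trip for this message type, assuming an in-memory buffer.
#     from io import BytesIO
#     msg = UpdateSource(devname='dev0', filename='fw.bin', version='1.0.0', uuid='1234')
#     buff = BytesIO()
#     msg.serialize(buff)
#     assert UpdateSource().deserialize(buff.getvalue()).devname == 'dev0'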
| 31.24
| 145
| 0.595675
|
563c42ddef27ac6b12297cc03634c6678a369cf4
| 825
|
py
|
Python
|
mysite/urls.py
|
alias-3/Twitter_sentilysis
|
1a751b6fe9f772075f439debac79cecc025a6f74
|
[
"MIT"
] | null | null | null |
mysite/urls.py
|
alias-3/Twitter_sentilysis
|
1a751b6fe9f772075f439debac79cecc025a6f74
|
[
"MIT"
] | null | null | null |
mysite/urls.py
|
alias-3/Twitter_sentilysis
|
1a751b6fe9f772075f439debac79cecc025a6f74
|
[
"MIT"
] | null | null | null |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
    path('', include('sentilysis.urls'))
]
| 34.375
| 77
| 0.710303
|
cdb01f7fc91a07ea926fb3a0fb9e12128dee5342
| 1,725
|
py
|
Python
|
glhe/aggregation/no_agg.py
|
stianchris/GLHE
|
80c3eecca81ffd50d5077f87027c9441292452f5
|
[
"MIT"
] | 2
|
2018-11-06T08:04:04.000Z
|
2020-10-09T14:52:36.000Z
|
glhe/aggregation/no_agg.py
|
stianchris/GLHE
|
80c3eecca81ffd50d5077f87027c9441292452f5
|
[
"MIT"
] | 68
|
2018-03-27T01:43:22.000Z
|
2019-09-09T12:05:44.000Z
|
glhe/aggregation/no_agg.py
|
mitchute/GLHE
|
80c3eecca81ffd50d5077f87027c9441292452f5
|
[
"MIT"
] | 4
|
2018-05-24T03:02:44.000Z
|
2021-08-16T13:54:09.000Z
|
import numpy as np
from glhe.aggregation.agg_types import AggregationTypes
from glhe.aggregation.base_agg import BaseAgg
class NoAgg(BaseAgg):
"""
No aggregation. Just keep all of the values.
"""
Type = AggregationTypes.NO_AGG
def __init__(self, inputs):
BaseAgg.__init__(self, inputs)
def aggregate(self, time: int, energy: float):
# check for iteration
if self.prev_update_time == time:
return
# log the values
self.energy = np.append(self.energy, energy)
dt = time - self.prev_update_time
self.dts = np.append(self.dts, dt)
# update time
self.prev_update_time = time
def calc_temporal_superposition(self, time_step: int) -> float:
# compute temporal superposition
# this includes all thermal history before the present time
q = self.energy / self.dts
dq = np.diff(q, prepend=0)
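        # dq is the step change in heat extraction rate between successive intervals
        # (np.diff with prepend=0); the superposition below sums each increment's
        # response through the g-function dot product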
# g-function values
dts = np.append(self.dts, time_step)
times = np.flipud(np.cumsum(np.flipud(dts)))[:-1]
lntts = np.log(times / self.ts)
g = self.interp_g(lntts)
# convolution of delta_q and the g-function values
if self.interp_g_b:
# convolution for "g" and "g_b" g-functions
g_b = self.interp_g_b(lntts)
return float(np.dot(dq, np.add(g, g_b)))
else:
# convolution for "g" g-functions only
return float(np.dot(dq, g))
def get_g_value(self, time_step: int) -> float:
pass # pragma: no cover
def get_g_b_value(self, time_step: int) -> float:
pass # pragma: no cover
def get_q_prev(self) -> float:
pass # pragma: no cover
| 29.237288
| 67
| 0.610435
|
ef4cd14cf03ae0cca9aa7d4166c71eb88bc4e272
| 491
|
py
|
Python
|
client/setup.py
|
Yinqingwen/Dva
|
3b8d1d1435f6a804a9c370006b931f9dc50a7462
|
[
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | 1
|
2021-04-10T20:19:35.000Z
|
2021-04-10T20:19:35.000Z
|
client/setup.py
|
Yinqingwen/Dva
|
3b8d1d1435f6a804a9c370006b931f9dc50a7462
|
[
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | null | null | null |
client/setup.py
|
Yinqingwen/Dva
|
3b8d1d1435f6a804a9c370006b931f9dc50a7462
|
[
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | 3
|
2021-07-13T10:52:48.000Z
|
2022-03-11T03:31:45.000Z
|
#!/usr/bin/env python
from setuptools import setup
setup(name='dvaclient',
version='1.0',
description='Deep Video Analytics Client',
author='Akshay Bhat',
author_email='dvaclient@deepvideoanalytics.com',
url='https://www.deepvideoanalytics.com/',
packages=['dvaclient'],
package_data={'dvaclient': ['schema.json']},
include_package_data=True,
install_requires=[
'jsonschema==2.6.0',
'requests'
],
)
| 25.842105
| 54
| 0.613035
|
6c2ff00362b371338db21f75992e9a30c64f13ca
| 6,865
|
py
|
Python
|
Lib/test/test_keywordonlyarg.py
|
deadsnakes/python3.3
|
4faaf44cd5478410ac3b977351c1965fa054b5e9
|
[
"PSF-2.0"
] | 652
|
2015-07-26T00:00:17.000Z
|
2022-02-24T18:30:04.000Z
|
Lib/test/test_keywordonlyarg.py
|
deadsnakes/python3.3
|
4faaf44cd5478410ac3b977351c1965fa054b5e9
|
[
"PSF-2.0"
] | 8
|
2015-09-07T03:38:19.000Z
|
2021-05-23T03:18:51.000Z
|
check-python33-manual/samples/standard_library_337/Lib/test/test_keywordonlyarg.py
|
DaveKaretnyk/parsing-utils2
|
40085bbd399fa605f2f2a4708d385a64ffc907de
|
[
"MIT"
] | 40
|
2015-07-24T19:45:08.000Z
|
2021-11-01T14:54:56.000Z
|
"""Unit tests for the keyword only argument specified in PEP 3102."""
__author__ = "Jiwon Seo"
__email__ = "seojiwon at gmail dot com"
import unittest
from test.support import run_unittest
def posonly_sum(pos_arg1, *arg, **kwarg):
return pos_arg1 + sum(arg) + sum(kwarg.values())
def keywordonly_sum(*, k1=0, k2):
return k1 + k2
def keywordonly_nodefaults_sum(*, k1, k2):
return k1 + k2
def keywordonly_and_kwarg_sum(*, k1, k2, **kwarg):
return k1 + k2 + sum(kwarg.values())
def mixedargs_sum(a, b=0, *arg, k1, k2=0):
return a + b + k1 + k2 + sum(arg)
def mixedargs_sum2(a, b=0, *arg, k1, k2=0, **kwargs):
return a + b + k1 + k2 + sum(arg) + sum(kwargs.values())
def sortnum(*nums, reverse=False):
return sorted(list(nums), reverse=reverse)
def sortwords(*words, reverse=False, **kwargs):
return sorted(list(words), reverse=reverse)
class Foo:
def __init__(self, *, k1, k2=0):
self.k1 = k1
self.k2 = k2
def set(self, p1, *, k1, k2):
self.k1 = k1
self.k2 = k2
def sum(self):
return self.k1 + self.k2
class KeywordOnlyArgTestCase(unittest.TestCase):
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, "<test>", "single")
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testSyntaxErrorForFunctionDefinition(self):
self.assertRaisesSyntaxError("def f(p, *):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, p1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *k1, k1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, k1, k1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, k1, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, None, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p, *, (k1, k2), **kw):\n pass\n")
def testSyntaxForManyArguments(self):
fundef = "def f("
for i in range(255):
fundef += "i%d, "%i
fundef += "*, key=100):\n pass\n"
self.assertRaisesSyntaxError(fundef)
fundef2 = "def foo(i,*,"
for i in range(255):
fundef2 += "i%d, "%i
fundef2 += "lastarg):\n pass\n"
self.assertRaisesSyntaxError(fundef2)
# exactly 255 arguments, should compile ok
fundef3 = "def f(i,*,"
for i in range(253):
fundef3 += "i%d, "%i
fundef3 += "lastarg):\n pass\n"
compile(fundef3, "<test>", "single")
def testTooManyPositionalErrorMessage(self):
def f(a, b=None, *, c=None):
pass
with self.assertRaises(TypeError) as exc:
f(1, 2, 3)
expected = "f() takes from 1 to 2 positional arguments but 3 were given"
self.assertEqual(str(exc.exception), expected)
def testSyntaxErrorForFunctionCall(self):
self.assertRaisesSyntaxError("f(p, k=1, p2)")
self.assertRaisesSyntaxError("f(p, k1=50, *(1,2), k1=100)")
def testRaiseErrorFuncallWithUnexpectedKeywordArgument(self):
self.assertRaises(TypeError, keywordonly_sum, ())
self.assertRaises(TypeError, keywordonly_nodefaults_sum, ())
self.assertRaises(TypeError, Foo, ())
try:
keywordonly_sum(k2=100, non_existing_arg=200)
self.fail("should raise TypeError")
except TypeError:
pass
try:
keywordonly_nodefaults_sum(k2=2)
self.fail("should raise TypeError")
except TypeError:
pass
def testFunctionCall(self):
self.assertEqual(1, posonly_sum(1))
self.assertEqual(1+2, posonly_sum(1,**{"2":2}))
self.assertEqual(1+2+3, posonly_sum(1,*(2,3)))
self.assertEqual(1+2+3+4, posonly_sum(1,*(2,3),**{"4":4}))
self.assertEqual(1, keywordonly_sum(k2=1))
self.assertEqual(1+2, keywordonly_sum(k1=1, k2=2))
self.assertEqual(1+2, keywordonly_and_kwarg_sum(k1=1, k2=2))
self.assertEqual(1+2+3, keywordonly_and_kwarg_sum(k1=1, k2=2, k3=3))
self.assertEqual(1+2+3+4,
keywordonly_and_kwarg_sum(k1=1, k2=2,
**{"a":3,"b":4}))
self.assertEqual(1+2, mixedargs_sum(1, k1=2))
self.assertEqual(1+2+3, mixedargs_sum(1, 2, k1=3))
self.assertEqual(1+2+3+4, mixedargs_sum(1, 2, k1=3, k2=4))
self.assertEqual(1+2+3+4+5, mixedargs_sum(1, 2, 3, k1=4, k2=5))
self.assertEqual(1+2, mixedargs_sum2(1, k1=2))
self.assertEqual(1+2+3, mixedargs_sum2(1, 2, k1=3))
self.assertEqual(1+2+3+4, mixedargs_sum2(1, 2, k1=3, k2=4))
self.assertEqual(1+2+3+4+5, mixedargs_sum2(1, 2, 3, k1=4, k2=5))
self.assertEqual(1+2+3+4+5+6,
mixedargs_sum2(1, 2, 3, k1=4, k2=5, k3=6))
self.assertEqual(1+2+3+4+5+6,
mixedargs_sum2(1, 2, 3, k1=4, **{'k2':5, 'k3':6}))
self.assertEqual(1, Foo(k1=1).sum())
self.assertEqual(1+2, Foo(k1=1,k2=2).sum())
self.assertEqual([1,2,3], sortnum(3,2,1))
self.assertEqual([3,2,1], sortnum(1,2,3, reverse=True))
self.assertEqual(['a','b','c'], sortwords('a','c','b'))
self.assertEqual(['c','b','a'], sortwords('a','c','b', reverse=True))
self.assertEqual(['c','b','a'],
sortwords('a','c','b', reverse=True, ignore='ignore'))
def testKwDefaults(self):
def foo(p1,p2=0, *, k1, k2=0):
return p1 + p2 + k1 + k2
self.assertEqual(2, foo.__code__.co_kwonlyargcount)
self.assertEqual({"k2":0}, foo.__kwdefaults__)
foo.__kwdefaults__ = {"k1":0}
try:
foo(1,k1=10)
self.fail("__kwdefaults__ is not properly changed")
except TypeError:
pass
def test_kwonly_methods(self):
class Example:
def f(self, *, k1=1, k2=2):
return k1, k2
self.assertEqual(Example().f(k1=1, k2=2), (1, 2))
self.assertEqual(Example.f(Example(), k1=1, k2=2), (1, 2))
self.assertRaises(TypeError, Example.f, k1=1, k2=2)
def test_issue13343(self):
# The Python compiler must scan all symbols of a function to
# determine their scope: global, local, cell...
# This was not done for the default values of keyword
# arguments in a lambda definition, and the following line
# used to fail with a SystemError.
lambda *, k1=unittest: None
def test_mangling(self):
class X:
def f(self, *, __a=42):
return __a
self.assertEqual(X().f(), 42)
def test_main():
run_unittest(KeywordOnlyArgTestCase)
if __name__ == "__main__":
test_main()
| 37.71978
| 80
| 0.586016
|
c241167fbee7c3fc99202b0b2a10910f9f3c053e
| 22,431
|
py
|
Python
|
synapse/storage/registration.py
|
TheJJ/synapse
|
1032393dfb0c865fc540539dfe649e7b1a32037a
|
[
"Apache-2.0"
] | null | null | null |
synapse/storage/registration.py
|
TheJJ/synapse
|
1032393dfb0c865fc540539dfe649e7b1a32037a
|
[
"Apache-2.0"
] | null | null | null |
synapse/storage/registration.py
|
TheJJ/synapse
|
1032393dfb0c865fc540539dfe649e7b1a32037a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from twisted.internet import defer
from synapse.api.errors import StoreError, Codes
from synapse.storage import background_updates
from synapse.storage._base import SQLBaseStore
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from six.moves import range
class RegistrationWorkerStore(SQLBaseStore):
@cached()
def get_user_by_id(self, user_id):
return self._simple_select_one(
table="users",
keyvalues={
"name": user_id,
},
retcols=[
"name", "password_hash", "is_guest",
"consent_version", "consent_server_notice_sent",
"appservice_id",
],
allow_none=True,
desc="get_user_by_id",
)
@cached()
def get_user_by_access_token(self, token):
"""Get a user from the given access token.
Args:
token (str): The access token of a user.
Returns:
defer.Deferred: None, if the token did not match, otherwise dict
including the keys `name`, `is_guest`, `device_id`, `token_id`.
"""
return self.runInteraction(
"get_user_by_access_token",
self._query_for_auth,
token
)
@defer.inlineCallbacks
def is_server_admin(self, user):
res = yield self._simple_select_one_onecol(
table="users",
keyvalues={"name": user.to_string()},
retcol="admin",
allow_none=True,
desc="is_server_admin",
)
defer.returnValue(res if res else False)
def _query_for_auth(self, txn, token):
sql = (
"SELECT users.name, users.is_guest, access_tokens.id as token_id,"
" access_tokens.device_id"
" FROM users"
" INNER JOIN access_tokens on users.name = access_tokens.user_id"
" WHERE token = ?"
)
txn.execute(sql, (token,))
rows = self.cursor_to_dict(txn)
if rows:
return rows[0]
return None
class RegistrationStore(RegistrationWorkerStore,
background_updates.BackgroundUpdateStore):
def __init__(self, db_conn, hs):
super(RegistrationStore, self).__init__(db_conn, hs)
self.clock = hs.get_clock()
self.register_background_index_update(
"access_tokens_device_index",
index_name="access_tokens_device_id",
table="access_tokens",
columns=["user_id", "device_id"],
)
self.register_background_index_update(
"users_creation_ts",
index_name="users_creation_ts",
table="users",
columns=["creation_ts"],
)
# we no longer use refresh tokens, but it's possible that some people
# might have a background update queued to build this index. Just
# clear the background update.
self.register_noop_background_update("refresh_tokens_device_index")
@defer.inlineCallbacks
def add_access_token_to_user(self, user_id, token, device_id=None):
"""Adds an access token for the given user.
Args:
user_id (str): The user ID.
token (str): The new access token to add.
device_id (str): ID of the device to associate with the access
token
Raises:
StoreError if there was a problem adding this.
"""
next_id = self._access_tokens_id_gen.get_next()
yield self._simple_insert(
"access_tokens",
{
"id": next_id,
"user_id": user_id,
"token": token,
"device_id": device_id,
},
desc="add_access_token_to_user",
)
def register(self, user_id, token=None, password_hash=None,
was_guest=False, make_guest=False, appservice_id=None,
create_profile_with_localpart=None, admin=False):
"""Attempts to register an account.
Args:
user_id (str): The desired user ID to register.
token (str): The desired access token to use for this user. If this
is not None, the given access token is associated with the user
id.
password_hash (str): Optional. The password hash for this user.
was_guest (bool): Optional. Whether this is a guest account being
upgraded to a non-guest account.
            make_guest (boolean): True if the new user should be a guest,
false to add a regular user account.
appservice_id (str): The ID of the appservice registering the user.
create_profile_with_localpart (str): Optionally create a profile for
the given localpart.
Raises:
StoreError if the user_id could not be registered.
"""
return self.runInteraction(
"register",
self._register,
user_id,
token,
password_hash,
was_guest,
make_guest,
appservice_id,
create_profile_with_localpart,
admin
)
def _register(
self,
txn,
user_id,
token,
password_hash,
was_guest,
make_guest,
appservice_id,
create_profile_with_localpart,
admin,
):
now = int(self.clock.time())
next_id = self._access_tokens_id_gen.get_next()
try:
if was_guest:
# Ensure that the guest user actually exists
# ``allow_none=False`` makes this raise an exception
# if the row isn't in the database.
self._simple_select_one_txn(
txn,
"users",
keyvalues={
"name": user_id,
"is_guest": 1,
},
retcols=("name",),
allow_none=False,
)
self._simple_update_one_txn(
txn,
"users",
keyvalues={
"name": user_id,
"is_guest": 1,
},
updatevalues={
"password_hash": password_hash,
"upgrade_ts": now,
"is_guest": 1 if make_guest else 0,
"appservice_id": appservice_id,
"admin": 1 if admin else 0,
}
)
else:
self._simple_insert_txn(
txn,
"users",
values={
"name": user_id,
"password_hash": password_hash,
"creation_ts": now,
"is_guest": 1 if make_guest else 0,
"appservice_id": appservice_id,
"admin": 1 if admin else 0,
}
)
except self.database_engine.module.IntegrityError:
raise StoreError(
400, "User ID already taken.", errcode=Codes.USER_IN_USE
)
if token:
# it's possible for this to get a conflict, but only for a single user
# since tokens are namespaced based on their user ID
txn.execute(
"INSERT INTO access_tokens(id, user_id, token)"
" VALUES (?,?,?)",
(next_id, user_id, token,)
)
if create_profile_with_localpart:
# set a default displayname serverside to avoid ugly race
# between auto-joins and clients trying to set displaynames
txn.execute(
"INSERT INTO profiles(user_id, displayname) VALUES (?,?)",
(create_profile_with_localpart, create_profile_with_localpart)
)
self._invalidate_cache_and_stream(
txn, self.get_user_by_id, (user_id,)
)
txn.call_after(self.is_guest.invalidate, (user_id,))
def get_users_by_id_case_insensitive(self, user_id):
"""Gets users that match user_id case insensitively.
Returns a mapping of user_id -> password_hash.
"""
def f(txn):
sql = (
"SELECT name, password_hash FROM users"
" WHERE lower(name) = lower(?)"
)
txn.execute(sql, (user_id,))
return dict(txn)
return self.runInteraction("get_users_by_id_case_insensitive", f)
def user_set_password_hash(self, user_id, password_hash):
"""
NB. This does *not* evict any cache because the one use for this
removes most of the entries subsequently anyway so it would be
pointless. Use flush_user separately.
"""
def user_set_password_hash_txn(txn):
self._simple_update_one_txn(
txn,
'users', {
'name': user_id
},
{
'password_hash': password_hash
}
)
self._invalidate_cache_and_stream(
txn, self.get_user_by_id, (user_id,)
)
return self.runInteraction(
"user_set_password_hash", user_set_password_hash_txn
)
def user_set_consent_version(self, user_id, consent_version):
"""Updates the user table to record privacy policy consent
Args:
user_id (str): full mxid of the user to update
consent_version (str): version of the policy the user has consented
to
Raises:
StoreError(404) if user not found
"""
def f(txn):
self._simple_update_one_txn(
txn,
table='users',
keyvalues={'name': user_id, },
updatevalues={'consent_version': consent_version, },
)
self._invalidate_cache_and_stream(
txn, self.get_user_by_id, (user_id,)
)
return self.runInteraction("user_set_consent_version", f)
def user_set_consent_server_notice_sent(self, user_id, consent_version):
"""Updates the user table to record that we have sent the user a server
notice about privacy policy consent
Args:
user_id (str): full mxid of the user to update
consent_version (str): version of the policy we have notified the
user about
Raises:
StoreError(404) if user not found
"""
def f(txn):
self._simple_update_one_txn(
txn,
table='users',
keyvalues={'name': user_id, },
updatevalues={'consent_server_notice_sent': consent_version, },
)
self._invalidate_cache_and_stream(
txn, self.get_user_by_id, (user_id,)
)
return self.runInteraction("user_set_consent_server_notice_sent", f)
def user_delete_access_tokens(self, user_id, except_token_id=None,
device_id=None):
"""
Invalidate access tokens belonging to a user
Args:
user_id (str): ID of user the tokens belong to
except_token_id (str): list of access_tokens IDs which should
*not* be deleted
device_id (str|None): ID of device the tokens are associated with.
If None, tokens associated with any device (or no device) will
be deleted
Returns:
defer.Deferred[list[str, int, str|None, int]]: a list of
(token, token id, device id) for each of the deleted tokens
"""
def f(txn):
keyvalues = {
"user_id": user_id,
}
if device_id is not None:
keyvalues["device_id"] = device_id
items = keyvalues.items()
where_clause = " AND ".join(k + " = ?" for k, _ in items)
values = [v for _, v in items]
if except_token_id:
where_clause += " AND id != ?"
values.append(except_token_id)
txn.execute(
"SELECT token, id, device_id FROM access_tokens WHERE %s" % where_clause,
values
)
tokens_and_devices = [(r[0], r[1], r[2]) for r in txn]
for token, _, _ in tokens_and_devices:
self._invalidate_cache_and_stream(
txn, self.get_user_by_access_token, (token,)
)
txn.execute(
"DELETE FROM access_tokens WHERE %s" % where_clause,
values
)
return tokens_and_devices
return self.runInteraction(
"user_delete_access_tokens", f,
)
def delete_access_token(self, access_token):
def f(txn):
self._simple_delete_one_txn(
txn,
table="access_tokens",
keyvalues={
"token": access_token
},
)
self._invalidate_cache_and_stream(
txn, self.get_user_by_access_token, (access_token,)
)
return self.runInteraction("delete_access_token", f)
@cachedInlineCallbacks()
def is_guest(self, user_id):
res = yield self._simple_select_one_onecol(
table="users",
keyvalues={"name": user_id},
retcol="is_guest",
allow_none=True,
desc="is_guest",
)
defer.returnValue(res if res else False)
@defer.inlineCallbacks
def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
yield self._simple_upsert("user_threepids", {
"medium": medium,
"address": address,
}, {
"user_id": user_id,
"validated_at": validated_at,
"added_at": added_at,
})
@defer.inlineCallbacks
def user_get_threepids(self, user_id):
ret = yield self._simple_select_list(
"user_threepids", {
"user_id": user_id
},
['medium', 'address', 'validated_at', 'added_at'],
'user_get_threepids'
)
defer.returnValue(ret)
@defer.inlineCallbacks
def get_user_id_by_threepid(self, medium, address):
ret = yield self._simple_select_one(
"user_threepids",
{
"medium": medium,
"address": address
},
['user_id'], True, 'get_user_id_by_threepid'
)
if ret:
defer.returnValue(ret['user_id'])
defer.returnValue(None)
def user_delete_threepids(self, user_id):
return self._simple_delete(
"user_threepids",
keyvalues={
"user_id": user_id,
},
desc="user_delete_threepids",
)
def user_delete_threepid(self, user_id, medium, address):
return self._simple_delete(
"user_threepids",
keyvalues={
"user_id": user_id,
"medium": medium,
"address": address,
},
desc="user_delete_threepids",
)
@defer.inlineCallbacks
def count_all_users(self):
"""Counts all users registered on the homeserver."""
def _count_users(txn):
txn.execute("SELECT COUNT(*) AS users FROM users")
rows = self.cursor_to_dict(txn)
if rows:
return rows[0]["users"]
return 0
ret = yield self.runInteraction("count_users", _count_users)
defer.returnValue(ret)
def count_daily_user_type(self):
"""
        Counts 1) native non-guest users
               2) native guest users
3) bridged users
who registered on the homeserver in the past 24 hours
"""
def _count_daily_user_type(txn):
yesterday = int(self._clock.time()) - (60 * 60 * 24)
sql = """
SELECT user_type, COALESCE(count(*), 0) AS count FROM (
SELECT
CASE
WHEN is_guest=0 AND appservice_id IS NULL THEN 'native'
WHEN is_guest=1 AND appservice_id IS NULL THEN 'guest'
WHEN is_guest=0 AND appservice_id IS NOT NULL THEN 'bridged'
END AS user_type
FROM users
WHERE creation_ts > ?
) AS t GROUP BY user_type
"""
results = {'native': 0, 'guest': 0, 'bridged': 0}
txn.execute(sql, (yesterday,))
for row in txn:
results[row[0]] = row[1]
return results
return self.runInteraction("count_daily_user_type", _count_daily_user_type)
@defer.inlineCallbacks
def count_nonbridged_users(self):
def _count_users(txn):
txn.execute("""
SELECT COALESCE(COUNT(*), 0) FROM users
WHERE appservice_id IS NULL
""")
count, = txn.fetchone()
return count
ret = yield self.runInteraction("count_users", _count_users)
defer.returnValue(ret)
@defer.inlineCallbacks
def find_next_generated_user_id_localpart(self):
"""
Gets the localpart of the next generated user ID.
Generated user IDs are integers, and we aim for them to be as small as
we can. Unfortunately, it's possible some of them are already taken by
existing users, and there may be gaps in the already taken range. This
        function returns the start of the first allocatable gap. This avoids
        the case of ID 10000000 being pre-allocated, which would waste the
        first (and shortest) generated user IDs.
"""
def _find_next_generated_user_id(txn):
txn.execute("SELECT name FROM users")
            regex = re.compile(r"^@(\d+):")
found = set()
for user_id, in txn:
match = regex.search(user_id)
if match:
found.add(int(match.group(1)))
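            # illustrative example: if the ids 0, 1, 2 and 5 are already taken,
            # the loop below returns 3, the first unused non-negative integer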
for i in range(len(found) + 1):
if i not in found:
return i
defer.returnValue((yield self.runInteraction(
"find_next_generated_user_id",
_find_next_generated_user_id
)))
@defer.inlineCallbacks
def get_3pid_guest_access_token(self, medium, address):
ret = yield self._simple_select_one(
"threepid_guest_access_tokens",
{
"medium": medium,
"address": address
},
["guest_access_token"], True, 'get_3pid_guest_access_token'
)
if ret:
defer.returnValue(ret["guest_access_token"])
defer.returnValue(None)
@defer.inlineCallbacks
def save_or_get_3pid_guest_access_token(
self, medium, address, access_token, inviter_user_id
):
"""
Gets the 3pid's guest access token if exists, else saves access_token.
Args:
medium (str): Medium of the 3pid. Must be "email".
address (str): 3pid address.
access_token (str): The access token to persist if none is
already persisted.
inviter_user_id (str): User ID of the inviter.
Returns:
deferred str: Whichever access token is persisted at the end
of this function call.
"""
def insert(txn):
txn.execute(
"INSERT INTO threepid_guest_access_tokens "
"(medium, address, guest_access_token, first_inviter) "
"VALUES (?, ?, ?, ?)",
(medium, address, access_token, inviter_user_id)
)
try:
yield self.runInteraction("save_3pid_guest_access_token", insert)
defer.returnValue(access_token)
except self.database_engine.module.IntegrityError:
ret = yield self.get_3pid_guest_access_token(medium, address)
defer.returnValue(ret)
def add_user_pending_deactivation(self, user_id):
"""
Adds a user to the table of users who need to be parted from all the rooms they're
in
"""
return self._simple_insert(
"users_pending_deactivation",
values={
"user_id": user_id,
},
desc="add_user_pending_deactivation",
)
def del_user_pending_deactivation(self, user_id):
"""
        Removes the given user from the table of users who need to be parted from all the
rooms they're in, effectively marking that user as fully deactivated.
"""
return self._simple_delete_one(
"users_pending_deactivation",
keyvalues={
"user_id": user_id,
},
desc="del_user_pending_deactivation",
)
def get_user_pending_deactivation(self):
"""
Gets one user from the table of users waiting to be parted from all the rooms
they're in.
"""
return self._simple_select_one_onecol(
"users_pending_deactivation",
keyvalues={},
retcol="user_id",
allow_none=True,
desc="get_users_pending_deactivation",
)
| 34.245802
| 90
| 0.54817
|
9686fde074e33406699b9125506cd0cf38699024
| 426
|
py
|
Python
|
lib/wordcheck.py
|
amichaelparker/pass_gen
|
64c6fe8aa95a352df06c568fd5c78df2f1cd2ae2
|
[
"Unlicense"
] | null | null | null |
lib/wordcheck.py
|
amichaelparker/pass_gen
|
64c6fe8aa95a352df06c568fd5c78df2f1cd2ae2
|
[
"Unlicense"
] | null | null | null |
lib/wordcheck.py
|
amichaelparker/pass_gen
|
64c6fe8aa95a352df06c568fd5c78df2f1cd2ae2
|
[
"Unlicense"
] | null | null | null |
''' Word testing functions '''
def caps_test(word):
''' Test words for capitalization '''
if word[0] == word[0].upper():
return True
return False
def word_length(word, maximum, minimum):
''' Check that word length falls into requested bounds '''
if minimum > maximum:
minimum = maximum
if minimum <= len(word) <= maximum:
return True
return False
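# Minimal usage sketch (added for illustration; not part of the original module):
if __name__ == '__main__':
    # caps_test flags words whose first letter is uppercase
    assert caps_test('Password') is True
    assert caps_test('password') is False
    # word_length checks the length against an inclusive [minimum, maximum] range
    assert word_length('hunter2', maximum=12, minimum=6) is True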
| 20.285714
| 63
| 0.584507
|
1f018d0f42ecc899b72f5f375ac0acacc708e744
| 634
|
py
|
Python
|
setup.py
|
Alquimista/scru
|
331e0213a0a3a3c53b8da5bfd541767412479bee
|
[
"MIT"
] | null | null | null |
setup.py
|
Alquimista/scru
|
331e0213a0a3a3c53b8da5bfd541767412479bee
|
[
"MIT"
] | null | null | null |
setup.py
|
Alquimista/scru
|
331e0213a0a3a3c53b8da5bfd541767412479bee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name='Scru',
version='1.0.0',
description='Screenshot Uploader.',
author='Roberto Gea (Alquimista)',
author_email='alquimistaotaku@gmail.com',
license='MIT',
packages=['scru'],
install_requires=['python-notify', 'plac'],
data_files=[
('/usr/bin', ['bin/scru']),
('/usr/bin', ['bin/scru-openbox-pipemenu']),
('/usr/share/sounds', ['sounds/scru_shot.wav']),
('/usr/share/icons/hicolor/scalable/apps', ['icons/scru.svg']),
('/usr/share/licenses/scru', ['LICENSE'])],
)
| 30.190476
| 71
| 0.591483
|
098f9f4830e142f23785a5721a2d78747e933f44
| 9,067
|
py
|
Python
|
tasks/generate.py
|
thedrow/pip
|
1927dfce6a5f2b1d0a1cfd5c09e29c7422c769cd
|
[
"MIT"
] | null | null | null |
tasks/generate.py
|
thedrow/pip
|
1927dfce6a5f2b1d0a1cfd5c09e29c7422c769cd
|
[
"MIT"
] | null | null | null |
tasks/generate.py
|
thedrow/pip
|
1927dfce6a5f2b1d0a1cfd5c09e29c7422c769cd
|
[
"MIT"
] | 1
|
2018-09-20T21:00:47.000Z
|
2018-09-20T21:00:47.000Z
|
import base64
import io
import os
import shutil
import tempfile
import zipfile
import invoke
from . import paths
@invoke.task
def authors():
print("[generate.authors] Generating AUTHORS")
# Get our list of authors
print("[generate.authors] Collecting author names")
r = invoke.run("git log --use-mailmap --format'=%aN <%aE>'", hide=True)
authors = []
seen_authors = set()
for author in r.stdout.splitlines():
author = author.strip()
if author.lower() not in seen_authors:
seen_authors.add(author.lower())
authors.append(author)
# Sort our list of Authors by their case insensitive name
authors = sorted(authors, key=lambda x: x.lower())
# Write our authors to the AUTHORS file
print("[generate.authors] Writing AUTHORS")
with io.open("AUTHORS.txt", "w", encoding="utf8") as fp:
fp.write(u"\n".join(authors))
fp.write(u"\n")
@invoke.task
def installer(installer_path=os.path.join(paths.CONTRIB, "get-pip.py")):
print("[generate.installer] Generating installer")
# Define our wrapper script
WRAPPER_SCRIPT = """
#!/usr/bin/env python
#
# Hi There!
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). This is a base64 encoding of a zip file, this zip file contains
# an entire copy of pip.
#
# Pip is a thing that installs packages, pip itself is a package that someone
# might want to install, especially if they're looking to run this get-pip.py
# script. Pip has a lot of code to deal with the security of installing
# packages, various edge cases on various platforms, and other such sort of
# "tribal knowledge" that has been encoded in its code base. Because of this
# we basically include an entire copy of pip inside this blob. We do this
# because the alternatives are attempt to implement a "minipip" that probably
# doesn't do things correctly and has weird edge cases, or compress pip itself
# down into a single file.
#
# If you're wondering how this is created, it is using an invoke task located
# in tasks/generate.py called "installer". It can be invoked by using
# ``invoke generate.installer``.
import os.path
import pkgutil
import shutil
import sys
import struct
import tempfile
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
iterbytes = iter
else:
def iterbytes(buf):
return (ord(byte) for byte in buf)
try:
from base64 import b85decode
except ImportError:
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{{|}}~")
def b85decode(b):
_b85dec = [None] * 256
for i, c in enumerate(iterbytes(_b85alphabet)):
_b85dec[c] = i
padding = (-len(b)) % 5
b = b + b'~' * padding
out = []
packI = struct.Struct('!I').pack
for i in range(0, len(b), 5):
chunk = b[i:i + 5]
acc = 0
try:
for c in iterbytes(chunk):
acc = acc * 85 + _b85dec[c]
except TypeError:
for j, c in enumerate(iterbytes(chunk)):
if _b85dec[c] is None:
raise ValueError(
'bad base85 character at position %d' % (i + j)
)
raise
try:
out.append(packI(acc))
except struct.error:
raise ValueError('base85 overflow in hunk starting at byte %d'
% i)
result = b''.join(out)
if padding:
result = result[:-padding]
return result
def bootstrap(tmpdir=None):
# Import pip so we can use it to install pip and maybe setuptools too
import pip
from pip.commands.install import InstallCommand
# Wrapper to provide default certificate with the lowest priority
class CertInstallCommand(InstallCommand):
def parse_args(self, args):
# If cert isn't specified in config or environment, we provide our
# own certificate through defaults.
# This allows user to specify custom cert anywhere one likes:
# config, environment variable or argv.
if not self.parser.get_default_values().cert:
self.parser.defaults["cert"] = cert_path # calculated below
return super(CertInstallCommand, self).parse_args(args)
pip.commands_dict["install"] = CertInstallCommand
# We always want to install pip
packages = ["pip"]
# Check if the user has requested us not to install setuptools
if "--no-setuptools" in sys.argv or os.environ.get("PIP_NO_SETUPTOOLS"):
args = [x for x in sys.argv[1:] if x != "--no-setuptools"]
else:
args = sys.argv[1:]
# We want to see if setuptools is available before attempting to
# install it
try:
import setuptools # noqa
except ImportError:
packages += ["setuptools"]
delete_tmpdir = False
try:
# Create a temporary directory to act as a working directory if we were
# not given one.
if tmpdir is None:
tmpdir = tempfile.mkdtemp()
delete_tmpdir = True
# We need to extract the SSL certificates from requests so that they
# can be passed to --cert
cert_path = os.path.join(tmpdir, "cacert.pem")
with open(cert_path, "wb") as cert:
cert.write(pkgutil.get_data("pip._vendor.requests", "cacert.pem"))
# Execute the included pip and use it to install the latest pip and
# setuptools from PyPI
sys.exit(pip.main(["install", "--upgrade"] + packages + args))
finally:
# Remove our temporary directory
if delete_tmpdir and tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
def main():
tmpdir = None
try:
# Create a temporary working directory
tmpdir = tempfile.mkdtemp()
# Unpack the zipfile into the temporary directory
pip_zip = os.path.join(tmpdir, "pip.zip")
with open(pip_zip, "wb") as fp:
fp.write(b85decode(DATA.replace(b"\\n", b"")))
# Add the zipfile to sys.path so that we can import it
sys.path.insert(0, pip_zip)
# Run the bootstrap
bootstrap(tmpdir=tmpdir)
finally:
# Clean up our temporary working directory
if tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
DATA = b\"\"\"
{zipfile}
\"\"\"
if __name__ == "__main__":
main()
""".lstrip()
# Get all of the files we want to add to the zip file
print("[generate.installer] Collect all the files that should be zipped")
all_files = []
for root, dirs, files in os.walk(os.path.join(paths.PROJECT_ROOT, "pip")):
for pyfile in files:
if os.path.splitext(pyfile)[1] in {".py", ".pem", ".cfg", ".exe"}:
path = os.path.join(root, pyfile)
all_files.append(
"/".join(
path.split("/")[len(paths.PROJECT_ROOT.split("/")):]
)
)
tmpdir = tempfile.mkdtemp()
try:
# Get a temporary path to use as staging for the pip zip
zpth = os.path.join(tmpdir, "pip.zip")
# Write the pip files to the zip archive
print("[generate.installer] Generate the bundled zip of pip")
with zipfile.ZipFile(zpth, "w", compression=zipfile.ZIP_DEFLATED) as z:
for filename in all_files:
z.write(os.path.join(paths.PROJECT_ROOT, filename), filename)
        # Get the binary data that comprises our zip file
with open(zpth, "rb") as fp:
data = fp.read()
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
# Write out the wrapper script that will take the place of the zip script
# The reason we need to do this instead of just directly executing the
# zip script is that while Python will happily execute a zip script if
# passed it on the file system, it will not however allow this to work if
# passed it via stdin. This means that this wrapper script is required to
# make ``curl https://...../get-pip.py | python`` continue to work.
print(
"[generate.installer] Write the wrapper script with the bundled zip "
"file"
)
zipdata = base64.b85encode(data).decode("utf8")
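    # split the base85 payload into fixed-width (79 character) lines before embedding
    # it, so the generated get-pip.py is written as manageable short lines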
chunked = []
for i in range(0, len(zipdata), 79):
chunked.append(zipdata[i:i + 79])
with open(installer_path, "w") as fp:
fp.write(WRAPPER_SCRIPT.format(zipfile="\n".join(chunked)))
# Ensure the permissions on the newly created file
oldmode = os.stat(installer_path).st_mode & 0o7777
newmode = (oldmode | 0o555) & 0o7777
os.chmod(installer_path, newmode)
print("[generate.installer] Generated installer")
| 33.958801
| 79
| 0.621264
|
ce1e1f78d3f47deed2f7a218387c63a109304342
| 243
|
py
|
Python
|
default_config.py
|
Crescent-Labs/iqra-web
|
0577e4708b92548502564e2d7b4ae6878bf4c1bb
|
[
"MIT"
] | 25
|
2016-09-08T16:13:20.000Z
|
2021-03-27T22:25:01.000Z
|
default_config.py
|
Crescent-Labs/iqra-web
|
0577e4708b92548502564e2d7b4ae6878bf4c1bb
|
[
"MIT"
] | 13
|
2016-08-31T06:06:21.000Z
|
2022-02-12T05:27:11.000Z
|
default_config.py
|
Crescent-Labs/iqra-web
|
0577e4708b92548502564e2d7b4ae6878bf4c1bb
|
[
"MIT"
] | 8
|
2016-11-21T18:00:17.000Z
|
2020-05-16T23:23:10.000Z
|
# NOTE: Copy this file and rename it to "config.py", then update its values.
API_ROUTE = 'http://0.0.0.0:5000'
# Update these values:
API_KEY = "abcd" # Replace with api key
SECRET_KEY = "abcd" # Used by SeaSurf for generating CSRF tokens
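# Illustration only (assumption: a Flask-style app loads this module with
# app.config.from_object; that wiring is not shown in this file):
#     app = Flask(__name__)
#     app.config.from_object('config')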
| 30.375
| 76
| 0.707819
|
51e7baa35daada7a5c4eccf2acbf4fe13e7a3620
| 28,491
|
py
|
Python
|
tests/conftest.py
|
broeder-j/aiida_fleur_plugin
|
cca54b194f4b217abb69aaa1fca0db52c6c830c3
|
[
"MIT"
] | 1
|
2017-02-07T12:31:38.000Z
|
2017-02-07T12:31:38.000Z
|
tests/conftest.py
|
broeder-j/aiida_fleur_plugin
|
cca54b194f4b217abb69aaa1fca0db52c6c830c3
|
[
"MIT"
] | 16
|
2017-04-03T11:42:50.000Z
|
2017-05-18T16:25:39.000Z
|
tests/conftest.py
|
broeder-j/aiida_fleur_plugin
|
cca54b194f4b217abb69aaa1fca0db52c6c830c3
|
[
"MIT"
] | null | null | null |
# pylint: disable=redefined-outer-name
"""Initialise a text database and profile for pytest.
This part of code is copied from aiida-quantumespresso"""
import io
import os
from collections.abc import Mapping
import pytest
import sys
from aiida.orm import Node, Code, Dict, RemoteData, CalcJobNode
from pathlib import Path
CONFTEST_LOCATION = Path(__file__).parent.resolve()
# aiida_testing.mock_codes in development, not yet a stable dependency
# therefore we try to import it and if it fails we skip tests with it
RUN_REGRESSION_TESTS = True
try:
import aiida_testing
from aiida_testing.export_cache._fixtures import run_with_cache, export_cache, load_cache, hash_code_by_entrypoint
except ImportError:
print('AiiDA-testing not in path. Running without regression tests for Workchains and CalcJobs.')
RUN_REGRESSION_TESTS = False
if RUN_REGRESSION_TESTS:
pytest_plugins = [
'aiida.manage.tests.pytest_fixtures', 'aiida_testing.mock_code', 'aiida_testing.export_cache',
'masci_tools.testing.bokeh'
] # pylint: disable=invalid-name
else:
pytest_plugins = ['aiida.manage.tests.pytest_fixtures', 'masci_tools.testing.bokeh']
def pytest_addoption(parser):
parser.addoption('--local-exe-hdf5', action='store_true', help='Is the local executable compiled with HDF5')
def pytest_configure(config):
"""
    Things can be added here via the pytest config; this could also be part of a separate file.
    So far we only add some markers here so that a certain group of tests can be executed selectively.
    By convention the markers are all lowercase.
"""
config.addinivalue_line('markers',
'regression_test: test using the aiida-testing plugin for workflow regression tests')
def pytest_collection_modifyitems(session, config, items):
"""After test collection modify collection.
Skip regression test if aiida-tesing is not there
"""
import aiida
skip_regression = pytest.mark.skip(
reason='Workflow regression test is skipped, because aiida-testing is not available')
aiida_version_skip = pytest.mark.skipif(
aiida.get_version().startswith('2.'),
reason='Workflow regression test is skipped, because aiida-testing is not compatible with AiiDA 2.0')
regression_items = [item for item in items if 'regression_test' in item.keywords]
if not RUN_REGRESSION_TESTS:
for item in regression_items:
item.add_marker(skip_regression)
for item in regression_items:
item.add_marker(aiida_version_skip)
@pytest.fixture(scope='function')
def fixture_sandbox():
"""Return a `SandboxFolder`."""
from aiida.common.folders import SandboxFolder
with SandboxFolder() as folder:
yield folder
@pytest.fixture
def fixture_localhost(aiida_localhost):
"""Return a localhost `Computer`."""
localhost = aiida_localhost
localhost.set_default_mpiprocs_per_machine(1)
return localhost
@pytest.fixture
def fixture_code(fixture_localhost):
"""Return a `Code` instance configured to run calculations of given entry point on localhost `Computer`."""
def _fixture_code(entry_point_name):
return Code(input_plugin_name=entry_point_name, remote_computer_exec=[fixture_localhost, '/bin/ls'])
return _fixture_code
@pytest.fixture(name='test_file')
def test_file_fixture():
"""Test file fixture"""
def _test_file(relative_path):
"""
Return path to file in the tests/files folder
Returns filesystem path
"""
return os.fspath(CONFTEST_LOCATION / 'files' / Path(relative_path))
return _test_file
@pytest.fixture
def generate_calc_job():
"""Fixture to construct a new `CalcJob` instance and call `prepare_for_submission` for testing `CalcJob` classes.
The fixture will return the `CalcInfo` returned by `prepare_for_submission` and the temporary folder that was passed
to it, into which the raw input files will have been written.
"""
def _generate_calc_job(folder, entry_point_name, inputs=None):
"""Fixture to generate a mock `CalcInfo` for testing calculation jobs."""
from aiida.engine.utils import instantiate_process
from aiida.manage.manager import get_manager
from aiida.plugins import CalculationFactory
manager = get_manager()
runner = manager.get_runner()
process_class = CalculationFactory(entry_point_name)
process = instantiate_process(runner, process_class, **inputs)
calc_info = process.prepare_for_submission(folder)
return calc_info
return _generate_calc_job
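# Hedged usage sketch (added for illustration; the test name and the exact inputs
# below are assumptions, not taken from this conftest): in a test module the
# sandbox and calc-job fixtures above are typically combined like
#
#   def test_prepare_for_submission(fixture_sandbox, generate_calc_job, fixture_code):
#       inputs = {'code': fixture_code('fleur.inpgen'), ...}  # plus the remaining required inputs
#       calc_info = generate_calc_job(fixture_sandbox, 'fleur.inpgen', inputs)
#       assert calc_info is not None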
@pytest.fixture
def generate_calc_job_node(fixture_localhost):
"""Fixture to generate a mock `CalcJobNode` for testing parsers."""
def flatten_inputs(inputs, prefix=''):
"""Flatten inputs recursively like :meth:`aiida.engine.processes.process::Process._flatten_inputs`."""
flat_inputs = []
for key, value in inputs.items():
if isinstance(value, Mapping):
flat_inputs.extend(flatten_inputs(value, prefix=prefix + key + '__'))
else:
flat_inputs.append((prefix + key, value))
return flat_inputs
def _generate_calc_job_node(entry_point_name,
computer=None,
test_name=None,
inputs=None,
attributes=None,
store=False,
retrieve_list=None):
"""Fixture to generate a mock `CalcJobNode` for testing parsers.
:param entry_point_name: entry point name of the calculation class
:param computer: a `Computer` instance
:param test_name: relative path of directory with test output files in the `fixtures/{entry_point_name}` folder.
        :param inputs: any optional nodes to add as input links to the current CalcJobNode
:param attributes: any optional attributes to set on the node
:return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node
"""
from aiida import orm
from aiida.common import LinkType
from aiida.plugins.entry_point import format_entry_point_string
if computer is None:
computer = fixture_localhost
entry_point = format_entry_point_string('aiida.calculations', entry_point_name)
node = orm.CalcJobNode(computer=computer, process_type=entry_point)
node.set_attribute('input_filename', 'aiida.in')
node.set_attribute('output_filename', 'aiida.out')
node.set_attribute('error_filename', 'aiida.err')
node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
node.set_option('withmpi', True)
node.set_option('max_wallclock_seconds', 1800)
if retrieve_list is not None:
node.set_attribute('retrieve_list', retrieve_list)
if attributes:
node.set_attribute_many(attributes)
if inputs:
for link_label, input_node in flatten_inputs(inputs):
input_node.store()
node.add_incoming(input_node, link_type=LinkType.INPUT_CALC, link_label=link_label)
if store: # needed if test_name is not None
node.store()
if test_name is not None:
basepath = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(basepath, 'parsers', 'fixtures', entry_point_name[len('fleur.'):], test_name)
retrieved = orm.FolderData()
retrieved.put_object_from_tree(filepath)
retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')
retrieved.store()
remote_folder = orm.RemoteData(computer=computer, remote_path='/tmp')
remote_folder.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
remote_folder.store()
return node
return _generate_calc_job_node
@pytest.fixture
def generate_structure():
"""Return a `StructureData` representing bulk silicon."""
def _generate_structure():
"""Return a `StructureData` representing bulk silicon."""
from aiida.orm import StructureData
param = 5.43
cell = [[0, param / 2., param / 2.], [param / 2., 0, param / 2.], [param / 2., param / 2., 0]]
structure = StructureData(cell=cell)
structure.append_atom(position=(0., 0., 0.), symbols='Si', name='Si')
structure.append_atom(position=(param / 4., param / 4., param / 4.), symbols='Si', name='Si')
return structure
return _generate_structure
@pytest.fixture
def generate_smco5_structure():
"""Return a `StructureData` representing SmCo5"""
def _generate_structure():
"""Return a `StructureData` representing SmCo5"""
from aiida.orm import StructureData
import numpy as np
a = 4.9679
c = 3.9629
cell = np.array([[a, 0.0, 0.0], [a * np.cos(2 * np.pi / 3), a * np.sin(2 * np.pi / 3), 0.0], [0.0, 0.0, c]])
structure = StructureData(cell=cell)
structure.append_atom(position=[0.0, 0.0, 0.0], symbols='Sm', name='Sm')
structure.append_atom(position=np.array([1 / 3, 2 / 3, 0.0]) @ cell, symbols='Co', name='Co')
structure.append_atom(position=np.array([2 / 3, 1 / 3, 0.0]) @ cell, symbols='Co', name='Co')
structure.append_atom(position=np.array([0.0, 0.5, 0.5]) @ cell, symbols='Co', name='Co')
structure.append_atom(position=np.array([0.5, 0.0, 0.5]) @ cell, symbols='Co', name='Co')
structure.append_atom(position=np.array([0.5, 0.5, 0.5]) @ cell, symbols='Co', name='Co')
return structure
return _generate_structure
@pytest.fixture
def generate_retrieved_data():
"""
Generate orm.FolderData for retrieved output
"""
def _generate_retrieved_data(node, name, calc_type='fleur'):
"""
Generate FolderData for the retrieved output of the given node
"""
from aiida import orm
from aiida.common import LinkType
basepath = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(basepath, 'parsers', 'fixtures', calc_type, name)
retrieved = orm.FolderData()
retrieved.put_object_from_tree(filepath)
retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')
retrieved.store()
return retrieved
return _generate_retrieved_data
@pytest.fixture
def generate_kpoints_mesh():
"""Return a `KpointsData` node."""
def _generate_kpoints_mesh(npoints):
"""Return a `KpointsData` with a mesh of npoints in each direction."""
from aiida.orm import KpointsData
kpoints = KpointsData()
kpoints.set_kpoints_mesh([npoints] * 3)
return kpoints
return _generate_kpoints_mesh
@pytest.fixture(scope='session')
def generate_parser():
"""Fixture to load a parser class for testing parsers."""
def _generate_parser(entry_point_name):
"""Fixture to load a parser class for testing parsers.
:param entry_point_name: entry point name of the parser class
:return: the `Parser` sub class
"""
from aiida.plugins import ParserFactory
return ParserFactory(entry_point_name)
return _generate_parser
@pytest.fixture
def generate_remote_data():
"""Return a `RemoteData` node."""
def _generate_remote_data(computer, remote_path, entry_point_name=None):
"""Return a `RemoteData` node pointing to given path."""
from aiida.common.links import LinkType
from aiida.plugins.entry_point import format_entry_point_string
entry_point = format_entry_point_string('aiida.calculations', entry_point_name)
remote = RemoteData(remote_path=remote_path)
remote.computer = computer
if entry_point_name is not None:
creator = CalcJobNode(computer=computer, process_type=entry_point)
creator.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})
remote.add_incoming(creator, link_type=LinkType.CREATE, link_label='remote_folder')
creator.store()
return remote
return _generate_remote_data
############### Here AiiDA-Fleur fixtures begin ##################
@pytest.fixture
def create_fleurinp():
"""Returns fleurinp constuctor"""
from aiida.plugins import DataFactory
fleurinp = DataFactory('fleur.fleurinp')
def _make_fleurinp(inpxmlfilepath, additional_files=None):
if additional_files is None:
additional_files = []
return fleurinp(files=[inpxmlfilepath] + additional_files)
return _make_fleurinp
@pytest.fixture
def inpxml_etree():
"""Returns the etree generator"""
def _get_etree(path, return_schema=False):
from lxml import etree
from masci_tools.io.parsers.fleur_schema import InputSchemaDict
with open(path) as inpxmlfile:
tree = etree.parse(inpxmlfile)
version = tree.getroot().attrib['fleurInputVersion']
schema_dict = InputSchemaDict.fromVersion(version)
if return_schema:
return tree, schema_dict
else:
return tree
return _get_etree
@pytest.fixture
def eval_xpath():
"""Return the eval_xpath function"""
def _eval_xpath(node, xpath, list_return=False):
from masci_tools.util.xml.common_functions import eval_xpath
return eval_xpath(node, xpath, list_return=list_return)
return _eval_xpath
@pytest.fixture
def generate_inputs_base(fixture_code, create_fleurinp, generate_kpoints_mesh):
"""Generate default inputs for a `PwCalculation."""
def _generate_inputs_base():
"""Generate default inputs for a `PwCalculation."""
from aiida_fleur.common.defaults import default_options
TEST_INPXML_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'files/inpxml/Si/inp.xml'))
inputs = {
'code': fixture_code('fleur'),
'fleurinpdata': create_fleurinp(TEST_INPXML_PATH),
'options': Dict(dict=default_options)
}
return inputs
return _generate_inputs_base
@pytest.fixture
def generate_workchain_base(generate_workchain, generate_inputs_base, generate_calc_job_node):
"""Generate an instance of a `FleurBaseWorkChain`."""
def _generate_workchain_base(exit_code=None, inputs=None, return_inputs=False):
from plumpy import ProcessState
entry_point = 'fleur.base'
if inputs is None:
inputs = generate_inputs_base()
if return_inputs:
return inputs
process = generate_workchain(entry_point, inputs)
if exit_code is not None:
node = generate_calc_job_node('fleur.fleur', inputs={'parameters': Dict()})
node.set_process_state(ProcessState.FINISHED)
node.set_exit_status(exit_code.status)
process.ctx.iteration = 1
process.ctx.children = [node]
return process
return _generate_workchain_base
@pytest.fixture
def generate_workchain():
"""Generate an instance of a `WorkChain`."""
def _generate_workchain(entry_point, inputs):
"""Generate an instance of a `WorkChain` with the given entry point and inputs.
:param entry_point: entry point name of the work chain subclass.
:param inputs: inputs to be passed to process construction.
:return: a `WorkChain` instance.
"""
from aiida.engine.utils import instantiate_process
from aiida.manage.manager import get_manager
from aiida.plugins import WorkflowFactory
process_class = WorkflowFactory(entry_point)
runner = get_manager().get_runner()
process = instantiate_process(runner, process_class, **inputs)
return process
return _generate_workchain
@pytest.fixture
def generate_work_chain_node():
"""Fixture to generate a mock `WorkChainNode` for testing parsers."""
def flatten_inputs(inputs, prefix=''):
"""Flatten inputs recursively like :meth:`aiida.engine.processes.process::Process._flatten_inputs`."""
flat_inputs = []
for key, value in inputs.items():
if isinstance(value, Mapping):
flat_inputs.extend(flatten_inputs(value, prefix=prefix + key + '__'))
else:
flat_inputs.append((prefix + key, value))
return flat_inputs
def _generate_work_chain_node(entry_point_name, computer, test_name=None, inputs=None, attributes=None):
"""Fixture to generate a mock `WorkChainNode` for testing parsers.
:param entry_point_name: entry point name of the calculation class
:param computer: a `Computer` instance
:param test_name: relative path of directory with test output files in the `fixtures/{entry_point_name}` folder.
        :param inputs: any optional nodes to add as input links to the current CalcJobNode
:param attributes: any optional attributes to set on the node
:return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node
"""
from aiida import orm
from aiida.common import LinkType
from aiida.plugins.entry_point import format_entry_point_string
entry_point = format_entry_point_string('aiida.workchains', entry_point_name)
node = orm.WorkChainNode(computer=computer, process_type=entry_point)
if attributes:
node.set_attribute_many(attributes)
if inputs:
for link_label, input_node in flatten_inputs(inputs):
input_node.store()
node.add_incoming(input_node, link_type=LinkType.INPUT_WORK, link_label=link_label)
if test_name is not None:
basepath = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(basepath, 'parsers', 'fixtures', entry_point_name[len('quantumespresso.'):],
test_name)
retrieved = orm.FolderData()
retrieved.put_object_from_tree(filepath)
retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved')
retrieved.store()
remote_folder = orm.RemoteData(computer=computer, remote_path='/tmp')
remote_folder.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
remote_folder.store()
return node
return _generate_work_chain_node
@pytest.fixture
def generate_film_structure():
"""Return a `StructureData` representing bulk silicon."""
def _generate_film_structure():
"""Return a `StructureData` representing bulk silicon."""
from aiida.orm import StructureData
from aiida_fleur.common.constants import BOHR_A
a = 7.497 * BOHR_A
cell = [[0.7071068 * a, 0.0, 0.0], [0.0, 1.0 * a, 0.0], [0.0, 0.0, 0.7071068 * a]]
structure = StructureData(cell=cell)
structure.append_atom(position=(0., 0., -1.99285 * BOHR_A), symbols='Fe')
structure.append_atom(position=(0.5 * 0.7071068 * a, 0.5 * a, 0.0), symbols='Pt')
structure.append_atom(position=(0., 0., 2.65059 * BOHR_A), symbols='Pt')
structure.pbc = (True, True, False)
return structure
return _generate_film_structure
@pytest.fixture
def generate_sym_film_structure():
"""Return a `StructureData` representing bulk silicon."""
def _generate_film_structure():
"""Return a `StructureData` representing bulk silicon."""
from aiida.orm import StructureData
from aiida_fleur.common.constants import BOHR_A
a = 7.497 * BOHR_A
cell = [[0.7071068 * a, 0.0, 0.0], [0.0, 1.0 * a, 0.0], [0.0, 0.0, 0.7071068 * a]]
structure = StructureData(cell=cell)
structure.append_atom(position=(0., 0., -1.99285 * BOHR_A), symbols='Fe')
structure.append_atom(position=(0.5 * 0.7071068 * a, 0.5 * a, 0.0), symbols='Pt')
structure.append_atom(position=(0., 0., 1.99285 * BOHR_A), symbols='Fe')
structure.pbc = (True, True, False)
return structure
return _generate_film_structure
@pytest.fixture(scope='function', autouse=True)
def clear_database_aiida_fleur(clear_database): # pylint: disable=redefined-outer-name
"""Clear the database before each test.
"""
#aiida_profile.reset_db()
#yield
#aiida_profile.reset_db()
@pytest.fixture
def read_dict_from_file():
"""returns a dict read from a json file to construct and Outputnode of a JobCalc or Workchain"""
def _read_dict_from_file(jsonfilepath):
"""Return dict from json"""
import json
node_dict = {}
with open(jsonfilepath) as jfile:
node_dict = json.load(jfile)
return node_dict
return _read_dict_from_file
@pytest.fixture
def generate_structure2():
"""Return a `StructureData` representing bulk silicon."""
def _generate_structure2():
"""Return a `StructureData` representing bulk silicon."""
from aiida.orm import StructureData
def rel_to_abs(vector, cell):
"""
            Converts internal (relative) coordinates to absolute coordinates in Angstroem.
"""
if len(vector) == 3:
postionR = vector
row1 = cell[0]
row2 = cell[1]
row3 = cell[2]
new_abs_pos = [
postionR[0] * row1[0] + postionR[1] * row2[0] + postionR[2] * row3[0],
postionR[0] * row1[1] + postionR[1] * row2[1] + postionR[2] * row3[1],
postionR[0] * row1[2] + postionR[1] * row2[2] + postionR[2] * row3[2]
]
return new_abs_pos
bohr_a_0 = 0.52917721092 # A
a = 5.167355275190 * bohr_a_0
cell = [[0.0, a, a], [a, 0.0, a], [a, a, 0.0]]
structure = StructureData(cell=cell)
pos1 = rel_to_abs((1. / 8., 1. / 8., 1. / 8.), cell)
pos2 = rel_to_abs((-1. / 8., -1. / 8., -1. / 8.), cell)
structure.append_atom(position=pos1, symbols='Si')
structure.append_atom(position=pos2, symbols='Si')
return structure
return _generate_structure2
@pytest.fixture
def generate_structure_W():
"""Return a `StructureData` representing bulk tungsten."""
def _generate_structure_W():
"""Return a `StructureData` representing bulk tungsten."""
from aiida.orm import StructureData
# W bcc structure
bohr_a_0 = 0.52917721092 # A
a = 3.013812049196 * bohr_a_0
cell = [[-a, a, a], [a, -a, a], [a, a, -a]]
structure = StructureData(cell=cell)
structure.append_atom(position=(0., 0., 0.), symbols='W', name='W')
#param = 3.18968 # 1.58950065353588 * 0.5291772109
#cell = [[-param, param, param], [param, -param, param], [param, param, -param]]
#structure = StructureData(cell=cell)
#structure.append_atom(position=(0., 0., 0.), symbols='W', name='W')
return structure
return _generate_structure_W
@pytest.fixture
def generate_structure_cif():
"""Return a `StructureData` from a cif file path."""
def _generate_structure_cif(cif_filepath):
"""Return a `StructureData` from a cif file."""
from aiida.orm import CifData
structure = CifData.get_or_create(cif_filepath)[0].get_structure()
return structure
return _generate_structure_cif
@pytest.fixture(scope='function')
def inpgen_local_code(mock_code_factory, request):
"""
    Create the inpgen code
"""
#Adapted from shared_datadir of pytest-datadir to not use paths
#in the tmp copies created by pytest
data_dir = Path(os.path.join(request.fspath.dirname, 'calculations'))
if not data_dir.is_dir():
data_dir.mkdir()
InpgenCode = mock_code_factory(label='inpgen',
data_dir_abspath=data_dir,
entry_point='fleur.inpgen',
ignore_files=[
'_aiidasubmit.sh', 'FleurInputSchema.xsd', 'scratch', 'usage.json', '*.config',
'*.econfig', 'struct.xsf'
])
return InpgenCode
@pytest.fixture(scope='function')
def fleur_local_code(mock_code_factory, pytestconfig, request):
"""
Create or load Fleur code
"""
#Adapted from shared_datadir of pytest-datadir to not use paths
#in the tmp copies created by pytest
data_dir = Path(os.path.join(request.fspath.dirname, 'calculations'))
if not data_dir.is_dir():
data_dir.mkdir()
FleurCode = mock_code_factory(label='fleur',
data_dir_abspath=data_dir,
entry_point='fleur.fleur',
ignore_files=[
'_aiidasubmit.sh', 'cdnc', 'out', 'FleurInputSchema.xsd', 'FleurOutputSchema.xsd',
'cdn.hdf', 'usage.json', 'cdn*', 'mixing_history*', 'juDFT_times.json',
'*.config', '*.econfig', 'struct*.xsf', 'band.gnu'
])
if pytestconfig.getoption('--local-exe-hdf5'):
FleurCode.description = 'Local executable with HDF5'
return FleurCode
@pytest.fixture
def import_with_migrate(temp_dir):
"""Import an aiida export file and migrate it
    We want to be able to run the tests with several aiida versions,
    therefore imports have to be migrated, but we also do not want to use verdi.
"""
# This function has some deep aiida imports which might change in the future
_DEFAULT_IMPORT_KWARGS = {'group': None}
try:
from aiida.tools.importexport import import_data
def _import_with_migrate(filename, tempdir=temp_dir, import_kwargs=None, try_migration=True):
from click import echo
from aiida.tools.importexport import import_data
from aiida.tools.importexport import EXPORT_VERSION, IncompatibleArchiveVersionError
            # these are only available after aiida >= 1.5.0, maybe rely on verdi import instead
from aiida.tools.importexport import detect_archive_type
from aiida.tools.importexport.archive.migrators import get_migrator
from aiida.tools.importexport.common.config import ExportFileFormat
if import_kwargs is None:
import_kwargs = _DEFAULT_IMPORT_KWARGS
archive_path = filename
try:
import_data(archive_path, **import_kwargs)
except IncompatibleArchiveVersionError:
#raise ValueError
if try_migration:
echo(f'incompatible version detected for {archive_path}, trying migration')
migrator = get_migrator(detect_archive_type(archive_path))(archive_path)
archive_path = migrator.migrate(EXPORT_VERSION, None, out_compression='none', work_dir=tempdir)
import_data(archive_path, **import_kwargs)
else:
raise
except ImportError:
# This is the case for aiida >= 2.0.0
def _import_with_migrate(filename, import_kwargs=None, try_migration=True):
from click import echo
from aiida.tools.archive import import_archive, get_format
from aiida.common.exceptions import IncompatibleStorageSchema
if import_kwargs is None:
import_kwargs = _DEFAULT_IMPORT_KWARGS
archive_path = filename
try:
import_archive(archive_path, **import_kwargs)
except IncompatibleStorageSchema:
if try_migration:
echo(f'incompatible version detected for {archive_path}, trying migration')
archive_format = get_format()
version = archive_format.latest_version
archive_format.migrate(archive_path, archive_path, version, force=True, compression=6)
import_archive(archive_path, **import_kwargs)
else:
raise
return _import_with_migrate
| 36.573813
| 120
| 0.651504
|
094c0e444b8de586b353b60444d9b4981ea2b1d5
| 214
|
py
|
Python
|
erpnext_formbuilder/erpnext_formbuilder/doctype/form/test_form.py
|
erpnext-apps-store/erpnext_formbuilder
|
4227292ac53f70e57789d70c667da54800d7b27d
|
[
"MIT"
] | 2
|
2020-06-23T23:47:19.000Z
|
2022-03-12T04:59:54.000Z
|
erpnext_formbuilder/erpnext_formbuilder/doctype/form/test_form.py
|
askmetoo/erpnext_formbuilder
|
4227292ac53f70e57789d70c667da54800d7b27d
|
[
"MIT"
] | null | null | null |
erpnext_formbuilder/erpnext_formbuilder/doctype/form/test_form.py
|
askmetoo/erpnext_formbuilder
|
4227292ac53f70e57789d70c667da54800d7b27d
|
[
"MIT"
] | 8
|
2020-04-25T18:11:59.000Z
|
2022-02-22T09:43:02.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Havenir Solutions and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestForm(unittest.TestCase):
pass
| 19.454545
| 56
| 0.761682
|
057dcc3a5b32e14b9a418246a92bc788ddfb0d0d
| 3,826
|
py
|
Python
|
plaso/parsers/networkminer.py
|
nflexfo/plaso
|
5da7aa51c39b593773687fdf20a93ba35fc492b4
|
[
"Apache-2.0"
] | 1
|
2020-12-04T10:26:34.000Z
|
2020-12-04T10:26:34.000Z
|
plaso/parsers/networkminer.py
|
nflexfo/plaso
|
5da7aa51c39b593773687fdf20a93ba35fc492b4
|
[
"Apache-2.0"
] | null | null | null |
plaso/parsers/networkminer.py
|
nflexfo/plaso
|
5da7aa51c39b593773687fdf20a93ba35fc492b4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Parser for NetworkMiner .fileinfos files."""
from __future__ import unicode_literals
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import dsv_parser
from plaso.parsers import manager
class NetworkMinerEventData(events.EventData):
"""NetworkMiner event Data.
Attributes:
destination_ip (str): Destination IP address.
destination_port (str): Destination port number.
file_details (string): Details about the file.
file_md5 (string): MD5 hash of the file.
file_path (string): File path to where it was downloaded.
file_size (string): Size of the file.
filename (string): Name of the file.
source_ip (str): Originating IP address.
source_port (str): Originating port number.
"""
DATA_TYPE = 'networkminer:fileinfos:file'
def __init__(self):
super(NetworkMinerEventData, self).__init__(data_type=self.DATA_TYPE)
self.destination_ip = None
self.destination_port = None
self.file_details = None
self.file_md5 = None
self.file_path = None
self.file_size = None
self.filename = None
self.source_ip = None
self.source_port = None
class NetworkMinerParser(dsv_parser.DSVParser):
"""Parser for NetworkMiner .fileinfos files."""
NAME = 'networkminer_fileinfo'
DATA_FORMAT = 'NetworkMiner .fileinfos file'
COLUMNS = (
'source_ip', 'source_port', 'destination_ip', 'destination_port',
'filename', 'file_path', 'file_size', 'unused', 'file_md5', 'unused2',
'file_details', 'unused4', 'timestamp')
MIN_COLUMNS = 13
def ParseRow(self, parser_mediator, row_offset, row):
"""Parses a line of the log file and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row_offset (int): line number of the row.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
"""
event_data = NetworkMinerEventData()
if row.get('timestamp', None) != 'Timestamp':
date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()
for field in (
'source_ip', 'source_port', 'destination_ip',
'destination_port', 'filename', 'file_path', 'file_size', 'file_md5',
'file_details'):
setattr(event_data, field, row[field])
try:
timestamp = row.get('timestamp', None)
date_time.CopyFromStringISO8601(timestamp)
except ValueError:
parser_mediator.ProduceExtractionWarning('invalid date time value')
return
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
def VerifyRow(self, parser_mediator, row):
"""Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
if len(row) != self.MIN_COLUMNS:
return False
# Check the date format
# If it doesn't parse, then this isn't a NetworkMiner .fileinfos file.
timestamp_value = row.get('timestamp', None)
if timestamp_value != 'Timestamp':
date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()
try:
date_time.CopyFromStringISO8601(timestamp_value)
except ValueError:
return False
return True
manager.ParsersManager.RegisterParser(NetworkMinerParser)
| 32.982759
| 79
| 0.706482
|
ef633e3e318be796a7e5db43cbce3a67d91bdc90
| 3,925
|
py
|
Python
|
zircon/system/utest/fidl-compiler/gen-goldens.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | null | null | null |
zircon/system/utest/fidl-compiler/gen-goldens.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | null | null | null |
zircon/system/utest/fidl-compiler/gen-goldens.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import os
import sys
GOLDENS_TMPL = """
// Autogenerated: Do not modify!
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "goldens.h"
std::map<std::string, std::vector<std::string>> Goldens::dep_order_ = {{
{dep_order}
}};
std::map<std::string, std::string> Goldens::json_ = {{
{json}
}};
std::map<std::string, std::string> Goldens::fidl_ = {{
{fidl}
}};
"""
def get_testname(filename):
"""
Return a standardized test name given a filename corresponding to a golden
JSON file, a fidl file, or an order.txt file.
>>> get_testname('foo/bar/testdata/mytest/order.txt')
'mytest'
>>> get_testname('foo/bar/goldens/mytest.test.json.golden')
'mytest'
>>> get_testname('foo/bar/testdata/mytest.test.fidl')
'mytest'
"""
path = filename.split('/')
dirname = 'goldens' if '/goldens/' in filename else 'testdata'
dir_index = path.index(dirname)
return path[dir_index + 1].split('.')[0]
def format_list(l):
""" Format a python list as a c++ vector
>>> format_list(['a', 'b'])
'{"a", "b"}'
"""
return '{{{0}}}'.format(', '.join(map(format_str, l)))
def format_str(s, delimiter=None):
""" Format a python str as a c++ string literal. """
if not delimiter:
return '"{0}"'.format(s)
return 'R"{delim}({s}){delim}"'.format(s=s, delim=delimiter)
def get_goldens_cc(inputs):
# group the filenames by test, for each test, we keep track of:
# the json for that test, the list of FIDL files, their dependency order,
# and the contents of those FIDL files.
testname_to_order = {}
testname_to_fidl_files = defaultdict(list)
testname_to_golden = {}
fidl_file_contents = {}
for filename in inputs:
testname = get_testname(filename)
if filename.endswith('order.txt'):
testname_to_order[testname] = open(filename, 'r').read().split()
elif '/goldens/' in filename:
testname_to_golden[testname] = open(filename, 'r').read()
elif '/testdata/' in filename:
name = os.path.basename(filename)
file_key = testname + '/' + name
testname_to_fidl_files[testname].append(file_key)
fidl_file_contents[file_key] = open(filename, 'r').read()
else:
raise RuntimeError('Unknown path: ' + filename)
# each test has exactly one golden, and at least one fidl file, so the
# keys for these two dicts should contain the exact same test names
assert (testname_to_golden.keys() == testname_to_fidl_files.keys())
# sort the list of FIDL files per test by dependency order
for testname, order in testname_to_order.items():
testname_to_fidl_files[testname].sort(
key=lambda p: order.index(os.path.basename(p)))
# output C++ file
dep_order = []
json = []
fidl = []
for testname in testname_to_golden:
dep_order.append(
'\t{{{0}, {1}}},'.format(
format_str(testname),
format_list(testname_to_fidl_files[testname])))
json.append(
'\t{{{0}, {1}}},'.format(
format_str(testname),
format_str(testname_to_golden[testname], delimiter='JSON')))
for filename, contents in fidl_file_contents.items():
fidl.append(
'\t{{{0}, {1}}},'.format(
format_str(filename), format_str(contents, delimiter='FIDL')))
return GOLDENS_TMPL.format(
dep_order='\n'.join(dep_order),
json='\n'.join(json),
fidl='\n'.join(fidl))
if __name__ == '__main__':
goldens = get_goldens_cc(sys.argv[2:])
with open(sys.argv[1], 'w') as f:
f.write(goldens)
| 32.438017
| 78
| 0.625223
|
dcd64922dd30333803d5bd6d924c10a47add6635
| 2,516
|
py
|
Python
|
z3c/recipe/usercrontab/__init__.py
|
PythonUnited/z3c.recipe.usercrontab
|
4ac64b3235365c9da88d56d07bccd543fdf2722e
|
[
"ZPL-2.1"
] | null | null | null |
z3c/recipe/usercrontab/__init__.py
|
PythonUnited/z3c.recipe.usercrontab
|
4ac64b3235365c9da88d56d07bccd543fdf2722e
|
[
"ZPL-2.1"
] | null | null | null |
z3c/recipe/usercrontab/__init__.py
|
PythonUnited/z3c.recipe.usercrontab
|
4ac64b3235365c9da88d56d07bccd543fdf2722e
|
[
"ZPL-2.1"
] | null | null | null |
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
import logging
from zc.buildout.buildout import bool_option
from z3c.recipe.usercrontab.usercrontab import UserCrontabManager
class UserCrontab:
def __init__(self, buildout, name, options):
self.options = options
options['entry'] = '%s\t%s' % (options['times'], options['command'])
self.comment = options.get('comment')
if not bool_option(self.options, 'enabled', default=True):
self.options['entry'] = '# ' + self.options['entry']
# readcrontab and writecrontab are solely for testing.
readcrontab = self.options.get('readcrontab', None)
writecrontab = self.options.get('writecrontab', None)
self.options['identifier'] = '[%s]' % name
self.crontab = UserCrontabManager(
readcrontab, writecrontab,
identifier=self.options['identifier'])
def install(self):
self.crontab.read_crontab()
self.crontab.add_entry(self.options['entry'], self.comment)
self.crontab.write_crontab()
return ()
def update(self):
self.install()
def uninstall_usercrontab(name, options):
readcrontab = options.get('readcrontab', None)
writecrontab = options.get('writecrontab', None)
identifier = options.get('identifier', 'NO IDENTIFIER')
crontab = UserCrontabManager(
readcrontab, writecrontab,
identifier=identifier)
crontab.read_crontab()
nuked = crontab.del_entry(options['entry'])
if nuked == 0:
logging.getLogger(name).warning(
"WARNING: Did not find a crontab-entry during uninstall; "
"please check manually if everything was removed correctly")
elif nuked > 1:
logging.getLogger(name).error(
"FATAL ERROR: Found more than one matching crontab-entry during "
"uninstall; please resolve manually.\nMatched lines: %s",
(options['entry']))
raise RuntimeError(
"Found more than one matching crontab-entry during uninstall")
crontab.write_crontab()
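# Hedged usage sketch (added for illustration; the section name and command path are
# assumptions, not taken from this file): a buildout part using this recipe might
# look like
#
#   [backup-cron]
#   recipe = z3c.recipe.usercrontab
#   times = 0 12 * * *
#   command = ${buildout:bin-directory}/backup
#
# which, per UserCrontab.__init__ above, installs the crontab entry
# "0 12 * * *<TAB>.../bin/backup" under the identifier "[backup-cron]".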
| 37.552239
| 77
| 0.674881
|
c25cfe2d104d95daa5082da40c9f45e19df6535f
| 4,538
|
py
|
Python
|
model/utils/bbox_tools.py
|
BBuf/simple-faster-rcnn-explain
|
cff53d47cf0b99e1e00475400d135248bd8e595a
|
[
"MIT"
] | 18
|
2020-05-03T01:21:58.000Z
|
2022-03-28T05:16:32.000Z
|
model/utils/bbox_tools.py
|
zengjz/simple-faster-rcnn-explain
|
cff53d47cf0b99e1e00475400d135248bd8e595a
|
[
"MIT"
] | 1
|
2021-12-10T13:16:39.000Z
|
2021-12-10T13:16:39.000Z
|
model/utils/bbox_tools.py
|
zengjz/simple-faster-rcnn-explain
|
cff53d47cf0b99e1e00475400d135248bd8e595a
|
[
"MIT"
] | 7
|
2020-05-06T09:31:33.000Z
|
2021-11-01T12:16:55.000Z
|
import numpy as np
import numpy as xp
import six
from six import __init__
# Given a source bbox and the location offsets dy, dx, dh, dw, compute the target box G
def loc2bbox(src_bbox, loc):
    # src_bbox: (R, 4); R is the number of bboxes, 4 are the top-left and bottom-right coordinates
    if src_bbox.shape[0] == 0:
        return xp.zeros((0, 4), dtype=loc.dtype)
    src_bbox = src_bbox.astype(src_bbox.dtype, copy=False)
    # src_height is Ph, src_width is Pw, src_ctr_y is Py, src_ctr_x is Px
    src_height = src_bbox[:, 2] - src_bbox[:, 0]  # ymax - ymin
    src_width = src_bbox[:, 3] - src_bbox[:, 1]  # xmax - xmin
    src_ctr_y = src_bbox[:, 0] + 0.5 * src_height  # y0 + 0.5h
    src_ctr_x = src_bbox[:, 1] + 0.5 * src_width  # x0 + 0.5w; this gives the centre coordinates
    # python slicing: [start:stop:step]
    dy = loc[:, 0::4]
    dx = loc[:, 1::4]
    dh = loc[:, 2::4]
    dw = loc[:, 3::4]
    # Bounding-box regression as proposed in R-CNN: find the mapping from the original
    # proposal to the approximate target box G (formula given above)
    ctr_y = dy * src_height[:, xp.newaxis] + src_ctr_y[:, xp.newaxis]  # ctr_y is Gy
    ctr_x = dx * src_width[:, xp.newaxis] + src_ctr_x[:, xp.newaxis]  # ctr_x is Gx
    h = xp.exp(dh) * src_height[:, xp.newaxis]  # h is Gh
    w = xp.exp(dw) * src_width[:, xp.newaxis]  # w is Gw
    # The four lines above give the regressed target box (Gx, Gy, Gh, Gw)
    # Convert from the centre/size representation back to top-left and bottom-right coordinates
    dst_bbox = xp.zeros(loc.shape, dtype=loc.dtype)
    dst_bbox[:, 0::4] = ctr_y - 0.5 * h
    dst_bbox[:, 1::4] = ctr_x - 0.5 * w
    dst_bbox[:, 2::4] = ctr_y + 0.5 * h
    dst_bbox[:, 3::4] = ctr_x + 0.5 * w
    return dst_bbox
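# --- Hedged example (added for illustration, not part of the original file) ---
# With zero offsets the decoded boxes equal the source boxes: dy = dx = 0 keeps the
# centre unchanged and exp(0) = 1 keeps the height and width unchanged.
def _example_loc2bbox_identity():
    src = np.array([[0., 0., 10., 20.]], dtype=np.float32)
    loc = np.zeros((1, 4), dtype=np.float32)
    return loc2bbox(src, loc)  # expected: [[0., 0., 10., 20.]]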
# Given the source boxes and the target boxes, compute the location offsets between them
def bbox2loc(src_bbox, dst_bbox):
    # Compute the centre coordinates of the source boxes
    height = src_bbox[:, 2] - src_bbox[:, 0]
    width = src_bbox[:, 3] - src_bbox[:, 1]
    ctr_y = src_bbox[:, 0] + 0.5 * height
    ctr_x = src_bbox[:, 1] + 0.5 * width
    # Compute the centre coordinates of the target boxes
    base_height = dst_bbox[:, 2] - dst_bbox[:, 0]
    base_width = dst_bbox[:, 3] - dst_bbox[:, 1]
    base_ctr_y = dst_bbox[:, 0] + 0.5 * base_height
    base_ctr_x = dst_bbox[:, 1] + 0.5 * base_width
    # Smallest positive number representable in this dtype
    eps = xp.finfo(height.dtype).eps
    # Clamp height and width against eps so they are guaranteed to be positive
    height = xp.maximum(height, eps)
    width = xp.maximum(width, eps)
    # Compute dy, dx, dh, dw according to formula (2) above
    dy = (base_ctr_y - ctr_y) / height
    dx = (base_ctr_x - ctr_x) / width
    dh = xp.log(base_height / height)
    dw = xp.log(base_width / width)
    # np.vstack stacks the arrays row by row
    loc = xp.vstack((dy, dx, dh, dw)).transpose()
    return loc
# Compute the intersection-over-union (IoU) between two sets of bboxes
def bbox_iou(bbox_a, bbox_b):
    # Make sure the second dimension holds the four bbox coordinates (ymin, xmin, ymax, xmax)
    if bbox_a.shape[1] != 4 or bbox_b.shape[1] != 4:
        raise IndexError
    # top left
    # tl is the maximum of the top-left corners, i.e. the top-left corner of the
    # intersection. To exploit numpy broadcasting, bbox_a[:, None, :2] has shape
    # (N, 1, 2) and bbox_b[:, :2] has shape (K, 2); broadcasting expands both to
    # (N, K, 2), so every bbox in a is paired with every bbox in b
    tl = xp.maximum(bbox_a[:, None, :2], bbox_b[:, :2])
    # bottom right
    # br is the minimum of the bottom-right corners, i.e. the bottom-right corner
    # of the intersection
    br = xp.minimum(bbox_a[:, None, 2:], bbox_b[:, 2:])
    # Where tl < br on every axis, the product of the elements
    # (y1max - y1min) * (x1max - x1min) is the area of the intersection of bbox_a and bbox_b
    area_i = xp.prod(br - tl, axis=2) * (tl < br).all(axis=2)
    # Area of bbox_a
    area_a = xp.prod(bbox_a[:, 2:] - bbox_a[:, :2], axis=1)
    # Area of bbox_b
    area_b = xp.prod(bbox_b[:, 2:] - bbox_b[:, :2], axis=1)
    # Compute the IoU
    return area_i / (area_a[:, None] + area_b - area_i)
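# --- Hedged example (added for illustration, not part of the original file) ---
# Identical boxes give IoU 1.0, disjoint boxes give IoU 0.0; the result has shape (N, K).
def _example_bbox_iou():
    a = np.array([[0., 0., 10., 10.]], dtype=np.float32)
    b = np.array([[0., 0., 10., 10.], [20., 20., 30., 30.]], dtype=np.float32)
    return bbox_iou(a, b)  # expected: [[1., 0.]]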
def __test():
pass
if __name__ == '__main__':
__test()
# Generate base anchors (anchor_base) on the feature map using a base length of 16 and
# suitable ratios and scales. (A base length of 16 is chosen because the images are
# roughly 600x800; base length 16 corresponds to a 256x256 region of the original image,
# and after rescaling, sizes of 128x128 and 512x512 are also reasonable.)
def generate_anchor_base(base_size=16, ratios=[0.5, 1, 2],
                         anchor_scales=[8, 16, 32]):
    # Generate the 9 basic anchors around a base point. ratios=[0.5, 1, 2] and
    # anchor_scales=[8, 16, 32] are the aspect ratios and the scale factors applied on
    # top of base_size; in this code they correspond to three areas, (16*8)^2, (16*16)^2
    # and (16*32)^2, i.e. squares with side lengths 128, 256 and 512
    py = base_size / 2.
    px = base_size / 2.
    # (9, 4); note: these are only the 9 anchors generated around the top-left point of
    # the feature map
    anchor_base = np.zeros((len(ratios) * len(anchor_scales), 4),
                           dtype=np.float32)
    # six.moves handles functions whose location changed between Python 2 and 3;
    # using six.moves hides those differences
    for i in six.moves.range(len(ratios)):
        for j in six.moves.range(len(anchor_scales)):
            # Generate h and w for the 9 different ratio/scale combinations
            h = base_size * anchor_scales[j] * np.sqrt(ratios[i])
            w = base_size * anchor_scales[j] * np.sqrt(1. / ratios[i])
            index = i * len(anchor_scales) + j
            # Compute the 4 coordinates (top-left and bottom-right) of each of the
            # 9 anchor boxes described by anchor_base
            anchor_base[index, 0] = py - h / 2.
            anchor_base[index, 1] = px - w / 2.
            anchor_base[index, 2] = py + h / 2.
            anchor_base[index, 3] = px + w / 2.
    return anchor_base
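# --- Hedged example (added for illustration, not part of the original file) ---
# The default call yields an array of shape (9, 4); e.g. ratio=1, scale=8 gives a
# 128x128 box centred on (8, 8): [-56., -56., 72., 72.].
def _example_generate_anchor_base():
    anchors = generate_anchor_base()
    assert anchors.shape == (9, 4)
    return anchors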
| 32.647482
| 79
| 0.610401
|
5763c2053e3504631ed27e927544f205cdfaf7b2
| 563
|
py
|
Python
|
mundo3-EstruturasCompostas/104-validandaEntradaDeDados.py
|
jonasht/CursoEmVideo-CursoDePython3
|
a1bbf1fe4226b1828213742ee5a440278d903fd1
|
[
"MIT"
] | null | null | null |
mundo3-EstruturasCompostas/104-validandaEntradaDeDados.py
|
jonasht/CursoEmVideo-CursoDePython3
|
a1bbf1fe4226b1828213742ee5a440278d903fd1
|
[
"MIT"
] | null | null | null |
mundo3-EstruturasCompostas/104-validandaEntradaDeDados.py
|
jonasht/CursoEmVideo-CursoDePython3
|
a1bbf1fe4226b1828213742ee5a440278d903fd1
|
[
"MIT"
] | null | null | null |
verde = '\033[32m'
vermelho = '\033[31m'
amarelo = '\033[33m'
azul = '\033[34m'
fim = '\033[m'
def leiaInt(mensagem):
while True:
l()
n = input(amarelo + mensagem + fim)
if n.isnumeric():
ninteiro = int(n)
break
else:
            print(vermelho, 'error, the value entered is not an integer\n'
                  'please enter an integer', fim)
l()
return ninteiro
def l(): print(azul+'=-'*30+'='+fim)
n = leiaInt('enter a value: ')
print(verde, f'you entered the value {n}, an integer')
| 25.590909
| 66
| 0.550622
|
964e7aedc5d6d1e002b543dd427940cd544cbc25
| 24,832
|
py
|
Python
|
gpiozero/devices.py
|
AnnoNuem/gpiozero
|
c55d64abd964ed3dd04dde933039a9de43369fac
|
[
"BSD-3-Clause"
] | 743
|
2019-07-31T02:57:08.000Z
|
2022-03-31T08:48:30.000Z
|
gpiozero/devices.py
|
AnnoNuem/gpiozero
|
c55d64abd964ed3dd04dde933039a9de43369fac
|
[
"BSD-3-Clause"
] | 259
|
2019-07-29T14:26:40.000Z
|
2022-03-27T00:17:56.000Z
|
gpiozero/devices.py
|
AnnoNuem/gpiozero
|
c55d64abd964ed3dd04dde933039a9de43369fac
|
[
"BSD-3-Clause"
] | 127
|
2019-08-03T19:30:18.000Z
|
2022-02-24T14:33:50.000Z
|
# vim: set fileencoding=utf-8:
#
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
#
# Copyright (c) 2015-2021 Dave Jones <dave@waveform.org.uk>
# Copyright (c) 2015-2019 Ben Nuttall <ben@bennuttall.com>
# Copyright (c) 2016 Andrew Scheller <github@loowis.durge.org>
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import atexit
import weakref
import warnings
from collections import namedtuple, OrderedDict
from itertools import chain
from types import FunctionType
from .threads import _threads_shutdown
from .mixins import (
ValuesMixin,
SharedMixin,
)
from .exc import (
BadPinFactory,
DeviceClosed,
CompositeDeviceBadName,
CompositeDeviceBadOrder,
CompositeDeviceBadDevice,
GPIOPinMissing,
GPIODeviceClosed,
NativePinFactoryFallback,
PinFactoryFallback,
)
from .compat import frozendict
native_fallback_message = (
'Falling back to the experimental pin factory NativeFactory because no other '
'pin factory could be loaded. For best results, install RPi.GPIO or pigpio. '
'See https://gpiozero.readthedocs.io/en/stable/api_pins.html for more information.'
)
class GPIOMeta(type):
# NOTE Yes, this is a metaclass. Don't be scared - it's a simple one.
def __new__(mcls, name, bases, cls_dict):
# Construct the class as normal
cls = super().__new__(mcls, name, bases, cls_dict)
# If there's a method in the class which has no docstring, search
# the base classes recursively for a docstring to copy
for attr_name, attr in cls_dict.items():
if isinstance(attr, FunctionType) and not attr.__doc__:
for base_cls in cls.__mro__:
if hasattr(base_cls, attr_name):
base_fn = getattr(base_cls, attr_name)
if base_fn.__doc__:
attr.__doc__ = base_fn.__doc__
break
return cls
def __call__(cls, *args, **kwargs):
# Make sure cls has GPIOBase somewhere in its ancestry (otherwise
# setting __attrs__ below will be rather pointless)
assert issubclass(cls, GPIOBase)
if issubclass(cls, SharedMixin):
# If SharedMixin appears in the class' ancestry, convert the
# constructor arguments to a key and check whether an instance
# already exists. Only construct the instance if the key's new.
key = cls._shared_key(*args, **kwargs)
try:
self = cls._instances[key]()
self._refs += 1
except (KeyError, AttributeError) as e:
self = super().__call__(*args, **kwargs)
self._refs = 1
# Replace the close method with one that merely decrements
# the refs counter and calls the original close method when
# it reaches zero
old_close = self.close
def close():
self._refs = max(0, self._refs - 1)
if not self._refs:
try:
old_close()
finally:
try:
del cls._instances[key]
except KeyError:
# If the _refs go negative (too many closes)
# just ignore the resulting KeyError here -
# it's already gone
pass
self.close = close
cls._instances[key] = weakref.ref(self)
else:
# Construct the instance as normal
self = super().__call__(*args, **kwargs)
# At this point __new__ and __init__ have all been run. We now fix the
# set of attributes on the class by dir'ing the instance and creating a
# frozenset of the result called __attrs__ (which is queried by
# GPIOBase.__setattr__). An exception is made for SharedMixin devices
# which can be constructed multiple times, returning the same instance
if not issubclass(cls, SharedMixin) or self._refs == 1:
self.__attrs__ = frozenset(dir(self))
return self
class GPIOBase(metaclass=GPIOMeta):
def __setattr__(self, name, value):
# This overridden __setattr__ simply ensures that additional attributes
# cannot be set on the class after construction (it manages this in
# conjunction with the meta-class above). Traditionally, this is
# managed with __slots__; however, this doesn't work with Python's
# multiple inheritance system which we need to use in order to avoid
# repeating the "source" and "values" property code in myriad places
if hasattr(self, '__attrs__') and name not in self.__attrs__:
raise AttributeError(
"'{self.__class__.__name__}' object has no attribute "
"'{name}'".format(self=self, name=name))
return super().__setattr__(name, value)
def __del__(self):
# NOTE: Yes, we implicitly call close() on __del__(), and yes for you
# dear hacker-on-this-library, this means pain!
#
# It's entirely for the convenience of command line experimenters and
# newbies who want to re-gain those pins when stuff falls out of scope
# without managing their object lifetimes "properly" with "with" (but,
# hey, this is an educational library at heart so that's the way we
# roll).
#
# What does this mean for you? It means that in close() you cannot
# assume *anything*. If someone calls a constructor with a fundamental
# mistake like the wrong number of params, then your close() method is
# going to be called before __init__ ever ran so all those attributes
# you *think* exist, erm, don't. Basically if you refer to anything in
# "self" within your close method, be preprared to catch AttributeError
# on its access to avoid spurious warnings for the end user.
#
# "But we're exiting anyway; surely exceptions in __del__ get
# squashed?" Yes, but they still cause verbose warnings and remember
# that this is an educational library; keep it friendly!
self.close()
def close(self):
"""
Shut down the device and release all associated resources (such as GPIO
pins).
This method is idempotent (can be called on an already closed device
without any side-effects). It is primarily intended for interactive use
at the command line. It disables the device and releases its pin(s) for
use by another device.
You can attempt to do this simply by deleting an object, but unless
you've cleaned up all references to the object this may not work (even
if you've cleaned up all references, there's still no guarantee the
garbage collector will actually delete the object at that point). By
contrast, the close method provides a means of ensuring that the object
is shut down.
For example, if you have a breadboard with a buzzer connected to pin
16, but then wish to attach an LED instead:
>>> from gpiozero import *
>>> bz = Buzzer(16)
>>> bz.on()
>>> bz.off()
>>> bz.close()
>>> led = LED(16)
>>> led.blink()
:class:`Device` descendents can also be used as context managers using
the :keyword:`with` statement. For example:
>>> from gpiozero import *
>>> with Buzzer(16) as bz:
... bz.on()
...
>>> with LED(16) as led:
... led.on()
...
"""
# This is a placeholder which is simply here to ensure close() can be
# safely called from subclasses without worrying whether super-classes
# have it (which in turn is useful in conjunction with the mixin
# classes).
#
# P.S. See note in __del__ above.
pass
@property
def closed(self):
"""
Returns :data:`True` if the device is closed (see the :meth:`close`
method). Once a device is closed you can no longer use any other
methods or properties to control or query the device.
"""
raise NotImplementedError
def _check_open(self):
if self.closed:
raise DeviceClosed(
'{self.__class__.__name__} is closed or uninitialized'.format(
self=self))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
class Device(ValuesMixin, GPIOBase):
"""
Represents a single device of any type; GPIO-based, SPI-based, I2C-based,
etc. This is the base class of the device hierarchy. It defines the basic
services applicable to all devices (specifically the :attr:`is_active`
property, the :attr:`value` property, and the :meth:`close` method).
.. attribute:: pin_factory
This attribute exists at both a class level (representing the default
pin factory used to construct devices when no *pin_factory* parameter
is specified), and at an instance level (representing the pin factory
that the device was constructed with).
The pin factory provides various facilities to the device including
allocating pins, providing low level interfaces (e.g. SPI), and clock
facilities (querying and calculating elapsed times).
"""
pin_factory = None # instance of a Factory sub-class
def __init__(self, *, pin_factory=None):
if pin_factory is None:
if Device.pin_factory is None:
Device.pin_factory = Device._default_pin_factory()
self.pin_factory = Device.pin_factory
else:
self.pin_factory = pin_factory
super().__init__()
@staticmethod
def _default_pin_factory():
# We prefer RPi.GPIO here as it supports PWM, and all Pi revisions. If
# no third-party libraries are available, however, we fall back to a
# pure Python implementation which supports platforms like PyPy
#
# NOTE: If the built-in pin factories are expanded, the dict must be
# updated along with the entry-points in setup.py.
default_factories = OrderedDict((
('rpigpio', 'gpiozero.pins.rpigpio:RPiGPIOFactory'),
('lgpio', 'gpiozero.pins.lgpio:LGPIOFactory'),
('pigpio', 'gpiozero.pins.pigpio:PiGPIOFactory'),
('native', 'gpiozero.pins.native:NativeFactory'),
))
name = os.environ.get('GPIOZERO_PIN_FACTORY')
if name is None:
# If no factory is explicitly specified, try various names in
# "preferred" order. For speed, we select from the dictionary above
# rather than importing pkg_resources and using load_entry_point
for name, entry_point in default_factories.items():
try:
mod_name, cls_name = entry_point.split(':', 1)
module = __import__(mod_name, fromlist=(cls_name,))
pin_factory = getattr(module, cls_name)()
if name == 'native':
warnings.warn(NativePinFactoryFallback(native_fallback_message))
return pin_factory
except Exception as e:
warnings.warn(
PinFactoryFallback(
'Falling back from {name}: {e!s}'.format(
name=name, e=e)))
raise BadPinFactory('Unable to load any default pin factory!')
elif name in default_factories:
# As above, this is a fast-path optimization to avoid loading
# pkg_resources (which it turns out was 80% of gpiozero's import
# time!)
mod_name, cls_name = default_factories[name].split(':', 1)
module = __import__(mod_name, fromlist=(cls_name,))
return getattr(module, cls_name)()
else:
# Slow path: load pkg_resources and try and find the specified
# entry-point. Try with the name verbatim first. If that fails,
# attempt with the lower-cased name (this ensures compatibility
# names work but we're still case insensitive for all factories)
import pkg_resources
group = 'gpiozero_pin_factories'
for factory in pkg_resources.iter_entry_points(group, name):
return factory.load()()
for factory in pkg_resources.iter_entry_points(group, name.lower()):
return factory.load()()
raise BadPinFactory('Unable to find pin factory "{name}"'.format(
name=name))
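    # Hedged usage sketch (added commentary, not part of gpiozero itself): a specific
    # factory can also be chosen per device instead of relying on the default
    # selection above, e.g.
    #
    #   from gpiozero.pins.native import NativeFactory
    #   factory = NativeFactory()
    #   led = LED(17, pin_factory=factory)
    #
    # or globally by assigning Device.pin_factory before any devices are constructed.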
def __repr__(self):
try:
self._check_open()
return "<gpiozero.{self.__class__.__name__} object>".format(
self=self)
except DeviceClosed:
return "<gpiozero.{self.__class__.__name__} object closed>".format(
self=self)
def _conflicts_with(self, other):
"""
Called by :meth:`Factory.reserve_pins` to test whether the *other*
:class:`Device` using a common pin conflicts with this device's intent
to use it. The default is :data:`True` indicating that all devices
conflict with common pins. Sub-classes may override this to permit
more nuanced replies.
"""
return True
@property
def value(self):
"""
Returns a value representing the device's state. Frequently, this is a
boolean value, or a number between 0 and 1 but some devices use larger
ranges (e.g. -1 to +1) and composite devices usually use tuples to
return the states of all their subordinate components.
"""
raise NotImplementedError
@property
def is_active(self):
"""
Returns :data:`True` if the device is currently active and
:data:`False` otherwise. This property is usually derived from
:attr:`value`. Unlike :attr:`value`, this is *always* a boolean.
"""
return bool(self.value)
class CompositeDevice(Device):
"""
Extends :class:`Device`. Represents a device composed of multiple devices
like simple HATs, H-bridge motor controllers, robots composed of multiple
motors, etc.
The constructor accepts subordinate devices as positional or keyword
arguments. Positional arguments form unnamed devices accessed by treating
the composite device as a container, while keyword arguments are added to
the device as named (read-only) attributes.
For example:
.. code-block:: pycon
>>> from gpiozero import *
>>> d = CompositeDevice(LED(2), LED(3), LED(4), btn=Button(17))
>>> d[0]
<gpiozero.LED object on pin GPIO2, active_high=True, is_active=False>
>>> d[1]
<gpiozero.LED object on pin GPIO3, active_high=True, is_active=False>
>>> d[2]
<gpiozero.LED object on pin GPIO4, active_high=True, is_active=False>
>>> d.btn
<gpiozero.Button object on pin GPIO17, pull_up=True, is_active=False>
>>> d.value
CompositeDeviceValue(device_0=False, device_1=False, device_2=False, btn=False)
:param Device \\*args:
The un-named devices that belong to the composite device. The
:attr:`value` attributes of these devices will be represented within
the composite device's tuple :attr:`value` in the order specified here.
:type _order: list or None
:param _order:
If specified, this is the order of named items specified by keyword
arguments (to ensure that the :attr:`value` tuple is constructed with a
specific order). All keyword arguments *must* be included in the
collection. If omitted, an alphabetically sorted order will be selected
for keyword arguments.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
:param Device \\*\\*kwargs:
The named devices that belong to the composite device. These devices
will be accessible as named attributes on the resulting device, and
their :attr:`value` attributes will be accessible as named elements of
the composite device's tuple :attr:`value`.
"""
def __init__(self, *args, _order=None, pin_factory=None, **kwargs):
self._all = ()
self._named = frozendict({})
self._namedtuple = None
self._order = _order
try:
if self._order is None:
self._order = sorted(kwargs.keys())
else:
for missing_name in set(kwargs.keys()) - set(self._order):
raise CompositeDeviceBadOrder(
'{missing_name} missing from _order'.format(
missing_name=missing_name))
self._order = tuple(self._order)
for name in set(self._order) & set(dir(self)):
raise CompositeDeviceBadName(
'{name} is a reserved name'.format(name=name))
for dev in chain(args, kwargs.values()):
if not isinstance(dev, Device):
raise CompositeDeviceBadDevice(
"{dev} doesn't inherit from Device".format(dev=dev))
self._named = frozendict(kwargs)
self._namedtuple = namedtuple(
'{self.__class__.__name__}Value'.format(self=self), chain(
('device_{i}'.format(i=i)
for i in range(len(args))), self._order))
except:
for dev in chain(args, kwargs.values()):
if isinstance(dev, Device):
dev.close()
raise
self._all = args + tuple(kwargs[v] for v in self._order)
super().__init__(pin_factory=pin_factory)
def __getattr__(self, name):
# if _named doesn't exist yet, pretend it's an empty dict
if name == '_named':
return frozendict({})
try:
return self._named[name]
except KeyError:
raise AttributeError("no such attribute {name}".format(name=name))
def __setattr__(self, name, value):
# make named components read-only properties
if name in self._named:
raise AttributeError("can't set attribute {name}".format(name=name))
return super().__setattr__(name, value)
def __repr__(self):
try:
self._check_open()
named = len(self._named)
unnamed = len(self) - len(self._named)
if named > 0 and unnamed > 0:
template = (
"<gpiozero.{self.__class__.__name__} object containing "
"{count} devices: {names} and {unnamed} unnamed>")
elif named > 0:
template = (
"<gpiozero.{self.__class__.__name__} object containing "
"{count} devices: {names}>")
else:
template = (
"<gpiozero.{self.__class__.__name__} object containing "
"{count} unnamed devices>")
return template.format(
self=self, count=len(self), names=', '.join(self._order),
unnamed=len(self) - len(self._named))
except DeviceClosed:
return super().__repr__()
def __len__(self):
return len(self._all)
def __getitem__(self, index):
return self._all[index]
def __iter__(self):
return iter(self._all)
@property
def all(self):
# XXX Deprecate this in favour of using the instance as a container
return self._all
def close(self):
if getattr(self, '_all', None):
for device in self._all:
device.close()
self._all = ()
@property
def closed(self):
return all(device.closed for device in self)
@property
def namedtuple(self):
"""
The :func:`~collections.namedtuple` type constructed to represent the
value of the composite device. The :attr:`value` attribute returns
values of this type.
"""
return self._namedtuple
@property
def value(self):
"""
A :func:`~collections.namedtuple` containing a value for each
subordinate device. Devices with names will be represented as named
elements. Unnamed devices will have a unique name generated for them,
and they will appear in the position they appeared in the constructor.
"""
return self.namedtuple(*(device.value for device in self))
@property
def is_active(self):
"""
Composite devices are considered "active" if any of their constituent
devices have a "truthy" value.
"""
return any(self.value)
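# --- Illustrative sketch (not part of gpiozero itself) -------------------------
# A minimal example of the value/is_active behaviour documented above. It assumes
# gpiozero's mock pin factory so it can run without real hardware:
#
#     from gpiozero import Device, CompositeDevice, LED, Button
#     from gpiozero.pins.mock import MockFactory
#
#     Device.pin_factory = MockFactory()      # simulated pins (assumed available)
#     d = CompositeDevice(LED(2), btn=Button(3))
#     print(d.value)                          # CompositeDeviceValue(device_0=False, btn=False)
#     d[0].on()
#     print(d.is_active)                      # True: one constituent value is truthy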
class GPIODevice(Device):
"""
Extends :class:`Device`. Represents a generic GPIO device and provides
the services common to all single-pin GPIO devices (like ensuring two
GPIO devices do not share a :attr:`pin`).
:type pin: int or str
:param pin:
The GPIO pin that the device is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised. If the pin is already in use by another device,
:exc:`GPIOPinInUse` will be raised.
"""
def __init__(self, pin=None, *, pin_factory=None):
super().__init__(pin_factory=pin_factory)
# self._pin must be set before any possible exceptions can be raised
# because it's accessed in __del__. However, it mustn't be given the
# value of pin until we've verified that it isn't already allocated
self._pin = None
if pin is None:
raise GPIOPinMissing('No pin given')
# Check you can reserve *before* constructing the pin
self.pin_factory.reserve_pins(self, pin)
pin = self.pin_factory.pin(pin)
self._pin = pin
self._active_state = True
self._inactive_state = False
def _state_to_value(self, state):
return int(state == self._active_state)
def _read(self):
try:
return self._state_to_value(self.pin.state)
except (AttributeError, TypeError):
self._check_open()
raise
def close(self):
super().close()
if getattr(self, '_pin', None) is not None:
self.pin_factory.release_pins(self, self._pin.number)
self._pin.close()
self._pin = None
@property
def closed(self):
try:
return self._pin is None
except AttributeError:
return True
def _check_open(self):
try:
super()._check_open()
except DeviceClosed as e:
# For backwards compatibility; GPIODeviceClosed is deprecated
raise GPIODeviceClosed(str(e))
@property
def pin(self):
"""
The :class:`Pin` that the device is connected to. This will be
:data:`None` if the device has been closed (see the
:meth:`~Device.close` method). When dealing with GPIO pins, query
``pin.number`` to discover the GPIO pin (in BCM numbering) that the
device is connected to.
"""
return self._pin
@property
def value(self):
return self._read()
def __repr__(self):
try:
return (
"<gpiozero.{self.__class__.__name__} object on pin "
"{self.pin!r}, is_active={self.is_active}>".format(
self=self))
except DeviceClosed:
return (
"<gpiozero.{self.__class__.__name__} object closed>".format(
self=self))
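# --- Illustrative sketch (not part of gpiozero itself) -------------------------
# The docstring above notes that two devices may not share a pin; reserving an
# already-used pin raises GPIOPinInUse. Again assuming the mock pin factory:
#
#     from gpiozero import Device, LED
#     from gpiozero.exc import GPIOPinInUse
#     from gpiozero.pins.mock import MockFactory
#
#     Device.pin_factory = MockFactory()
#     first = LED(17)
#     try:
#         second = LED(17)                    # second reservation of the same pin
#     except GPIOPinInUse:
#         print("GPIO17 is already reserved")
#     first.close()                           # releases the pin for reuse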
def _devices_shutdown():
if Device.pin_factory is not None:
with Device.pin_factory._res_lock:
reserved_devices = {
dev
for ref_list in Device.pin_factory._reservations.values()
for ref in ref_list
for dev in (ref(),)
if dev is not None
}
for dev in reserved_devices:
dev.close()
Device.pin_factory.close()
Device.pin_factory = None
def _shutdown():
_threads_shutdown()
_devices_shutdown()
atexit.register(_shutdown)
| 39.604466 | 88 | 0.603697 |
b0599e8b469ea54a23b0f3dc004f611564d7f479 | 2,587 | py | Python | a2ml/api/auger/impl/cloud/endpoint.py | augerai/a2ml | 9d9ce0ac1b51cc81f1cb5ae331c4523131bc6a86 | ["Apache-2.0"] | 30 | 2019-07-01T13:23:27.000Z | 2022-03-16T21:19:33.000Z | a2ml/api/auger/impl/cloud/endpoint.py | augerai/a2ml | 9d9ce0ac1b51cc81f1cb5ae331c4523131bc6a86 | ["Apache-2.0"] | 234 | 2019-07-04T13:56:15.000Z | 2021-11-04T10:12:55.000Z | a2ml/api/auger/impl/cloud/endpoint.py | augerai/a2ml | 9d9ce0ac1b51cc81f1cb5ae331c4523131bc6a86 | ["Apache-2.0"] | 13 | 2019-07-04T14:00:34.000Z | 2020-07-13T11:18:44.000Z |
from .base import AugerBaseApi
from ..exceptions import AugerException
from .review_alert_item import AugerReviewAlertItemApi
from .review_alert import AugerReviewAlertApi
from .cluster_task import AugerClusterTaskApi
class AugerEndpointApi(AugerBaseApi):
"""Auger Endpoint API."""
def __init__(self, ctx, endpoint_api, endpoint_id=None):
super(AugerEndpointApi, self).__init__(
ctx, endpoint_api, None, endpoint_id)
def create(self, pipeline_id, name):
return self._call_create({'pipeline_id': pipeline_id, 'name': name},[])
def update(self, params):
params['id'] = self.object_id
return self._call_update(params)
def update_roi(self):
roi_names = ['review/roi/filter', 'review/roi/investment', 'review/roi/revenue']
roi_values = []
roi_exists = False
for name in roi_names:
if self.ctx.config.get(name):
roi_exists = True
roi_values.append(self.ctx.config.get(name))
if roi_exists:
res = self.rest_api.hub_client.create_endpoint_roi_validation(
endpoint_id=self.object_id,
expressions=roi_values,
)
cluster_task = AugerClusterTaskApi(self.ctx, cluster_task_id=res['data']['id'])
cluster_task.wait_for_status(['pending', 'received', 'started', 'retry'])
props = cluster_task.properties()
isValid = True
for idx, item in enumerate(props.get('result', [])):
if not item.get('is_valid'):
isValid = False
self.ctx.error("Review ROI config parameter '%s' = '%s' is invalid. Error: %s"%(
roi_names[idx], roi_values[idx], item.get('error')))
if isValid:
return self._call_update({ 'id': self.object_id,
'roi_filter': roi_values[0], 'roi_investment': roi_values[1], 'roi_revenue': roi_values[2],
})
def update_monitoring_value(self):
return self._call_update({ 'id': self.object_id,
'false_positive_cost_amount': self.ctx.config.get('review/monitoring_value/false_positive_value'),
'false_negative_cost_amount': self.ctx.config.get('review/monitoring_value/false_negative_value'),
'regression_cost_over_amount': self.ctx.config.get('review/monitoring_value/regression_value_over'),
'regression_cost_under_amount': self.ctx.config.get('review/monitoring_value/regression_value_under'),
})
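# --- Illustrative sketch (not part of this module) -----------------------------
# The update methods above read these project config keys via the a2ml context;
# the returned values shown here are hypothetical:
#
#     ctx.config.get('review/roi/filter')        # -> "P > 0.2"
#     ctx.config.get('review/roi/investment')    # -> "2"
#     ctx.config.get('review/roi/revenue')       # -> "10 * A"
#     ctx.config.get('review/monitoring_value/false_positive_value')   # -> 5
#     ctx.config.get('review/monitoring_value/regression_value_over')  # -> 1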
| 44.603448 | 114 | 0.63162 |
81003cc2b9becec46691667317615c5a7f7d2cda | 2,105 | py | Python | test_status_endpoints/test_status_tweet.py | RSMuthu/Twitter_API-Pytest | b272d8ead3d51bbd0d5a4720f67905dccee27d7c | ["MIT"] | null | null | null | test_status_endpoints/test_status_tweet.py | RSMuthu/Twitter_API-Pytest | b272d8ead3d51bbd0d5a4720f67905dccee27d7c | ["MIT"] | null | null | null | test_status_endpoints/test_status_tweet.py | RSMuthu/Twitter_API-Pytest | b272d8ead3d51bbd0d5a4720f67905dccee27d7c | ["MIT"] | null | null | null |
import pytest
from conftest import twitter_session, BASE_URL
from utils import get_home_tweets
# status list to tweet
status_list = {"We welcome you to MSD family :)", "Hello World !!"}
@pytest.mark.run(order=1) ## ordering test cases -- make the tweet creation the first test case
@pytest.mark.parametrize("status_text", status_list) ## parametrizing the test over the "status_list" iterable
def test_make_tweet(twitter_session, status_text):
'''
Test Case for the creation of a tweet.
Args:
twitter_session - the OAuth1Session from the pytest fixture.
status_text - the text which will be dumped in the tweet created for testing.
'''
# making an API call to post the tweet with the status_text provided
resp = twitter_session.post(f"{BASE_URL}/statuses/update.json", params={'status': status_text})
print (f"\nTweet Response - {resp.text}") ## response shall be captured from std
# Assert to confirm the tweet was made successfully
assert resp.status_code == 200
# Assert to confirm the tweet made contains the correct data
assert resp.json()['text'] == status_text
@pytest.mark.run(order=4) ## ordering test cases -- delete the tweet after all the test cases are done
def test_delete_tweet(twitter_session):
'''
Test Case for the deletion of a tweet.
This test case is executed post creation.
We will be searching for the tweet from the home timeline and deleting it.
Args:
twitter_session - the OAuth1Session from the pytest fixture.
'''
# loop through the tweets made as part of test case
for tweet in get_home_tweets(twitter_session, tweet_count=len(status_list)):
# verifying it is the same tweet we made, before deleting
if tweet['text'] in status_list:
# API call to delete the tweet
resp = twitter_session.post(f"{BASE_URL}/statuses/destroy/{tweet['id']}.json")
print (f"\nDelete tweet Response - {resp.text}") ## response shall be captured from std
# Assert to confirm the request was made successfully
assert resp.status_code == 200
| 46.777778 | 110 | 0.709264 |
9dd053f2ff73215943f6f25490c3658b401c3c57 | 5,104 | py | Python | soapfish/soap.py | soapteam/soapfish | 3f9b531d5cf14a063edbda16a62ace23fabe6d06 | ["BSD-3-Clause"] | 20 | 2016-07-20T01:12:40.000Z | 2021-12-16T02:49:49.000Z | soapfish/soap.py | FelixSchwarz/soapfish | d98a1d7e8b92897620810e75de97de598140fafb | ["BSD-3-Clause"] | 42 | 2015-01-17T18:54:50.000Z | 2016-06-28T07:06:18.000Z | soapfish/soap.py | soapteam/soapfish | 3f9b531d5cf14a063edbda16a62ace23fabe6d06 | ["BSD-3-Clause"] | 13 | 2016-09-02T09:37:42.000Z | 2021-11-29T13:11:33.000Z |
"""SOAP protocol implementation, dispatchers and client stub."""
import logging
import string
import httpx
from . import core, namespaces as ns, soap11, soap12, wsa
from .utils import uncapitalize
SOAP_HTTP_Transport = ns.wsdl_soap_http
logger = logging.getLogger('soapfish')
class SOAPVersion:
SOAP11 = soap11
SOAP12 = soap12
@classmethod
def get_version(cls, namespace):
if namespace == cls.SOAP11.ENVELOPE_NAMESPACE or namespace == cls.SOAP11.BINDING_NAMESPACE:
return cls.SOAP11
elif namespace == cls.SOAP12.ENVELOPE_NAMESPACE or namespace == cls.SOAP12.BINDING_NAMESPACE:
return cls.SOAP12
else:
raise ValueError(f"SOAP version with namespace '{namespace}' is not supported.")
@classmethod
def get_version_name(cls, namespace):
return cls.get_version(namespace).__name__
@classmethod
def get_version_from_xml(cls, xml):
namespaces = {'wsdl': ns.wsdl, 'soap12': ns.wsdl_soap12}
return cls.SOAP12 if xml.xpath('wsdl:binding/soap12:binding', namespaces=namespaces) else cls.SOAP11
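# Illustrative sketch (not part of this module): resolving the protocol module
# from an envelope namespace.
#
#     soap = SOAPVersion.get_version(soap12.ENVELOPE_NAMESPACE)   # -> soap12
#     soap.Envelope                                                # SOAP 1.2 envelope class
#     SOAPVersion.get_version('urn:not-a-soap-namespace')         # raises ValueError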
class Service:
"""Describe service aggregating information required for dispatching and WSDL generation."""
def __init__(self, targetNamespace, location, schemas, methods, version=SOAPVersion.SOAP11, name='Service',
input_header=None, output_header=None, use_wsa=False):
self.name = name
self.targetNamespace = targetNamespace
self.location = location
self.schemas = schemas
self.methods = methods
self.version = version
self.use_wsa = use_wsa
if use_wsa and input_header is None:
input_header = wsa.WSAsaHeader
if use_wsa and output_header is None:
output_header = wsa.WSAHeader
self.input_header = input_header
self.output_header = output_header
def get_method(self, operationName):
return next(m for m in self.methods if m.operationName == operationName)
def find_element_by_name(self, name):
element = None
for schema in self.schemas:
element = schema.get_element_by_name(name)
if element is not None:
break
return element
def route(self, operationName):
"""Return a decorator that binds a Python function to service method."""
method = self.get_method(operationName)
def wrapper(func):
method.function = func
return func
return wrapper
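# Illustrative sketch (not part of this module): binding a dispatcher function to
# a service operation with route(). The schema/method objects and the handler
# signature shown here are hypothetical placeholders for generated code.
#
#     service = Service('http://example.net/ws', '/soap', schemas=[echo_schema],
#                       methods=[echo_method], version=SOAPVersion.SOAP11)
#
#     @service.route('echo')
#     def echo_handler(request, echo_input):
#         return echo_input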
class Stub:
"""Client stub. Handles only document style calls."""
SERVICE = None
SCHEME = 'http'
HOST = 'www.example.net'
def __init__(self, username=None, password=None, service=None, location=None):
self.username = username
self.password = password
self.service = service if service else self.SERVICE
context = {'scheme': self.SCHEME, 'host': self.HOST}
if location is None:
location = lambda template, context: string.Template(template).safe_substitute(**context)
if callable(location):
self.location = location(self.service.location, context)
elif isinstance(location, str):
self.location = location
else:
raise TypeError('Expected string or callable for location.')
def _handle_response(self, method, http_headers, content):
soap = self.service.version
envelope = soap.Envelope.parsexml(content)
if envelope.Header and method and method.output_header:
response_header = envelope.Header.parse_as(method.output_header)
else:
response_header = None
if envelope.Body.Fault:
code, message, actor = soap.parse_fault_message(envelope.Body.Fault)
error = core.SOAPError(code=code, message=message, actor=actor)
raise error
if isinstance(method.output, str):
_type = self.service.find_element_by_name(method.output)._type.__class__
else:
_type = method.output
body = envelope.Body.parse_as(_type)
return core.SOAPResponse(body, soap_header=response_header)
def call(self, operationName, parameter, header=None):
soap = self.service.version
method = self.service.get_method(operationName)
tagname = method.input if isinstance(method.input, str) else uncapitalize(parameter.__class__.__name__)
auth = (self.username, self.password) if self.username else None
data = soap.Envelope.response(tagname, parameter, header=header)
headers = soap.build_http_request_headers(method.soapAction)
logger.info("Call '%s' on '%s'", operationName, self.location)
logger.debug('Request Headers: %s', headers)
logger.debug('Request Envelope: %s', data)
r = httpx.post(self.location, auth=auth, headers=headers, data=data)
logger.debug('Response Headers: %s', r.headers)
logger.debug('Response Envelope: %s', r.content)
return self._handle_response(method, r.headers, r.content)
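# Illustrative sketch (not part of this module): a concrete client stub. SERVICE
# would normally be a generated Service instance; the names below are hypothetical.
#
#     class EchoStub(Stub):
#         SERVICE = echo_service
#         SCHEME = 'https'
#         HOST = 'ws.example.net'
#
#     stub = EchoStub(username='alice', password='secret')
#     response = stub.call('echo', EchoRequest(value='ping'))   # EchoRequest assumed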
| 36.198582 | 111 | 0.666732 |
feb754e95f28c5b1e7d9157179f2d2f45d18d443 | 13,489 | py | Python | venv/Lib/site-packages/pandas/compat/numpy/function.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | ["MIT"] | null | null | null | venv/Lib/site-packages/pandas/compat/numpy/function.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | ["MIT"] | null | null | null | venv/Lib/site-packages/pandas/compat/numpy/function.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | ["MIT"] | null | null | null |
"""
For compatibility with numpy libraries, pandas functions or
methods have to accept '*args' and '**kwargs' parameters to
accommodate numpy arguments that are not actually used or
respected in the pandas implementation.
To ensure that users do not abuse these parameters, validation
is performed in 'validators.py' to make sure that any extra
parameters passed correspond ONLY to those in the numpy signature.
Part of that validation includes whether or not the user attempted
to pass in non-default values for these extraneous parameters. As we
want to discourage users from relying on these parameters when calling
the pandas implementation, we want them only to pass in the default values
for these parameters.
This module provides a set of commonly used default arguments for functions
and methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
from collections import OrderedDict
from distutils.version import LooseVersion
from typing import Any, Dict, Optional, Union
from numpy import __version__ as _np_version, ndarray
from pandas._libs.lib import is_bool, is_integer
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
validate_args,
validate_args_and_kwargs,
validate_kwargs,
)
class CompatValidator:
def __init__(self, defaults, fname=None, method=None, max_fname_arg_count=None):
self.fname = fname
self.method = method
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
def __call__(self, args, kwargs, fname=None, max_fname_arg_count=None, method=None):
if args or kwargs:
fname = self.fname if fname is None else fname
max_fname_arg_count = (
self.max_fname_arg_count
if max_fname_arg_count is None
else max_fname_arg_count
)
method = self.method if method is None else method
if method == "args":
validate_args(fname, args, max_fname_arg_count, self.defaults)
elif method == "kwargs":
validate_kwargs(fname, kwargs, self.defaults)
elif method == "both":
validate_args_and_kwargs(
fname, args, kwargs, max_fname_arg_count, self.defaults
)
else:
raise ValueError(f"invalid validation method '{method}'")
ARGMINMAX_DEFAULTS = dict(out=None)
validate_argmin = CompatValidator(
ARGMINMAX_DEFAULTS, fname="argmin", method="both", max_fname_arg_count=1
)
validate_argmax = CompatValidator(
ARGMINMAX_DEFAULTS, fname="argmax", method="both", max_fname_arg_count=1
)
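# Illustrative sketch (not pandas code): how these validators are used inside a
# pandas reduction. Forwarding only numpy's default arguments is a no-op; any
# non-default extras are rejected with an error by the underlying validate_* helpers.
#
#     def argmin(self, axis=None, *args, **kwargs):
#         validate_argmin(args, kwargs)     # fine for np.argmin(obj) / out=None
#         ...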
def process_skipna(skipna, args):
if isinstance(skipna, ndarray) or skipna is None:
args = (skipna,) + args
skipna = True
return skipna, args
def validate_argmin_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmin' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmin(args, kwargs)
return skipna
def validate_argmax_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmax' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmax(args, kwargs)
return skipna
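# Sketch of the shuffling above (not pandas code): when numpy dispatches
# np.argmin(series, out=None), the positional 'out' lands in the 'skipna' slot,
# so it is pushed back into *args and skipna is reset to its default.
#
#     process_skipna(None, ())        # -> (True, (None,))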
ARGSORT_DEFAULTS: "OrderedDict[str, Optional[Union[int, str]]]" = OrderedDict()
ARGSORT_DEFAULTS["axis"] = -1
ARGSORT_DEFAULTS["kind"] = "quicksort"
ARGSORT_DEFAULTS["order"] = None
if LooseVersion(_np_version) >= LooseVersion("1.17.0"):
# GH-26361. NumPy added radix sort and changed default to None.
ARGSORT_DEFAULTS["kind"] = None
validate_argsort = CompatValidator(
ARGSORT_DEFAULTS, fname="argsort", max_fname_arg_count=0, method="both"
)
# two different signatures of argsort, this second validation
# for when the `kind` param is supported
ARGSORT_DEFAULTS_KIND: "OrderedDict[str, Optional[int]]" = OrderedDict()
ARGSORT_DEFAULTS_KIND["axis"] = -1
ARGSORT_DEFAULTS_KIND["order"] = None
validate_argsort_kind = CompatValidator(
ARGSORT_DEFAULTS_KIND, fname="argsort", max_fname_arg_count=0, method="both"
)
def validate_argsort_with_ascending(ascending, args, kwargs):
"""
If 'Categorical.argsort' is called via the 'numpy' library, the
first parameter in its signature is 'axis', which takes either
an integer or 'None', so check if the 'ascending' parameter has
either integer type or is None, since 'ascending' itself should
be a boolean
"""
if is_integer(ascending) or ascending is None:
args = (ascending,) + args
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
return ascending
CLIP_DEFAULTS: Dict[str, Any] = dict(out=None)
validate_clip = CompatValidator(
CLIP_DEFAULTS, fname="clip", method="both", max_fname_arg_count=3
)
def validate_clip_with_axis(axis, args, kwargs):
"""
If 'NDFrame.clip' is called via the numpy library, the third
parameter in its signature is 'out', which can take an ndarray,
so check if the 'axis' parameter is an instance of ndarray, since
'axis' itself should either be an integer or None
"""
if isinstance(axis, ndarray):
args = (axis,) + args
axis = None
validate_clip(args, kwargs)
return axis
CUM_FUNC_DEFAULTS: "OrderedDict[str, Any]" = OrderedDict()
CUM_FUNC_DEFAULTS["dtype"] = None
CUM_FUNC_DEFAULTS["out"] = None
validate_cum_func = CompatValidator(
CUM_FUNC_DEFAULTS, method="both", max_fname_arg_count=1
)
validate_cumsum = CompatValidator(
CUM_FUNC_DEFAULTS, fname="cumsum", method="both", max_fname_arg_count=1
)
def validate_cum_func_with_skipna(skipna, args, kwargs, name):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'dtype', which takes either a
'numpy' dtype or 'None', so check if the 'skipna' parameter is
a boolean or not
"""
if not is_bool(skipna):
args = (skipna,) + args
skipna = True
validate_cum_func(args, kwargs, fname=name)
return skipna
ALLANY_DEFAULTS: "OrderedDict[str, Optional[bool]]" = OrderedDict()
ALLANY_DEFAULTS["dtype"] = None
ALLANY_DEFAULTS["out"] = None
ALLANY_DEFAULTS["keepdims"] = False
validate_all = CompatValidator(
ALLANY_DEFAULTS, fname="all", method="both", max_fname_arg_count=1
)
validate_any = CompatValidator(
ALLANY_DEFAULTS, fname="any", method="both", max_fname_arg_count=1
)
LOGICAL_FUNC_DEFAULTS = dict(out=None, keepdims=False)
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method="kwargs")
MINMAX_DEFAULTS = dict(out=None, keepdims=False)
validate_min = CompatValidator(
MINMAX_DEFAULTS, fname="min", method="both", max_fname_arg_count=1
)
validate_max = CompatValidator(
MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
)
RESHAPE_DEFAULTS: Dict[str, str] = dict(order="C")
validate_reshape = CompatValidator(
RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1
)
REPEAT_DEFAULTS: Dict[str, Any] = dict(axis=None)
validate_repeat = CompatValidator(
REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1
)
ROUND_DEFAULTS: Dict[str, Any] = dict(out=None)
validate_round = CompatValidator(
ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
)
SORT_DEFAULTS: "OrderedDict[str, Optional[Union[int, str]]]" = OrderedDict()
SORT_DEFAULTS["axis"] = -1
SORT_DEFAULTS["kind"] = "quicksort"
SORT_DEFAULTS["order"] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs")
STAT_FUNC_DEFAULTS: "OrderedDict[str, Optional[Any]]" = OrderedDict()
STAT_FUNC_DEFAULTS["dtype"] = None
STAT_FUNC_DEFAULTS["out"] = None
PROD_DEFAULTS = SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
SUM_DEFAULTS["keepdims"] = False
SUM_DEFAULTS["initial"] = None
MEDIAN_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
MEDIAN_DEFAULTS["overwrite_input"] = False
MEDIAN_DEFAULTS["keepdims"] = False
STAT_FUNC_DEFAULTS["keepdims"] = False
validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS, method="kwargs")
validate_sum = CompatValidator(
SUM_DEFAULTS, fname="sum", method="both", max_fname_arg_count=1
)
validate_prod = CompatValidator(
PROD_DEFAULTS, fname="prod", method="both", max_fname_arg_count=1
)
validate_mean = CompatValidator(
STAT_FUNC_DEFAULTS, fname="mean", method="both", max_fname_arg_count=1
)
validate_median = CompatValidator(
MEDIAN_DEFAULTS, fname="median", method="both", max_fname_arg_count=1
)
STAT_DDOF_FUNC_DEFAULTS: "OrderedDict[str, Optional[bool]]" = OrderedDict()
STAT_DDOF_FUNC_DEFAULTS["dtype"] = None
STAT_DDOF_FUNC_DEFAULTS["out"] = None
STAT_DDOF_FUNC_DEFAULTS["keepdims"] = False
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method="kwargs")
TAKE_DEFAULTS: "OrderedDict[str, Optional[str]]" = OrderedDict()
TAKE_DEFAULTS["out"] = None
TAKE_DEFAULTS["mode"] = "raise"
validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")
def validate_take_with_convert(convert, args, kwargs):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'axis', which takes either an
ndarray or 'None', so check if the 'convert' parameter is either
an instance of ndarray or is None
"""
if isinstance(convert, ndarray) or convert is None:
args = (convert,) + args
convert = True
validate_take(args, kwargs, max_fname_arg_count=3, method="both")
return convert
TRANSPOSE_DEFAULTS = dict(axes=None)
validate_transpose = CompatValidator(
TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0
)
def validate_window_func(name, args, kwargs):
numpy_args = ("axis", "dtype", "out")
msg = (
f"numpy operations are not valid with window objects. "
f"Use .{name}() directly instead "
)
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_rolling_func(name, args, kwargs):
numpy_args = ("axis", "dtype", "out")
msg = (
f"numpy operations are not valid with window objects. "
f"Use .rolling(...).{name}() instead "
)
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_expanding_func(name, args, kwargs):
numpy_args = ("axis", "dtype", "out")
msg = (
f"numpy operations are not valid with window objects. "
f"Use .expanding(...).{name}() instead "
)
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_groupby_func(name, args, kwargs, allowed=None):
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall(
f"numpy operations are not valid with "
f"groupby. Use .groupby(...).{name}() "
f"instead"
)
RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var")
def validate_resampler_func(method, args, kwargs):
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
the function signature
"""
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall(
f"numpy operations are not "
f"valid with resample. Use "
f".resample(...).{method}() instead"
)
else:
raise TypeError("too many arguments passed in")
def validate_minmax_axis(axis):
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
Parameters
----------
axis : int or None
Raises
------
ValueError
"""
ndim = 1 # hard-coded for Index
if axis is None:
return
if axis >= ndim or (axis < 0 and ndim + axis < 0):
raise ValueError(f"`axis` must be fewer than the number of dimensions ({ndim})")
| 33.224138 | 89 | 0.679072 |
f7b6fd9672253f01677a2eba90ab28cee96578b7 | 93,901 | py | Python | test/steps/v2_steps.py | SubstructureDek/py-algorand-sdk | ddbbe7efc9f23e5d67dbf16bcb6465bfec234f6a | ["MIT"] | null | null | null | test/steps/v2_steps.py | SubstructureDek/py-algorand-sdk | ddbbe7efc9f23e5d67dbf16bcb6465bfec234f6a | ["MIT"] | null | null | null | test/steps/v2_steps.py | SubstructureDek/py-algorand-sdk | ddbbe7efc9f23e5d67dbf16bcb6465bfec234f6a | ["MIT"] | null | null | null |
import base64
import json
import os
import urllib
import unittest
from datetime import datetime
from urllib.request import Request, urlopen
from algosdk.abi.contract import NetworkInfo
import parse
from behave import (
given,
when,
then,
register_type,
step,
) # pylint: disable=no-name-in-module
from algosdk.future import transaction
from algosdk import (
abi,
account,
atomic_transaction_composer,
encoding,
error,
mnemonic,
)
from algosdk.v2client import *
from algosdk.v2client.models import (
DryrunRequest,
DryrunSource,
Account,
Application,
ApplicationLocalState,
)
from algosdk.error import AlgodHTTPError, IndexerHTTPError
from algosdk.testing.dryrun import DryrunTestCaseMixin
from test.steps.steps import token as daemon_token
from test.steps.steps import algod_port
@parse.with_pattern(r".*")
def parse_string(text):
return text
register_type(MaybeString=parse_string)
@parse.with_pattern(r"true|false")
def parse_bool(value):
if value not in ("true", "false"):
raise ValueError("Unknown value for include_all: {}".format(value))
return value == "true"
register_type(MaybeBool=parse_bool)
@given("mock server recording request paths")
def setup_mockserver(context):
context.url = "http://127.0.0.1:" + str(context.path_server_port)
context.acl = algod.AlgodClient("algod_token", context.url)
context.icl = indexer.IndexerClient("indexer_token", context.url)
@given('mock http responses in "{jsonfiles}" loaded from "{directory}"')
def mock_response(context, jsonfiles, directory):
context.url = "http://127.0.0.1:" + str(context.response_server_port)
context.acl = algod.AlgodClient("algod_token", context.url)
context.icl = indexer.IndexerClient("indexer_token", context.url)
# The mock server writes this response to a file; on a regular request
# that file is read back.
# It's an interesting approach, but currently doesn't support setting
# the content type, or different return codes. This will require a bit
# of extra work when/if we support the different error cases.
#
# Take a look at 'environment.py' to see the mock servers.
req = Request(
context.url + "/mock/" + directory + "/" + jsonfiles, method="GET"
)
urlopen(req)
@given(
'mock http responses in "{filename}" loaded from "{directory}" with status {status}.'
)
def step_impl(context, filename, directory, status):
context.expected_status_code = int(status)
with open("test/features/resources/mock_response_status", "w") as f:
f.write(status)
mock_response(context, filename, directory)
f = open("test/features/resources/mock_response_path", "r")
mock_response_path = f.read()
f.close()
f = open("test/features/resources/" + mock_response_path, "r")
expected_mock_response = f.read()
f.close()
expected_mock_response = bytes(expected_mock_response, "ascii")
context.expected_mock_response = json.loads(expected_mock_response)
def validate_error(context, err):
if context.expected_status_code != 200:
if context.expected_status_code == 500:
assert context.expected_mock_response["message"] == err.args[0], (
context.expected_mock_response,
err.args[0],
)
else:
raise NotImplementedError(
"test does not know how to validate status code "
+ context.expected_status_code
)
else:
raise err
@when('we make any "{client}" call to "{endpoint}".')
def step_impl(context, client, endpoint):
# with the current implementation of mock responses, there is no need to do an 'endpoint' lookup
if client == "indexer":
try:
context.response = context.icl.health()
except error.IndexerHTTPError as err:
validate_error(context, err)
elif client == "algod":
try:
context.response = context.acl.status()
except error.AlgodHTTPError as err:
validate_error(context, err)
else:
raise NotImplementedError('did not recognize client "' + client + '"')
@then("the parsed response should equal the mock response.")
def step_impl(context):
if context.expected_status_code == 200:
assert context.expected_mock_response == context.response
@when(
'we make a Pending Transaction Information against txid "{txid}" with format "{response_format}"'
)
def pending_txn_info(context, txid, response_format):
context.response = context.acl.pending_transaction_info(
txid, response_format=response_format
)
@when(
'we make a Pending Transaction Information with max {max} and format "{response_format}"'
)
def pending_txn_with_max(context, max, response_format):
context.response = context.acl.pending_transactions(
int(max), response_format=response_format
)
@when("we make any Pending Transactions Information call")
def pending_txn_any(context):
context.response = context.acl.pending_transactions(
100, response_format="msgpack"
)
@when("we make any Pending Transaction Information call")
def pending_txn_any2(context):
context.response = context.acl.pending_transaction_info(
"sdfsf", response_format="msgpack"
)
@then(
'the parsed Pending Transaction Information response should have sender "{sender}"'
)
def parse_pending_txn(context, sender):
context.response = json.loads(context.response)
assert (
encoding.encode_address(
base64.b64decode(context.response["txn"]["txn"]["snd"])
)
== sender
)
@then(
'the parsed Pending Transactions Information response should contain an array of len {length} and element number {idx} should have sender "{sender}"'
)
def parse_pending_txns(context, length, idx, sender):
context.response = json.loads(context.response)
assert len(context.response["top-transactions"]) == int(length)
assert (
encoding.encode_address(
base64.b64decode(
context.response["top-transactions"][int(idx)]["txn"]["snd"]
)
)
== sender
)
@when(
'we make a Pending Transactions By Address call against account "{account}" and max {max} and format "{response_format}"'
)
def pending_txns_by_addr(context, account, max, response_format):
context.response = context.acl.pending_transactions_by_address(
account, limit=int(max), response_format=response_format
)
@when("we make any Pending Transactions By Address call")
def pending_txns_by_addr_any(context):
context.response = context.acl.pending_transactions_by_address(
"PNWOET7LLOWMBMLE4KOCELCX6X3D3Q4H2Q4QJASYIEOF7YIPPQBG3YQ5YI",
response_format="msgpack",
)
@then(
'the parsed Pending Transactions By Address response should contain an array of len {length} and element number {idx} should have sender "{sender}"'
)
def parse_pend_by_addr(context, length, idx, sender):
context.response = json.loads(context.response)
assert len(context.response["top-transactions"]) == int(length)
assert (
encoding.encode_address(
base64.b64decode(
context.response["top-transactions"][int(idx)]["txn"]["snd"]
)
)
== sender
)
@when("we make any Send Raw Transaction call")
def send_any(context):
context.response = context.acl.send_raw_transaction("Bg==")
@then('the parsed Send Raw Transaction response should have txid "{txid}"')
def parsed_send(context, txid):
assert context.response == txid
@when("we make any Node Status call")
def status_any(context):
context.response = context.acl.status()
@then("the parsed Node Status response should have a last round of {roundNum}")
def parse_status(context, roundNum):
assert context.response["last-round"] == int(roundNum)
@when("we make a Status after Block call with round {block}")
def status_after(context, block):
context.response = context.acl.status_after_block(int(block))
@when("we make any Status After Block call")
def status_after_any(context):
context.response = context.acl.status_after_block(3)
@then(
"the parsed Status After Block response should have a last round of {roundNum}"
)
def parse_status_after(context, roundNum):
assert context.response["last-round"] == int(roundNum)
@when("we make any Ledger Supply call")
def ledger_any(context):
context.response = context.acl.ledger_supply()
@then(
"the parsed Ledger Supply response should have totalMoney {tot} onlineMoney {online} on round {roundNum}"
)
def parse_ledger(context, tot, online, roundNum):
assert context.response["online-money"] == int(online)
assert context.response["total-money"] == int(tot)
assert context.response["current_round"] == int(roundNum)
@when('we make an Account Information call against account "{account}"')
def acc_info(context, account):
context.response = context.acl.account_info(account)
@when("we make any Account Information call")
def acc_info_any(context):
context.response = context.acl.account_info(
"PNWOET7LLOWMBMLE4KOCELCX6X3D3Q4H2Q4QJASYIEOF7YIPPQBG3YQ5YI"
)
@then(
'the parsed Account Information response should have address "{address}"'
)
def parse_acc_info(context, address):
assert context.response["address"] == address
@when("we make a GetAssetByID call for assetID {asset_id}")
def asset_info(context, asset_id):
context.response = context.acl.asset_info(int(asset_id))
@when("we make a GetApplicationByID call for applicationID {app_id}")
def application_info(context, app_id):
context.response = context.acl.application_info(int(app_id))
@when(
'we make a Get Block call against block number {block} with format "{response_format}"'
)
def block(context, block, response_format):
context.response = context.acl.block_info(
int(block), response_format=response_format
)
@when("we make any Get Block call")
def block_any(context):
context.response = context.acl.block_info(3, response_format="msgpack")
@then('the parsed Get Block response should have rewards pool "{pool}"')
def parse_block(context, pool):
context.response = json.loads(context.response)
assert context.response["block"]["rwd"] == pool
@when(
"I get the next page using {indexer} to lookup asset balances for {assetid} with {currencygt}, {currencylt}, {limit}"
)
def next_asset_balance(
context, indexer, assetid, currencygt, currencylt, limit
):
context.response = context.icls[indexer].asset_balances(
int(assetid),
min_balance=int(currencygt),
max_balance=int(currencylt),
limit=int(limit),
next_page=context.response["next-token"],
)
@then(
'There are {numaccounts} with the asset, the first is "{account}" has "{isfrozen}" and {amount}'
)
def check_asset_balance(context, numaccounts, account, isfrozen, amount):
assert len(context.response["balances"]) == int(numaccounts)
assert context.response["balances"][0]["address"] == account
assert context.response["balances"][0]["amount"] == int(amount)
assert context.response["balances"][0]["is-frozen"] == (isfrozen == "true")
@when(
'we make a Lookup Asset Balances call against asset index {index} with limit {limit} afterAddress "{afterAddress:MaybeString}" round {block} currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan}'
)
def asset_balance(
context,
index,
limit,
afterAddress,
block,
currencyGreaterThan,
currencyLessThan,
):
context.response = context.icl.asset_balances(
int(index),
int(limit),
next_page=None,
min_balance=int(currencyGreaterThan),
max_balance=int(currencyLessThan),
block=int(block),
)
@when("we make any LookupAssetBalances call")
def asset_balance_any(context):
context.response = context.icl.asset_balances(123, 10)
@then(
'the parsed LookupAssetBalances response should be valid on round {roundNum}, and contain an array of len {length} and element number {idx} should have address "{address}" amount {amount} and frozen state "{frozenState}"'
)
def parse_asset_balance(
context, roundNum, length, idx, address, amount, frozenState
):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["balances"]) == int(length)
assert context.response["balances"][int(idx)]["address"] == address
assert context.response["balances"][int(idx)]["amount"] == int(amount)
assert context.response["balances"][int(idx)]["is-frozen"] == (
frozenState == "true"
)
@when("I use {indexer} to search for all {assetid} asset transactions")
def icl_asset_txns(context, indexer, assetid):
context.response = context.icls[indexer].search_asset_transactions(
int(assetid)
)
@when(
'we make a Lookup Asset Transactions call against asset index {index} with NotePrefix "{notePrefixB64:MaybeString}" TxType "{txType:MaybeString}" SigType "{sigType:MaybeString}" txid "{txid:MaybeString}" round {block} minRound {minRound} maxRound {maxRound} limit {limit} beforeTime "{beforeTime:MaybeString}" afterTime "{afterTime:MaybeString}" currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} address "{address:MaybeString}" addressRole "{addressRole:MaybeString}" ExcluseCloseTo "{excludeCloseTo:MaybeString}" RekeyTo "{rekeyTo:MaybeString}"'
)
def asset_txns(
context,
index,
notePrefixB64,
txType,
sigType,
txid,
block,
minRound,
maxRound,
limit,
beforeTime,
afterTime,
currencyGreaterThan,
currencyLessThan,
address,
addressRole,
excludeCloseTo,
rekeyTo,
):
if notePrefixB64 == "none":
notePrefixB64 = ""
if txType == "none":
txType = None
if sigType == "none":
sigType = None
if txid == "none":
txid = None
if beforeTime == "none":
beforeTime = None
if afterTime == "none":
afterTime = None
if address == "none":
address = None
if addressRole == "none":
addressRole = None
if excludeCloseTo == "none":
excludeCloseTo = None
if rekeyTo == "none":
rekeyTo = None
context.response = context.icl.search_asset_transactions(
int(index),
limit=int(limit),
next_page=None,
note_prefix=base64.b64decode(notePrefixB64),
txn_type=txType,
sig_type=sigType,
txid=txid,
block=int(block),
min_round=int(minRound),
max_round=int(maxRound),
start_time=afterTime,
end_time=beforeTime,
min_amount=int(currencyGreaterThan),
max_amount=int(currencyLessThan),
address=address,
address_role=addressRole,
exclude_close_to=excludeCloseTo,
rekey_to=rekeyTo,
)
@when(
'we make a Lookup Asset Transactions call against asset index {index} with NotePrefix "{notePrefixB64:MaybeString}" TxType "{txType:MaybeString}" SigType "{sigType:MaybeString}" txid "{txid:MaybeString}" round {block} minRound {minRound} maxRound {maxRound} limit {limit} beforeTime "{beforeTime:MaybeString}" afterTime "{afterTime:MaybeString}" currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} address "{address:MaybeString}" addressRole "{addressRole:MaybeString}" ExcluseCloseTo "{excludeCloseTo:MaybeString}"'
)
def step_impl(
context,
index,
notePrefixB64,
txType,
sigType,
txid,
block,
minRound,
maxRound,
limit,
beforeTime,
afterTime,
currencyGreaterThan,
currencyLessThan,
address,
addressRole,
excludeCloseTo,
):
if notePrefixB64 == "none":
notePrefixB64 = ""
if txType == "none":
txType = None
if sigType == "none":
sigType = None
if txid == "none":
txid = None
if beforeTime == "none":
beforeTime = None
if afterTime == "none":
afterTime = None
if address == "none":
address = None
if addressRole == "none":
addressRole = None
if excludeCloseTo == "none":
excludeCloseTo = None
context.response = context.icl.search_asset_transactions(
int(index),
limit=int(limit),
next_page=None,
note_prefix=base64.b64decode(notePrefixB64),
txn_type=txType,
sig_type=sigType,
txid=txid,
block=int(block),
min_round=int(minRound),
max_round=int(maxRound),
start_time=afterTime,
end_time=beforeTime,
min_amount=int(currencyGreaterThan),
max_amount=int(currencyLessThan),
address=address,
address_role=addressRole,
exclude_close_to=excludeCloseTo,
rekey_to=None,
)
@when("we make any LookupAssetTransactions call")
def asset_txns_any(context):
context.response = context.icl.search_asset_transactions(32)
@then(
'the parsed LookupAssetTransactions response should be valid on round {roundNum}, and contain an array of len {length} and element number {idx} should have sender "{sender}"'
)
def parse_asset_tns(context, roundNum, length, idx, sender):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["transactions"]) == int(length)
assert context.response["transactions"][int(idx)]["sender"] == sender
@when('I use {indexer} to search for all "{accountid}" transactions')
def icl_txns_by_addr(context, indexer, accountid):
context.response = context.icls[indexer].search_transactions_by_address(
accountid
)
@when(
'we make a Lookup Account Transactions call against account "{account:MaybeString}" with NotePrefix "{notePrefixB64:MaybeString}" TxType "{txType:MaybeString}" SigType "{sigType:MaybeString}" txid "{txid:MaybeString}" round {block} minRound {minRound} maxRound {maxRound} limit {limit} beforeTime "{beforeTime:MaybeString}" afterTime "{afterTime:MaybeString}" currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} assetIndex {index} rekeyTo "{rekeyTo:MaybeString}"'
)
def txns_by_addr(
context,
account,
notePrefixB64,
txType,
sigType,
txid,
block,
minRound,
maxRound,
limit,
beforeTime,
afterTime,
currencyGreaterThan,
currencyLessThan,
index,
rekeyTo,
):
if notePrefixB64 == "none":
notePrefixB64 = ""
if txType == "none":
txType = None
if sigType == "none":
sigType = None
if txid == "none":
txid = None
if beforeTime == "none":
beforeTime = None
if afterTime == "none":
afterTime = None
if rekeyTo == "none":
rekeyTo = None
context.response = context.icl.search_transactions_by_address(
asset_id=int(index),
limit=int(limit),
next_page=None,
note_prefix=base64.b64decode(notePrefixB64),
txn_type=txType,
sig_type=sigType,
txid=txid,
block=int(block),
min_round=int(minRound),
max_round=int(maxRound),
start_time=afterTime,
end_time=beforeTime,
min_amount=int(currencyGreaterThan),
max_amount=int(currencyLessThan),
address=account,
rekey_to=rekeyTo,
)
@when(
'we make a Lookup Account Transactions call against account "{account:MaybeString}" with NotePrefix "{notePrefixB64:MaybeString}" TxType "{txType:MaybeString}" SigType "{sigType:MaybeString}" txid "{txid:MaybeString}" round {block} minRound {minRound} maxRound {maxRound} limit {limit} beforeTime "{beforeTime:MaybeString}" afterTime "{afterTime:MaybeString}" currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} assetIndex {index}'
)
def txns_by_addr(
context,
account,
notePrefixB64,
txType,
sigType,
txid,
block,
minRound,
maxRound,
limit,
beforeTime,
afterTime,
currencyGreaterThan,
currencyLessThan,
index,
):
if notePrefixB64 == "none":
notePrefixB64 = ""
if txType == "none":
txType = None
if sigType == "none":
sigType = None
if txid == "none":
txid = None
if beforeTime == "none":
beforeTime = None
if afterTime == "none":
afterTime = None
context.response = context.icl.search_transactions_by_address(
asset_id=int(index),
limit=int(limit),
next_page=None,
note_prefix=base64.b64decode(notePrefixB64),
txn_type=txType,
sig_type=sigType,
txid=txid,
block=int(block),
min_round=int(minRound),
max_round=int(maxRound),
start_time=afterTime,
end_time=beforeTime,
min_amount=int(currencyGreaterThan),
max_amount=int(currencyLessThan),
address=account,
rekey_to=None,
)
@when("we make any LookupAccountTransactions call")
def txns_by_addr_any(context):
context.response = context.icl.search_transactions_by_address(
"PNWOET7LLOWMBMLE4KOCELCX6X3D3Q4H2Q4QJASYIEOF7YIPPQBG3YQ5YI"
)
@then(
'the parsed LookupAccountTransactions response should be valid on round {roundNum}, and contain an array of len {length} and element number {idx} should have sender "{sender}"'
)
def parse_txns_by_addr(context, roundNum, length, idx, sender):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["transactions"]) == int(length)
if int(length) > 0:
assert context.response["transactions"][int(idx)]["sender"] == sender
@when("I use {indexer} to check the services health")
def icl_health(context, indexer):
context.response = context.icls[indexer].health()
@then("I receive status code {code}")
def icl_health_check(context, code):
# An exception is thrown when the code is not 200
assert int(code) == 200
@when("I use {indexer} to lookup block {number}")
def icl_lookup_block(context, indexer, number):
context.response = context.icls[indexer].block_info(int(number))
@then(
'The block was confirmed at {timestamp}, contains {num} transactions, has the previous block hash "{prevHash}"'
)
def icl_block_check(context, timestamp, num, prevHash):
assert context.response["previous-block-hash"] == prevHash
assert len(context.response["transactions"]) == int(num)
assert context.response["timestamp"] == int(timestamp)
@when("we make a Lookup Block call against round {block}")
def lookup_block(context, block):
context.response = context.icl.block_info(int(block))
@when("we make any LookupBlock call")
def lookup_block_any(context):
context.response = context.icl.block_info(12)
@then(
'the parsed LookupBlock response should have previous block hash "{prevHash}"'
)
def parse_lookup_block(context, prevHash):
assert context.response["previous-block-hash"] == prevHash
@then(
'The account has {num} assets, the first is asset {index} has a frozen status of "{frozen}" and amount {units}.'
)
def lookup_account_check(context, num, index, frozen, units):
assert len(context.response["account"]["assets"]) == int(num)
assert context.response["account"]["assets"][0]["asset-id"] == int(index)
assert context.response["account"]["assets"][0]["is-frozen"] == (
frozen == "true"
)
assert context.response["account"]["assets"][0]["amount"] == int(units)
@then(
'The account created {num} assets, the first is asset {index} is named "{name}" with a total amount of {total} "{unit}"'
)
def lookup_account_check_created(context, num, index, name, total, unit):
assert len(context.response["account"]["created-assets"]) == int(num)
assert context.response["account"]["created-assets"][0]["index"] == int(
index
)
assert (
context.response["account"]["created-assets"][0]["params"]["name"]
== name
)
assert (
context.response["account"]["created-assets"][0]["params"]["unit-name"]
== unit
)
assert context.response["account"]["created-assets"][0]["params"][
"total"
] == int(total)
@then(
"The account has {μalgos} μalgos and {num} assets, {assetid} has {assetamount}"
)
def lookup_account_check_holdings(context, μalgos, num, assetid, assetamount):
assert context.response["account"]["amount"] == int(μalgos)
assert len(context.response["account"].get("assets", [])) == int(num)
if int(num) > 0:
assets = context.response["account"]["assets"]
for a in assets:
if a["asset-id"] == int(assetid):
assert a["amount"] == int(assetamount)
@when('I use {indexer} to lookup account "{account}" at round {round}')
def icl_lookup_account_at_round(context, indexer, account, round):
context.response = context.icls[indexer].account_info(account, int(round))
@when(
'we make a Lookup Account by ID call against account "{account}" with round {block}'
)
def lookup_account(context, account, block):
context.response = context.icl.account_info(account, int(block))
@when("we make any LookupAccountByID call")
def lookup_account_any(context):
context.response = context.icl.account_info(
"PNWOET7LLOWMBMLE4KOCELCX6X3D3Q4H2Q4QJASYIEOF7YIPPQBG3YQ5YI", 12
)
@then('the parsed LookupAccountByID response should have address "{address}"')
def parse_account(context, address):
assert context.response["account"]["address"] == address
@when(
'I use {indexer} to lookup asset balances for {assetid} with {currencygt}, {currencylt}, {limit} and token "{token}"'
)
def icl_asset_balance(
context, indexer, assetid, currencygt, currencylt, limit, token
):
context.response = context.icls[indexer].asset_balances(
int(assetid),
min_balance=int(currencygt),
max_balance=int(currencylt),
limit=int(limit),
next_page=token,
)
def parse_args(assetid):
t = assetid.split(" ")
l = {
"assetid": t[2],
"currencygt": t[4][:-1],
"currencylt": t[5][:-1],
"limit": t[6],
"token": t[9][1:-1],
}
return l
@when("I use {indexer} to lookup asset {assetid}")
def icl_lookup_asset(context, indexer, assetid):
try:
context.response = context.icls[indexer].asset_info(int(assetid))
except:
icl_asset_balance(context, indexer, **parse_args(assetid))
@then(
'The asset found has: "{name}", "{units}", "{creator}", {decimals}, "{defaultfrozen}", {total}, "{clawback}"'
)
def check_lookup_asset(
context, name, units, creator, decimals, defaultfrozen, total, clawback
):
assert context.response["asset"]["params"]["name"] == name
assert context.response["asset"]["params"]["unit-name"] == units
assert context.response["asset"]["params"]["creator"] == creator
assert context.response["asset"]["params"]["decimals"] == int(decimals)
assert context.response["asset"]["params"]["default-frozen"] == (
defaultfrozen == "true"
)
assert context.response["asset"]["params"]["total"] == int(total)
assert context.response["asset"]["params"]["clawback"] == clawback
@when("we make a Lookup Asset by ID call against asset index {index}")
def lookup_asset(context, index):
context.response = context.icl.asset_info(int(index))
@when("we make any LookupAssetByID call")
def lookup_asset_any(context):
context.response = context.icl.asset_info(1)
@then("the parsed LookupAssetByID response should have index {index}")
def parse_asset(context, index):
assert context.response["asset"]["index"] == int(index)
@when("we make a LookupApplications call with applicationID {app_id}")
def lookup_application(context, app_id):
context.response = context.icl.applications(int(app_id))
@when(
'we make a LookupApplicationLogsByID call with applicationID {app_id} limit {limit} minRound {min_round} maxRound {max_round} nextToken "{next_token:MaybeString}" sender "{sender:MaybeString}" and txID "{txid:MaybeString}"'
)
def lookup_application_logs(
context, app_id, limit, min_round, max_round, next_token, sender, txid
):
context.response = context.icl.application_logs(
int(app_id),
limit=int(limit),
min_round=int(min_round),
max_round=int(max_round),
next_page=next_token,
sender_addr=sender,
txid=txid,
)
@when("we make a SearchForApplications call with applicationID {app_id}")
def search_application(context, app_id):
context.response = context.icl.search_applications(int(app_id))
@when(
"we make a Search Accounts call with assetID {index} limit {limit} currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} and round {block}"
)
def search_accounts(
context, index, limit, currencyGreaterThan, currencyLessThan, block
):
context.response = context.icl.accounts(
asset_id=int(index),
limit=int(limit),
next_page=None,
min_balance=int(currencyGreaterThan),
max_balance=int(currencyLessThan),
block=int(block),
)
@when(
'we make a Search Accounts call with assetID {index} limit {limit} currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} round {block} and authenticating address "{authAddr:MaybeString}"'
)
def search_accounts(
context,
index,
limit,
currencyGreaterThan,
currencyLessThan,
block,
authAddr,
):
if authAddr == "none":
authAddr = None
context.response = context.icl.accounts(
asset_id=int(index),
limit=int(limit),
next_page=None,
min_balance=int(currencyGreaterThan),
max_balance=int(currencyLessThan),
block=int(block),
auth_addr=authAddr,
)
@when(
'I use {indexer} to search for an account with {assetid}, {limit}, {currencygt}, {currencylt}, "{auth_addr:MaybeString}", {application_id}, "{include_all:MaybeBool}" and token "{token:MaybeString}"'
)
def icl_search_accounts_with_auth_addr_and_app_id_and_include_all(
context,
indexer,
assetid,
limit,
currencygt,
currencylt,
auth_addr,
application_id,
include_all,
token,
):
context.response = context.icls[indexer].accounts(
asset_id=int(assetid),
limit=int(limit),
next_page=token,
min_balance=int(currencygt),
max_balance=int(currencylt),
auth_addr=auth_addr,
application_id=int(application_id),
include_all=include_all,
)
@when(
'I use {indexer} to search for an account with {assetid}, {limit}, {currencygt}, {currencylt}, "{auth_addr:MaybeString}", {application_id} and token "{token:MaybeString}"'
)
def icl_search_accounts_with_auth_addr_and_app_id(
context,
indexer,
assetid,
limit,
currencygt,
currencylt,
auth_addr,
application_id,
token,
):
context.response = context.icls[indexer].accounts(
asset_id=int(assetid),
limit=int(limit),
next_page=token,
min_balance=int(currencygt),
max_balance=int(currencylt),
auth_addr=auth_addr,
application_id=int(application_id),
)
@when(
'I use {indexer} to search for an account with {assetid}, {limit}, {currencygt}, {currencylt} and token "{token:MaybeString}"'
)
def icl_search_accounts_legacy(
context, indexer, assetid, limit, currencygt, currencylt, token
):
context.response = context.icls[indexer].accounts(
asset_id=int(assetid),
limit=int(limit),
next_page=token,
min_balance=int(currencygt),
max_balance=int(currencylt),
)
@then(
"I get the next page using {indexer} to search for an account with {assetid}, {limit}, {currencygt} and {currencylt}"
)
def search_accounts_nex(
context, indexer, assetid, limit, currencygt, currencylt
):
context.response = context.icls[indexer].accounts(
asset_id=int(assetid),
limit=int(limit),
min_balance=int(currencygt),
max_balance=int(currencylt),
next_page=context.response["next-token"],
)
@then(
'There are {num}, the first has {pendingrewards}, {rewardsbase}, {rewards}, {withoutrewards}, "{address}", {amount}, "{status}", "{sigtype:MaybeString}"'
)
def check_search_accounts(
context,
num,
pendingrewards,
rewardsbase,
rewards,
withoutrewards,
address,
amount,
status,
sigtype,
):
assert len(context.response["accounts"]) == int(num)
assert context.response["accounts"][0]["pending-rewards"] == int(
pendingrewards
)
assert context.response["accounts"][0].get("rewards-base", 0) == int(
rewardsbase
)
assert context.response["accounts"][0]["rewards"] == int(rewards)
assert context.response["accounts"][0][
"amount-without-pending-rewards"
] == int(withoutrewards)
assert context.response["accounts"][0]["address"] == address
assert context.response["accounts"][0]["amount"] == int(amount)
assert context.response["accounts"][0]["status"] == status
assert context.response["accounts"][0].get("sig-type", "") == sigtype
@then(
'The first account is online and has "{address}", {keydilution}, {firstvalid}, {lastvalid}, "{votekey}", "{selectionkey}"'
)
def check_search_accounts_online(
context, address, keydilution, firstvalid, lastvalid, votekey, selectionkey
):
assert context.response["accounts"][0]["status"] == "Online"
assert context.response["accounts"][0]["address"] == address
assert context.response["accounts"][0]["participation"][
"vote-key-dilution"
] == int(keydilution)
assert context.response["accounts"][0]["participation"][
"vote-first-valid"
] == int(firstvalid)
assert context.response["accounts"][0]["participation"][
"vote-last-valid"
] == int(lastvalid)
assert (
context.response["accounts"][0]["participation"][
"vote-participation-key"
]
== votekey
)
assert (
context.response["accounts"][0]["participation"][
"selection-participation-key"
]
== selectionkey
)
@when("we make any SearchAccounts call")
def search_accounts_any(context):
context.response = context.icl.accounts(asset_id=2)
@then(
'the parsed SearchAccounts response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have address "{address}"'
)
def parse_accounts(context, roundNum, length, index, address):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["accounts"]) == int(length)
if int(length) > 0:
assert context.response["accounts"][int(index)]["address"] == address
@when(
'the parsed SearchAccounts response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have authorizing address "{authAddr:MaybeString}"'
)
def parse_accounts_auth(context, roundNum, length, index, authAddr):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["accounts"]) == int(length)
if int(length) > 0:
assert (
context.response["accounts"][int(index)]["auth-addr"] == authAddr
)
@when(
"I get the next page using {indexer} to search for transactions with {limit} and {maxround}"
)
def search_txns_next(context, indexer, limit, maxround):
context.response = context.icls[indexer].search_transactions(
limit=int(limit),
max_round=int(maxround),
next_page=context.response["next-token"],
)
@when(
'I use {indexer} to search for transactions with {limit}, "{noteprefix:MaybeString}", "{txtype:MaybeString}", "{sigtype:MaybeString}", "{txid:MaybeString}", {block}, {minround}, {maxround}, {assetid}, "{beforetime:MaybeString}", "{aftertime:MaybeString}", {currencygt}, {currencylt}, "{address:MaybeString}", "{addressrole:MaybeString}", "{excludecloseto:MaybeString}" and token "{token:MaybeString}"'
)
def icl_search_txns(
context,
indexer,
limit,
noteprefix,
txtype,
sigtype,
txid,
block,
minround,
maxround,
assetid,
beforetime,
aftertime,
currencygt,
currencylt,
address,
addressrole,
excludecloseto,
token,
):
context.response = context.icls[indexer].search_transactions(
asset_id=int(assetid),
limit=int(limit),
next_page=token,
note_prefix=base64.b64decode(noteprefix),
txn_type=txtype,
sig_type=sigtype,
txid=txid,
block=int(block),
min_round=int(minround),
max_round=int(maxround),
start_time=aftertime,
end_time=beforetime,
min_amount=int(currencygt),
max_amount=int(currencylt),
address=address,
address_role=addressrole,
exclude_close_to=excludecloseto == "true",
)
@when(
'I use {indexer} to search for transactions with {limit}, "{noteprefix:MaybeString}", "{txtype:MaybeString}", "{sigtype:MaybeString}", "{txid:MaybeString}", {block}, {minround}, {maxround}, {assetid}, "{beforetime:MaybeString}", "{aftertime:MaybeString}", {currencygt}, {currencylt}, "{address:MaybeString}", "{addressrole:MaybeString}", "{excludecloseto:MaybeString}", {application_id} and token "{token:MaybeString}"'
)
def icl_search_txns_with_app(
context,
indexer,
limit,
noteprefix,
txtype,
sigtype,
txid,
block,
minround,
maxround,
assetid,
beforetime,
aftertime,
currencygt,
currencylt,
address,
addressrole,
excludecloseto,
application_id,
token,
):
context.response = context.icls[indexer].search_transactions(
asset_id=int(assetid),
limit=int(limit),
next_page=token,
note_prefix=base64.b64decode(noteprefix),
txn_type=txtype,
sig_type=sigtype,
txid=txid,
block=int(block),
min_round=int(minround),
max_round=int(maxround),
start_time=aftertime,
end_time=beforetime,
min_amount=int(currencygt),
max_amount=int(currencylt),
address=address,
address_role=addressrole,
application_id=int(application_id),
exclude_close_to=excludecloseto == "true",
)
@then(
'there are {num} transactions in the response, the first is "{txid:MaybeString}".'
)
def check_transactions(context, num, txid):
assert len(context.response["transactions"]) == int(num)
if int(num) > 0:
assert context.response["transactions"][0]["id"] == txid
@then('Every transaction has tx-type "{txtype}"')
def check_transaction_types(context, txtype):
for txn in context.response["transactions"]:
assert txn["tx-type"] == txtype
@then('Every transaction has sig-type "{sigtype}"')
def check_sig_types(context, sigtype):
for txn in context.response["transactions"]:
if sigtype == "lsig":
assert list(txn["signature"].keys())[0] == "logicsig"
if sigtype == "msig":
assert list(txn["signature"].keys())[0] == "multisig"
if sigtype == "sig":
assert list(txn["signature"].keys())[0] == sigtype
@then("Every transaction has round >= {minround}")
def check_minround(context, minround):
for txn in context.response["transactions"]:
assert txn["confirmed-round"] >= int(minround)
@then("Every transaction has round <= {maxround}")
def check_maxround(context, maxround):
for txn in context.response["transactions"]:
assert txn["confirmed-round"] <= int(maxround)
@then("Every transaction has round {block}")
def check_round(context, block):
for txn in context.response["transactions"]:
assert txn["confirmed-round"] == int(block)
@then("Every transaction works with asset-id {assetid}")
def check_assetid(context, assetid):
for txn in context.response["transactions"]:
if "asset-config-transaction" in txn:
subtxn = txn["asset-config-transaction"]
else:
subtxn = txn["asset-transfer-transaction"]
assert subtxn["asset-id"] == int(assetid) or txn[
"created-asset-index"
] == int(assetid)
@then('Every transaction is older than "{before}"')
def check_before(context, before):
    t = datetime.fromisoformat(before.replace("Z", "+00:00"))
    for txn in context.response["transactions"]:
        assert txn["round-time"] <= datetime.timestamp(t)
@then('Every transaction is newer than "{after}"')
def check_after(context, after):
    t = datetime.fromisoformat(after.replace("Z", "+00:00"))
    all_newer = True
    for txn in context.response["transactions"]:
        if not txn["round-time"] >= datetime.timestamp(t):
            all_newer = False
    assert all_newer
@then("Every transaction moves between {currencygt} and {currencylt} currency")
def check_currency(context, currencygt, currencylt):
for txn in context.response["transactions"]:
amt = 0
if "asset-transfer-transaction" in txn:
amt = txn["asset-transfer-transaction"]["amount"]
else:
amt = txn["payment-transaction"]["amount"]
if int(currencygt) == 0:
if int(currencylt) > 0:
assert amt <= int(currencylt)
else:
if int(currencylt) > 0:
assert int(currencygt) <= amt <= int(currencylt)
else:
assert int(currencygt) <= amt
@when(
'we make a Search For Transactions call with account "{account:MaybeString}" NotePrefix "{notePrefixB64:MaybeString}" TxType "{txType:MaybeString}" SigType "{sigType:MaybeString}" txid "{txid:MaybeString}" round {block} minRound {minRound} maxRound {maxRound} limit {limit} beforeTime "{beforeTime:MaybeString}" afterTime "{afterTime:MaybeString}" currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} assetIndex {index} addressRole "{addressRole:MaybeString}" ExcluseCloseTo "{excludeCloseTo:MaybeString}" rekeyTo "{rekeyTo:MaybeString}"'
)
def search_txns(
context,
account,
notePrefixB64,
txType,
sigType,
txid,
block,
minRound,
maxRound,
limit,
beforeTime,
afterTime,
currencyGreaterThan,
currencyLessThan,
index,
addressRole,
excludeCloseTo,
rekeyTo,
):
if notePrefixB64 == "none":
notePrefixB64 = ""
if txType == "none":
txType = None
if sigType == "none":
sigType = None
if txid == "none":
txid = None
if beforeTime == "none":
beforeTime = None
if afterTime == "none":
afterTime = None
if account == "none":
account = None
if addressRole == "none":
addressRole = None
if excludeCloseTo == "none":
excludeCloseTo = None
if rekeyTo == "none":
rekeyTo = None
context.response = context.icl.search_transactions(
asset_id=int(index),
limit=int(limit),
next_page=None,
note_prefix=base64.b64decode(notePrefixB64),
txn_type=txType,
sig_type=sigType,
txid=txid,
block=int(block),
min_round=int(minRound),
max_round=int(maxRound),
start_time=afterTime,
end_time=beforeTime,
min_amount=int(currencyGreaterThan),
max_amount=int(currencyLessThan),
address=account,
address_role=addressRole,
exclude_close_to=excludeCloseTo,
rekey_to=rekeyTo,
)
@when(
'we make a Search For Transactions call with account "{account:MaybeString}" NotePrefix "{notePrefixB64:MaybeString}" TxType "{txType:MaybeString}" SigType "{sigType:MaybeString}" txid "{txid:MaybeString}" round {block} minRound {minRound} maxRound {maxRound} limit {limit} beforeTime "{beforeTime:MaybeString}" afterTime "{afterTime:MaybeString}" currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} assetIndex {index} addressRole "{addressRole:MaybeString}" ExcluseCloseTo "{excludeCloseTo:MaybeString}"'
)
def search_txns_no_rekey(
context,
account,
notePrefixB64,
txType,
sigType,
txid,
block,
minRound,
maxRound,
limit,
beforeTime,
afterTime,
currencyGreaterThan,
currencyLessThan,
index,
addressRole,
excludeCloseTo,
):
if notePrefixB64 == "none":
notePrefixB64 = ""
if txType == "none":
txType = None
if sigType == "none":
sigType = None
if txid == "none":
txid = None
if beforeTime == "none":
beforeTime = None
if afterTime == "none":
afterTime = None
if account == "none":
account = None
if addressRole == "none":
addressRole = None
if excludeCloseTo == "none":
excludeCloseTo = None
context.response = context.icl.search_transactions(
asset_id=int(index),
limit=int(limit),
next_page=None,
note_prefix=base64.b64decode(notePrefixB64),
txn_type=txType,
sig_type=sigType,
txid=txid,
block=int(block),
min_round=int(minRound),
max_round=int(maxRound),
start_time=afterTime,
end_time=beforeTime,
min_amount=int(currencyGreaterThan),
max_amount=int(currencyLessThan),
address=account,
address_role=addressRole,
exclude_close_to=excludeCloseTo,
rekey_to=None,
)
@when("we make any SearchForTransactions call")
def search_txns_any(context):
context.response = context.icl.search_transactions(asset_id=2)
@then(
'the parsed SearchForTransactions response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have sender "{sender}"'
)
def parse_search_txns(context, roundNum, length, index, sender):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["transactions"]) == int(length)
if int(length) > 0:
assert context.response["transactions"][int(index)]["sender"] == sender
@when(
'the parsed SearchForTransactions response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have rekey-to "{rekeyTo:MaybeString}"'
)
def step_impl(context, roundNum, length, index, rekeyTo):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["transactions"]) == int(length)
if int(length) > 0:
assert (
context.response["transactions"][int(index)]["rekey-to"] == rekeyTo
)
@when(
'I use {indexer} to search for assets with {limit}, {assetidin}, "{creator:MaybeString}", "{name:MaybeString}", "{unit:MaybeString}", and token "{token:MaybeString}"'
)
def icl_search_assets(
context, indexer, limit, assetidin, creator, name, unit, token
):
context.response = context.icls[indexer].search_assets(
limit=int(limit),
next_page=token,
creator=creator,
name=name,
unit=unit,
asset_id=int(assetidin),
)
@then("there are {num} assets in the response, the first is {assetidout}.")
def check_assets(context, num, assetidout):
assert len(context.response["assets"]) == int(num)
if int(num) > 0:
assert context.response["assets"][0]["index"] == int(assetidout)
@when(
'I use {indexer} to search for applications with {limit}, {application_id}, "{include_all:MaybeBool}" and token "{token:MaybeString}"'
)
def search_applications_include_all(
context, indexer, limit, application_id, include_all, token
):
context.response = context.icls[indexer].search_applications(
application_id=int(application_id),
limit=int(limit),
include_all=include_all,
next_page=token,
)
@when(
'I use {indexer} to search for applications with {limit}, {application_id}, and token "{token:MaybeString}"'
)
def search_applications(context, indexer, limit, application_id, token):
context.response = context.icls[indexer].search_applications(
application_id=int(application_id), limit=int(limit), next_page=token
)
@when(
'I use {indexer} to lookup application with {application_id} and "{include_all:MaybeBool}"'
)
def lookup_application_include_all(
context, indexer, application_id, include_all
):
try:
context.response = context.icls[indexer].applications(
application_id=int(application_id), include_all=include_all
)
except IndexerHTTPError as e:
context.response = json.loads(str(e))
@when("I use {indexer} to lookup application with {application_id}")
def lookup_application(context, indexer, application_id):
context.response = context.icls[indexer].applications(
application_id=int(application_id)
)
@then('the parsed response should equal "{jsonfile}".')
def step_impl(context, jsonfile):
loaded_response = None
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.dirname(os.path.dirname(dir_path))
with open(dir_path + "/test/features/resources/" + jsonfile, "rb") as f:
loaded_response = bytearray(f.read())
# sort context.response
def recursively_sort_on_key(dictionary):
returned_dict = dict()
for k, v in sorted(dictionary.items()):
if isinstance(v, dict):
returned_dict[k] = recursively_sort_on_key(v)
elif isinstance(v, list) and all(
isinstance(item, dict) for item in v
):
if all("key" in item.keys() for item in v):
from operator import itemgetter
returned_dict[k] = sorted(v, key=itemgetter("key"))
else:
sorted_list = list()
for item in v:
sorted_list.append(recursively_sort_on_key(item))
returned_dict[k] = sorted_list
else:
returned_dict[k] = v
return returned_dict
context.response = recursively_sort_on_key(context.response)
loaded_response = recursively_sort_on_key(json.loads(loaded_response))
if context.response != loaded_response:
print("EXPECTED: " + str(loaded_response))
print("ACTUAL: " + str(context.response))
assert context.response == loaded_response
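# Illustrative example (hypothetical input, not from the original file): the
# recursively_sort_on_key helper above normalizes dictionaries before
# comparison, e.g.
#
#     recursively_sort_on_key({"b": 1, "a": [{"key": "z"}, {"key": "a"}]})
#     -> {"a": [{"key": "a"}, {"key": "z"}], "b": 1}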
@when(
'we make a SearchForAssets call with limit {limit} creator "{creator:MaybeString}" name "{name:MaybeString}" unit "{unit:MaybeString}" index {index}'
)
def search_assets(context, limit, creator, name, unit, index):
if creator == "none":
creator = None
if name == "none":
name = None
if unit == "none":
unit = None
context.response = context.icl.search_assets(
limit=int(limit),
next_page=None,
creator=creator,
name=name,
unit=unit,
asset_id=int(index),
)
@when("we make any SearchForAssets call")
def search_assets_any(context):
context.response = context.icl.search_assets(asset_id=2)
@then(
"the parsed SearchForAssets response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have asset index {assetIndex}"
)
def parse_search_assets(context, roundNum, length, index, assetIndex):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["assets"]) == int(length)
if int(length) > 0:
assert context.response["assets"][int(index)]["index"] == int(
assetIndex
)
@when("we make any Suggested Transaction Parameters call")
def suggested_any(context):
context.response = context.acl.suggested_params()
@then(
"the parsed Suggested Transaction Parameters response should have first round valid of {roundNum}"
)
def parse_suggested(context, roundNum):
assert context.response.first == int(roundNum)
@then('expect the path used to be "{path}"')
def expect_path(context, path):
if not isinstance(context.response, dict):
try:
context.response = json.loads(context.response)
except json.JSONDecodeError:
pass
exp_path, exp_query = urllib.parse.splitquery(path)
exp_query = urllib.parse.parse_qs(exp_query)
actual_path, actual_query = urllib.parse.splitquery(
context.response["path"]
)
actual_query = urllib.parse.parse_qs(actual_query)
assert exp_path == actual_path.replace("%3A", ":")
assert exp_query == actual_query
@then('we expect the path used to be "{path}"')
def we_expect_path(context, path):
expect_path(context, path)
@then('expect error string to contain "{err:MaybeString}"')
def expect_error(context, err):
pass
@given(
'indexer client {index} at "{address}" port {port} with token "{token}"'
)
def indexer_client(context, index, address, port, token):
if not hasattr(context, "icls"):
context.icls = dict()
context.icls[index] = indexer.IndexerClient(
token, "http://" + address + ":" + str(port)
)
@when("we make a SearchForApplications call with {application_id} and {round}")
def search_applications(context, application_id, round):
context.response = context.icl.search_applications(
application_id=int(application_id), round=int(round)
)
@when("we make a LookupApplications call with {application_id} and {round}")
def lookup_applications(context, application_id, round):
context.response = context.icl.applications(
application_id=int(application_id), round=int(round)
)
@given('a signing account with address "{address}" and mnemonic "{mnemonic}"')
def signing_account(context, address, mnemonic):
context.signing_mnemonic = mnemonic
@given(
'suggested transaction parameters fee {fee}, flat-fee "{flat_fee:MaybeBool}", first-valid {first_valid}, last-valid {last_valid}, genesis-hash "{genesis_hash}", genesis-id "{genesis_id}"'
)
def suggested_transaction_parameters(
context, fee, flat_fee, first_valid, last_valid, genesis_hash, genesis_id
):
context.suggested_params = transaction.SuggestedParams(
fee=int(fee),
flat_fee=flat_fee,
first=int(first_valid),
last=int(last_valid),
gh=genesis_hash,
gen=genesis_id,
)
@given("suggested transaction parameters from the algod v2 client")
def get_sp_from_algod(context):
context.suggested_params = context.app_acl.suggested_params()
def operation_string_to_enum(operation):
if operation == "call":
return transaction.OnComplete.NoOpOC
elif operation == "create":
return transaction.OnComplete.NoOpOC
elif operation == "noop":
return transaction.OnComplete.NoOpOC
elif operation == "update":
return transaction.OnComplete.UpdateApplicationOC
elif operation == "optin":
return transaction.OnComplete.OptInOC
elif operation == "delete":
return transaction.OnComplete.DeleteApplicationOC
elif operation == "clear":
return transaction.OnComplete.ClearStateOC
elif operation == "closeout":
return transaction.OnComplete.CloseOutOC
else:
raise NotImplementedError(
"no oncomplete enum for operation " + operation
)
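# For reference, operation_string_to_enum above maps the step-file strings to
# transaction.OnComplete values (e.g. "optin" -> OnComplete.OptInOC; "call",
# "create" and "noop" all map to OnComplete.NoOpOC); any other string raises
# NotImplementedError.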
def split_and_process_app_args(in_args):
split_args = in_args.split(",")
sub_args = [sub_arg.split(":") for sub_arg in split_args]
app_args = []
for sub_arg in sub_args:
if sub_arg[0] == "str":
app_args.append(bytes(sub_arg[1], "ascii"))
elif sub_arg[0] == "int":
app_args.append(int(sub_arg[1]))
elif sub_arg[0] == "addr":
app_args.append(encoding.decode_address(sub_arg[1]))
return app_args
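# Illustrative example (hypothetical input, not from the original file):
#
#     split_and_process_app_args("str:hello,int:42")
#     -> [b"hello", 42]
#
# "addr:<address>" entries are decoded to raw 32-byte public keys via
# encoding.decode_address.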
@step(
'I build a payment transaction with sender "{sender:MaybeString}", receiver "{receiver:MaybeString}", amount {amount}, close remainder to "{close_remainder_to:MaybeString}"'
)
def build_payment_transaction(
context, sender, receiver, amount, close_remainder_to
):
if sender == "transient":
sender = context.transient_pk
if receiver == "transient":
receiver = context.transient_pk
if not close_remainder_to:
close_remainder_to = None
context.transaction = transaction.PaymentTxn(
sender=sender,
sp=context.suggested_params,
receiver=receiver,
amt=int(amount),
close_remainder_to=close_remainder_to,
)
@when(
'I build an application transaction with operation "{operation:MaybeString}", application-id {application_id}, sender "{sender:MaybeString}", approval-program "{approval_program:MaybeString}", clear-program "{clear_program:MaybeString}", global-bytes {global_bytes}, global-ints {global_ints}, local-bytes {local_bytes}, local-ints {local_ints}, app-args "{app_args:MaybeString}", foreign-apps "{foreign_apps:MaybeString}", foreign-assets "{foreign_assets:MaybeString}", app-accounts "{app_accounts:MaybeString}", fee {fee}, first-valid {first_valid}, last-valid {last_valid}, genesis-hash "{genesis_hash:MaybeString}", extra-pages {extra_pages}'
)
def build_app_transaction(
context,
operation,
application_id,
sender,
approval_program,
clear_program,
global_bytes,
global_ints,
local_bytes,
local_ints,
app_args,
foreign_apps,
foreign_assets,
app_accounts,
fee,
first_valid,
last_valid,
genesis_hash,
extra_pages,
):
if operation == "none":
operation = None
else:
operation = operation_string_to_enum(operation)
if sender == "none":
sender = None
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.dirname(os.path.dirname(dir_path))
if approval_program == "none":
approval_program = None
elif approval_program:
with open(
dir_path + "/test/features/resources/" + approval_program, "rb"
) as f:
approval_program = bytearray(f.read())
if clear_program == "none":
clear_program = None
elif clear_program:
with open(
dir_path + "/test/features/resources/" + clear_program, "rb"
) as f:
clear_program = bytearray(f.read())
if app_args == "none":
app_args = None
elif app_args:
app_args = split_and_process_app_args(app_args)
if foreign_apps == "none":
foreign_apps = None
elif foreign_apps:
foreign_apps = [int(app) for app in foreign_apps.split(",")]
if foreign_assets == "none":
foreign_assets = None
elif foreign_assets:
foreign_assets = [int(app) for app in foreign_assets.split(",")]
if app_accounts == "none":
app_accounts = None
elif app_accounts:
app_accounts = [
account_pubkey for account_pubkey in app_accounts.split(",")
]
if genesis_hash == "none":
genesis_hash = None
local_schema = transaction.StateSchema(
num_uints=int(local_ints), num_byte_slices=int(local_bytes)
)
global_schema = transaction.StateSchema(
num_uints=int(global_ints), num_byte_slices=int(global_bytes)
)
sp = transaction.SuggestedParams(
int(fee),
int(first_valid),
int(last_valid),
genesis_hash,
flat_fee=True,
)
context.transaction = transaction.ApplicationCallTxn(
sender=sender,
sp=sp,
index=int(application_id),
on_complete=operation,
local_schema=local_schema,
global_schema=global_schema,
approval_program=approval_program,
clear_program=clear_program,
app_args=app_args,
accounts=app_accounts,
foreign_apps=foreign_apps,
foreign_assets=foreign_assets,
extra_pages=int(extra_pages),
note=None,
lease=None,
rekey_to=None,
)
@when("sign the transaction")
def sign_transaction_with_signing_account(context):
private_key = mnemonic.to_private_key(context.signing_mnemonic)
context.signed_transaction = context.transaction.sign(private_key)
@then('the base64 encoded signed transactions should equal "{goldens}"')
def compare_stxns_array_to_base64_golden(context, goldens):
golden_strings = goldens.split(",")
assert len(golden_strings) == len(context.signed_transactions)
for i, golden in enumerate(golden_strings):
actual_base64 = encoding.msgpack_encode(context.signed_transactions[i])
assert golden == actual_base64, "actual is {}".format(actual_base64)
@then('the base64 encoded signed transaction should equal "{golden}"')
def compare_to_base64_golden(context, golden):
actual_base64 = encoding.msgpack_encode(context.signed_transaction)
assert golden == actual_base64, "actual is {}".format(actual_base64)
@then("the decoded transaction should equal the original")
def compare_to_original(context):
encoded = encoding.msgpack_encode(context.signed_transaction)
decoded = encoding.future_msgpack_decode(encoded)
assert decoded.transaction == context.transaction
@given(
'an algod v2 client connected to "{host}" port {port} with token "{token}"'
)
def algod_v2_client_at_host_port_and_token(context, host, port, token):
algod_address = "http://" + str(host) + ":" + str(port)
context.app_acl = algod.AlgodClient(token, algod_address)
@given("an algod v2 client")
def algod_v2_client(context):
algod_address = "http://localhost" + ":" + str(algod_port)
context.app_acl = algod.AlgodClient(daemon_token, algod_address)
@given(
"I create a new transient account and fund it with {transient_fund_amount} microalgos."
)
def create_transient_and_fund(context, transient_fund_amount):
context.transient_sk, context.transient_pk = account.generate_account()
sp = context.app_acl.suggested_params()
payment = transaction.PaymentTxn(
context.accounts[0],
sp,
context.transient_pk,
int(transient_fund_amount),
)
signed_payment = context.wallet.sign_transaction(payment)
context.app_acl.send_transaction(signed_payment)
transaction.wait_for_confirmation(context.app_acl, payment.get_txid(), 10)
@step(
'I build an application transaction with the transient account, the current application, suggested params, operation "{operation}", approval-program "{approval_program:MaybeString}", clear-program "{clear_program:MaybeString}", global-bytes {global_bytes}, global-ints {global_ints}, local-bytes {local_bytes}, local-ints {local_ints}, app-args "{app_args:MaybeString}", foreign-apps "{foreign_apps:MaybeString}", foreign-assets "{foreign_assets:MaybeString}", app-accounts "{app_accounts:MaybeString}", extra-pages {extra_pages}'
)
def build_app_txn_with_transient(
context,
operation,
approval_program,
clear_program,
global_bytes,
global_ints,
local_bytes,
local_ints,
app_args,
foreign_apps,
foreign_assets,
app_accounts,
extra_pages,
):
application_id = 0
if operation == "none":
operation = None
else:
if (
hasattr(context, "current_application_id")
and context.current_application_id
and operation != "create"
):
application_id = context.current_application_id
operation = operation_string_to_enum(operation)
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.dirname(os.path.dirname(dir_path))
if approval_program == "none":
approval_program = None
elif approval_program:
with open(
dir_path + "/test/features/resources/" + approval_program, "rb"
) as f:
approval_program = bytearray(f.read())
if clear_program == "none":
clear_program = None
elif clear_program:
with open(
dir_path + "/test/features/resources/" + clear_program, "rb"
) as f:
clear_program = bytearray(f.read())
local_schema = transaction.StateSchema(
num_uints=int(local_ints), num_byte_slices=int(local_bytes)
)
global_schema = transaction.StateSchema(
num_uints=int(global_ints), num_byte_slices=int(global_bytes)
)
if app_args == "none":
app_args = None
elif app_args:
app_args = split_and_process_app_args(app_args)
if foreign_apps == "none":
foreign_apps = None
elif foreign_apps:
foreign_apps = [int(app) for app in foreign_apps.split(",")]
if foreign_assets == "none":
foreign_assets = None
elif foreign_assets:
foreign_assets = [int(asset) for asset in foreign_assets.split(",")]
if app_accounts == "none":
app_accounts = None
elif app_accounts:
app_accounts = [
account_pubkey for account_pubkey in app_accounts.split(",")
]
sp = context.app_acl.suggested_params()
context.app_transaction = transaction.ApplicationCallTxn(
sender=context.transient_pk,
sp=sp,
index=int(application_id),
on_complete=operation,
local_schema=local_schema,
global_schema=global_schema,
approval_program=approval_program,
clear_program=clear_program,
app_args=app_args,
accounts=app_accounts,
foreign_apps=foreign_apps,
foreign_assets=foreign_assets,
extra_pages=int(extra_pages),
note=None,
lease=None,
rekey_to=None,
)
@step(
'I sign and submit the transaction, saving the txid. If there is an error it is "{error_string:MaybeString}".'
)
def sign_submit_save_txid_with_error(context, error_string):
try:
signed_app_transaction = context.app_transaction.sign(
context.transient_sk
)
context.app_txid = context.app_acl.send_transaction(
signed_app_transaction
)
except Exception as e:
if not error_string or error_string not in str(e):
raise RuntimeError(
"error string "
+ error_string
+ " not in actual error "
+ str(e)
)
@step("I wait for the transaction to be confirmed.")
def wait_for_app_txn_confirm(context):
sp = context.app_acl.suggested_params()
last_round = sp.first
context.app_acl.status_after_block(last_round + 2)
if hasattr(context, "acl"):
assert "type" in context.acl.transaction_info(
context.transient_pk, context.app_txid
)
assert "type" in context.acl.transaction_by_id(context.app_txid)
else:
transaction.wait_for_confirmation(
context.app_acl, context.app_txid, 10
)
@given("I remember the new application ID.")
def remember_app_id(context):
if hasattr(context, "acl"):
context.current_application_id = context.acl.pending_transaction_info(
context.app_txid
)["txresults"]["createdapp"]
else:
context.current_application_id = (
context.app_acl.pending_transaction_info(context.app_txid)[
"application-index"
]
)
@given("an application id {app_id}")
def set_app_id(context, app_id):
context.current_application_id = app_id
@step(
'The transient account should have the created app "{app_created_bool_as_string:MaybeString}" and total schema byte-slices {byte_slices} and uints {uints}, the application "{application_state:MaybeString}" state contains key "{state_key:MaybeString}" with value "{state_value:MaybeString}"'
)
def verify_app_txn(
context,
app_created_bool_as_string,
byte_slices,
uints,
application_state,
state_key,
state_value,
):
account_info = context.app_acl.account_info(context.transient_pk)
app_total_schema = account_info["apps-total-schema"]
assert app_total_schema["num-byte-slice"] == int(byte_slices)
assert app_total_schema["num-uint"] == int(uints)
app_created = app_created_bool_as_string == "true"
created_apps = account_info["created-apps"]
# If we don't expect the app to exist, verify that it isn't there and exit.
if not app_created:
for app in created_apps:
assert app["id"] != context.current_application_id
return
found_app = False
for app in created_apps:
found_app = found_app or app["id"] == context.current_application_id
assert found_app
# If there is no key to check, we're done.
if state_key is None or state_key == "":
return
found_value_for_key = False
key_values = list()
if application_state == "local":
counter = 0
for local_state in account_info["apps-local-state"]:
if local_state["id"] == context.current_application_id:
key_values = local_state["key-value"]
counter = counter + 1
assert counter == 1
elif application_state == "global":
counter = 0
for created_app in account_info["created-apps"]:
if created_app["id"] == context.current_application_id:
key_values = created_app["params"]["global-state"]
counter = counter + 1
assert counter == 1
else:
raise NotImplementedError(
'test does not understand application state "'
+ application_state
+ '"'
)
assert len(key_values) > 0
for key_value in key_values:
found_key = key_value["key"]
if found_key == state_key:
found_value_for_key = True
found_value = key_value["value"]
if found_value["type"] == 1:
assert found_value["bytes"] == state_value
elif found_value["type"] == 0:
assert found_value["uint"] == int(state_value)
assert found_value_for_key
def load_resource(res):
"""load data from features/resources"""
dir_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(dir_path, "..", "features", "resources", res)
with open(path, "rb") as fin:
data = fin.read()
return data
@when('I compile a teal program "{program}"')
def compile_step(context, program):
data = load_resource(program)
source = data.decode("utf-8")
try:
context.response = context.app_acl.compile(source)
context.status = 200
except AlgodHTTPError as ex:
context.status = ex.code
context.response = dict(result="", hash="")
@then(
'it is compiled with {status} and "{result:MaybeString}" and "{hash:MaybeString}"'
)
def compile_check_step(context, status, result, hash):
assert context.status == int(status)
assert context.response["result"] == result
assert context.response["hash"] == hash
@when('I dryrun a "{kind}" program "{program}"')
def dryrun_step(context, kind, program):
data = load_resource(program)
sp = transaction.SuggestedParams(
int(1000), int(1), int(100), "", flat_fee=True
)
zero_addr = encoding.encode_address(bytes(32))
txn = transaction.Transaction(zero_addr, sp, None, None, "pay", None)
sources = []
if kind == "compiled":
lsig = transaction.LogicSig(data)
txns = [transaction.LogicSigTransaction(txn, lsig)]
elif kind == "source":
txns = [transaction.SignedTransaction(txn, None)]
sources = [DryrunSource(field_name="lsig", source=data, txn_index=0)]
else:
assert False, f"kind {kind} not in (source, compiled)"
drr = DryrunRequest(txns=txns, sources=sources)
context.response = context.app_acl.dryrun(drr)
@then('I get execution result "{result}"')
def dryrun_check_step(context, result):
ddr = context.response
assert len(ddr["txns"]) > 0
res = ddr["txns"][0]
    msgs = []
    if (
        res["logic-sig-messages"] is not None
        and len(res["logic-sig-messages"]) > 0
    ):
        msgs = res["logic-sig-messages"]
    elif (
        res["app-call-messages"] is not None
        and len(res["app-call-messages"]) > 0
    ):
        msgs = res["app-call-messages"]
    assert len(msgs) > 0
assert msgs[-1] == result
@when("we make any Dryrun call")
def dryrun_any_call_step(context):
context.response = context.acl.dryrun(DryrunRequest())
@then(
'the parsed Dryrun Response should have global delta "{creator}" with {action}'
)
def dryrun_parsed_response(context, creator, action):
ddr = context.response
assert len(ddr["txns"]) > 0
delta = ddr["txns"][0]["global-delta"]
assert len(delta) > 0
assert delta[0]["key"] == creator
assert delta[0]["value"]["action"] == int(action)
@given('dryrun test case with "{program}" of type "{kind}"')
def dryrun_test_case_step(context, program, kind):
    if kind not in ("lsig", "approv", "clearp"):
        assert False, f"kind {kind} not in (lsig, approv, clearp)"
prog = load_resource(program)
# check if source
if prog[0] > 0x20:
prog = prog.decode("utf-8")
context.dryrun_case_program = prog
context.dryrun_case_kind = kind
@then('status assert of "{status}" is succeed')
def dryrun_test_case_status_assert_step(context, status):
class TestCase(DryrunTestCaseMixin, unittest.TestCase):
"""Mock TestCase to test"""
ts = TestCase()
ts.algo_client = context.app_acl
lsig = None
app = None
    if context.dryrun_case_kind == "lsig":
        lsig = dict()
    elif context.dryrun_case_kind == "approv":
        app = dict()
    elif context.dryrun_case_kind == "clearp":
        app = dict(on_complete=transaction.OnComplete.ClearStateOC)
if status == "PASS":
ts.assertPass(context.dryrun_case_program, lsig=lsig, app=app)
else:
ts.assertReject(context.dryrun_case_program, lsig=lsig, app=app)
def dryrun_test_case_global_state_assert_impl(
context, key, value, action, raises
):
class TestCase(DryrunTestCaseMixin, unittest.TestCase):
"""Mock TestCase to test"""
ts = TestCase()
ts.algo_client = context.app_acl
action = int(action)
val = dict(action=action)
if action == 1:
val["bytes"] = value
elif action == 2:
val["uint"] = int(value)
on_complete = transaction.OnComplete.NoOpOC
if context.dryrun_case_kind == "clearp":
on_complete = transaction.OnComplete.ClearStateOC
raised = False
try:
ts.assertGlobalStateContains(
context.dryrun_case_program,
dict(key=key, value=val),
app=dict(on_complete=on_complete),
)
except AssertionError:
raised = True
    if raises:
        ts.assertTrue(raised, "assertGlobalStateContains expected to raise")
    else:
        ts.assertFalse(
            raised, "assertGlobalStateContains raised unexpectedly"
        )
@then('global delta assert with "{key}", "{value}" and {action} is succeed')
def dryrun_test_case_global_state_assert_step(context, key, value, action):
dryrun_test_case_global_state_assert_impl(
context, key, value, action, False
)
@then('global delta assert with "{key}", "{value}" and {action} is failed')
def dryrun_test_case_global_state_assert_fail_step(
context, key, value, action
):
dryrun_test_case_global_state_assert_impl(
context, key, value, action, True
)
@then(
'local delta assert for "{account}" of accounts {index} with "{key}", "{value}" and {action} is succeed'
)
def dryrun_test_case_local_state_assert_fail_step(
context, account, index, key, value, action
):
class TestCase(DryrunTestCaseMixin, unittest.TestCase):
"""Mock TestCase to test"""
ts = TestCase()
ts.algo_client = context.app_acl
action = int(action)
val = dict(action=action)
if action == 1:
val["bytes"] = value
elif action == 2:
val["uint"] = int(value)
on_complete = transaction.OnComplete.NoOpOC
if context.dryrun_case_kind == "clearp":
on_complete = transaction.OnComplete.ClearStateOC
app_idx = 1
accounts = [
Account(
address=ts.default_address(),
status="Offline",
apps_local_state=[ApplicationLocalState(id=app_idx)],
)
] * 2
accounts[int(index)].address = account
drr = ts.dryrun_request(
context.dryrun_case_program,
sender=accounts[0].address,
app=dict(app_idx=app_idx, on_complete=on_complete, accounts=accounts),
)
ts.assertNoError(drr)
ts.assertLocalStateContains(drr, account, dict(key=key, value=val))
@given("a new AtomicTransactionComposer")
def create_atomic_transaction_composer(context):
context.atomic_transaction_composer = (
atomic_transaction_composer.AtomicTransactionComposer()
)
context.method_list = []
@given("I make a transaction signer for the transient account.")
def create_transient_transaction_signer(context):
private_key = context.transient_sk
context.transaction_signer = (
atomic_transaction_composer.AccountTransactionSigner(private_key)
)
@when("I make a transaction signer for the {account_type} account.")
def create_transaction_signer(context, account_type):
if account_type == "transient":
private_key = context.transient_sk
elif account_type == "signing":
private_key = mnemonic.to_private_key(context.signing_mnemonic)
else:
raise NotImplementedError(
"cannot make transaction signer for " + account_type
)
context.transaction_signer = (
atomic_transaction_composer.AccountTransactionSigner(private_key)
)
@step('I create the Method object from method signature "{method_signature}"')
def build_abi_method(context, method_signature):
context.abi_method = abi.Method.from_signature(method_signature)
if not hasattr(context, "method_list"):
context.method_list = []
context.method_list.append(context.abi_method)
@step("I create a transaction with signer with the current transaction.")
def create_transaction_with_signer(context):
context.transaction_with_signer = (
atomic_transaction_composer.TransactionWithSigner(
context.transaction, context.transaction_signer
)
)
@when("I add the current transaction with signer to the composer.")
def add_transaction_to_composer(context):
context.atomic_transaction_composer.add_transaction(
context.transaction_with_signer
)
def process_abi_args(method, arg_tokens):
method_args = []
for arg_index, arg in enumerate(method.args):
        # Decode the argument if it has a concrete ABI type; reference and
        # transaction-typed arguments are handled below.
        if isinstance(arg.type, abi.ABIType):
method_arg = arg.type.decode(
base64.b64decode(arg_tokens[arg_index])
)
method_args.append(method_arg)
elif arg.type == abi.ABIReferenceType.ACCOUNT:
method_arg = abi.AddressType().decode(
base64.b64decode(arg_tokens[arg_index])
)
method_args.append(method_arg)
elif (
arg.type == abi.ABIReferenceType.APPLICATION
or arg.type == abi.ABIReferenceType.ASSET
):
method_arg = abi.UintType(64).decode(
base64.b64decode(arg_tokens[arg_index])
)
method_args.append(method_arg)
else:
# Append the transaction signer as is
method_args.append(arg_tokens[arg_index])
return method_args
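# Sketch of the decoding performed above (assumption about typical usage, not
# from the original file): each entry in arg_tokens is a base64 string, so a
# "uint64" method argument is recovered with
#
#     abi.UintType(64).decode(base64.b64decode(token))
#
# while transaction arguments (TransactionWithSigner objects appended by the
# steps above) are passed through unchanged.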
@step("I create a new method arguments array.")
def create_abi_method_args(context):
context.method_args = []
@step(
"I append the current transaction with signer to the method arguments array."
)
def append_txn_to_method_args(context):
context.method_args.append(context.transaction_with_signer)
@step(
'I append the encoded arguments "{method_args:MaybeString}" to the method arguments array.'
)
def append_app_args_to_method_args(context, method_args):
# Returns a list of ABI method arguments
app_args = method_args.split(",")
context.method_args += app_args
@step(
'I add a method call with the {account_type} account, the current application, suggested params, on complete "{operation}", current transaction signer, current method arguments.'
)
def add_abi_method_call(context, account_type, operation):
if account_type == "transient":
sender = context.transient_pk
elif account_type == "signing":
sender = mnemonic.to_public_key(context.signing_mnemonic)
else:
raise NotImplementedError(
"cannot make transaction signer for " + account_type
)
app_args = process_abi_args(context.abi_method, context.method_args)
context.atomic_transaction_composer.add_method_call(
app_id=int(context.current_application_id),
method=context.abi_method,
sender=sender,
sp=context.suggested_params,
signer=context.transaction_signer,
method_args=app_args,
on_complete=operation_string_to_enum(operation),
)
@when(
'I add a method call with the {account_type} account, the current application, suggested params, on complete "{operation}", current transaction signer, current method arguments, approval-program "{approval_program_path:MaybeString}", clear-program "{clear_program_path:MaybeString}", global-bytes {global_bytes}, global-ints {global_ints}, local-bytes {local_bytes}, local-ints {local_ints}, extra-pages {extra_pages}.'
)
def add_abi_method_call_creation(
context,
account_type,
operation,
approval_program_path,
clear_program_path,
global_bytes,
global_ints,
local_bytes,
local_ints,
extra_pages,
):
if account_type == "transient":
sender = context.transient_pk
elif account_type == "signing":
sender = mnemonic.to_public_key(context.signing_mnemonic)
else:
raise NotImplementedError(
"cannot make transaction signer for " + account_type
)
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.dirname(os.path.dirname(dir_path))
if approval_program_path:
with open(
dir_path + "/test/features/resources/" + approval_program_path,
"rb",
) as f:
approval_program = bytearray(f.read())
else:
approval_program = None
if clear_program_path:
with open(
dir_path + "/test/features/resources/" + clear_program_path, "rb"
) as f:
clear_program = bytearray(f.read())
else:
clear_program = None
local_schema = transaction.StateSchema(
num_uints=int(local_ints), num_byte_slices=int(local_bytes)
)
global_schema = transaction.StateSchema(
num_uints=int(global_ints), num_byte_slices=int(global_bytes)
)
extra_pages = int(extra_pages)
app_args = process_abi_args(context.abi_method, context.method_args)
context.atomic_transaction_composer.add_method_call(
app_id=int(context.current_application_id),
method=context.abi_method,
sender=sender,
sp=context.suggested_params,
signer=context.transaction_signer,
method_args=app_args,
on_complete=operation_string_to_enum(operation),
local_schema=local_schema,
global_schema=global_schema,
approval_program=approval_program,
clear_program=clear_program,
extra_pages=extra_pages,
)
@when(
'I add a method call with the {account_type} account, the current application, suggested params, on complete "{operation}", current transaction signer, current method arguments, approval-program "{approval_program_path:MaybeString}", clear-program "{clear_program_path:MaybeString}".'
)
def add_abi_method_call_creation_without_schema(
context, account_type, operation, approval_program_path, clear_program_path
):
if account_type == "transient":
sender = context.transient_pk
elif account_type == "signing":
sender = mnemonic.to_public_key(context.signing_mnemonic)
else:
raise NotImplementedError(
"cannot make transaction signer for " + account_type
)
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.dirname(os.path.dirname(dir_path))
if approval_program_path:
with open(
dir_path + "/test/features/resources/" + approval_program_path,
"rb",
) as f:
approval_program = bytearray(f.read())
else:
approval_program = None
if clear_program_path:
with open(
dir_path + "/test/features/resources/" + clear_program_path, "rb"
) as f:
clear_program = bytearray(f.read())
else:
clear_program = None
app_args = process_abi_args(context.abi_method, context.method_args)
context.atomic_transaction_composer.add_method_call(
app_id=int(context.current_application_id),
method=context.abi_method,
sender=sender,
sp=context.suggested_params,
signer=context.transaction_signer,
method_args=app_args,
on_complete=operation_string_to_enum(operation),
approval_program=approval_program,
clear_program=clear_program,
)
@step(
'I build the transaction group with the composer. If there is an error it is "{error_string:MaybeString}".'
)
def build_atomic_transaction_group(context, error_string):
try:
context.atomic_transaction_composer.build_group()
except Exception as e:
if not error_string:
raise RuntimeError(f"Unexpected error for building composer {e}")
elif error_string == "zero group size error":
error_message = (
"no transactions to build for AtomicTransactionComposer"
)
assert error_message in str(e)
else:
            raise NotImplementedError(
                f"Unknown error string for building composer: {error_string}"
            )
def composer_status_string_to_enum(status):
if status == "BUILDING":
return (
atomic_transaction_composer.AtomicTransactionComposerStatus.BUILDING
)
elif status == "BUILT":
return (
atomic_transaction_composer.AtomicTransactionComposerStatus.BUILT
)
elif status == "SIGNED":
return (
atomic_transaction_composer.AtomicTransactionComposerStatus.SIGNED
)
elif status == "SUBMITTED":
return (
atomic_transaction_composer.AtomicTransactionComposerStatus.SUBMITTED
)
elif status == "COMMITTED":
return (
atomic_transaction_composer.AtomicTransactionComposerStatus.COMMITTED
)
else:
raise NotImplementedError(
"no AtomicTransactionComposerStatus enum for " + status
)
@then('The composer should have a status of "{status}".')
def check_atomic_transaction_composer_status(context, status):
assert (
context.atomic_transaction_composer.get_status()
== composer_status_string_to_enum(status)
)
@then("I gather signatures with the composer.")
def gather_signatures_composer(context):
context.signed_transactions = (
context.atomic_transaction_composer.gather_signatures()
)
@then("I clone the composer.")
def clone_atomic_transaction_composer(context):
context.atomic_transaction_composer = (
context.atomic_transaction_composer.clone()
)
@then("I execute the current transaction group with the composer.")
def execute_atomic_transaction_composer(context):
context.atomic_transaction_composer_return = (
context.atomic_transaction_composer.execute(context.app_acl, 10)
)
assert context.atomic_transaction_composer_return.confirmed_round > 0
@then('The app should have returned "{returns:MaybeString}".')
def check_atomic_transaction_composer_response(context, returns):
if not returns:
expected_tokens = []
assert len(context.atomic_transaction_composer_return.abi_results) == 1
result = context.atomic_transaction_composer_return.abi_results[0]
assert result.return_value is None
assert result.decode_error is None
else:
expected_tokens = returns.split(",")
for i, expected in enumerate(expected_tokens):
result = context.atomic_transaction_composer_return.abi_results[i]
if not returns or not expected_tokens[i]:
assert result.return_value is None
assert result.decode_error is None
continue
expected_bytes = base64.b64decode(expected)
expected_value = context.method_list[i].returns.type.decode(
expected_bytes
)
assert expected_bytes == result.raw_value, "actual is {}".format(
result.raw_value
)
assert (
expected_value == result.return_value
), "actual is {}".format(result.return_value)
assert result.decode_error is None
@when("I serialize the Method object into json")
def serialize_method_to_json(context):
context.json_output = context.abi_method.dictify()
@then(
'the produced json should equal "{json_path}" loaded from "{json_directory}"'
)
def check_json_output_equals(context, json_path, json_directory):
with open(
"test/features/unit/" + json_directory + "/" + json_path, "rb"
) as f:
loaded_response = json.load(f)
assert context.json_output == loaded_response
@when(
'I create the Method object with name "{method_name}" method description "{method_desc}" first argument type "{first_arg_type}" first argument description "{first_arg_desc}" second argument type "{second_arg_type}" second argument description "{second_arg_desc}" and return type "{return_arg_type}"'
)
def create_method_from_test_with_arg_desc(
context,
method_name,
method_desc,
first_arg_type,
first_arg_desc,
second_arg_type,
second_arg_desc,
return_arg_type,
):
context.abi_method = abi.Method(
name=method_name,
args=[
abi.Argument(arg_type=first_arg_type, desc=first_arg_desc),
abi.Argument(arg_type=second_arg_type, desc=second_arg_desc),
],
returns=abi.Returns(return_arg_type),
desc=method_desc,
)
@when(
'I create the Method object with name "{method_name}" first argument name "{first_arg_name}" first argument type "{first_arg_type}" second argument name "{second_arg_name}" second argument type "{second_arg_type}" and return type "{return_arg_type}"'
)
def create_method_from_test_with_arg_name(
context,
method_name,
first_arg_name,
first_arg_type,
second_arg_name,
second_arg_type,
return_arg_type,
):
context.abi_method = abi.Method(
name=method_name,
args=[
abi.Argument(arg_type=first_arg_type, name=first_arg_name),
abi.Argument(arg_type=second_arg_type, name=second_arg_name),
],
returns=abi.Returns(return_arg_type),
)
@when(
'I create the Method object with name "{method_name}" first argument type "{first_arg_type}" second argument type "{second_arg_type}" and return type "{return_arg_type}"'
)
def create_method_from_test(
context, method_name, first_arg_type, second_arg_type, return_arg_type
):
context.abi_method = abi.Method(
name=method_name,
args=[abi.Argument(first_arg_type), abi.Argument(second_arg_type)],
returns=abi.Returns(return_arg_type),
)
@then("the deserialized json should equal the original Method object")
def deserialize_method_to_object(context):
json_string = json.dumps(context.json_output)
actual = abi.Method.from_json(json_string)
assert actual == context.abi_method
@then("the txn count should be {txn_count}")
def check_method_txn_count(context, txn_count):
assert context.abi_method.get_txn_calls() == int(txn_count)
@then('the method selector should be "{method_selector}"')
def check_method_selector(context, method_selector):
assert context.abi_method.get_selector() == bytes.fromhex(method_selector)
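# For reference (per the ARC-4 ABI convention; not stated in this file): the
# selector checked above is the first four bytes of the SHA-512/256 hash of
# the canonical method signature, e.g. "add(uint64,uint64)uint64".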
@when(
'I create an Interface object from the Method object with name "{interface_name}" and description "{description}"'
)
def create_interface_object(context, interface_name, description):
context.abi_interface = abi.Interface(
name=interface_name, desc=description, methods=[context.abi_method]
)
@when("I serialize the Interface object into json")
def serialize_interface_to_json(context):
context.json_output = context.abi_interface.dictify()
@then("the deserialized json should equal the original Interface object")
def deserialize_json_to_interface(context):
actual = abi.Interface.undictify(context.json_output)
assert actual == context.abi_interface
@when(
'I create a Contract object from the Method object with name "{contract_name}" and description "{description}"'
)
def create_contract_object(context, contract_name, description):
context.abi_contract = abi.Contract(
name=contract_name, desc=description, methods=[context.abi_method]
)
@when('I set the Contract\'s appID to {app_id} for the network "{network_id}"')
def set_contract_networks(context, app_id, network_id):
if not context.abi_contract.networks:
context.abi_contract.networks = {}
context.abi_contract.networks[network_id] = NetworkInfo(int(app_id))
@when("I serialize the Contract object into json")
def serialize_contract_to_json(context):
context.json_output = context.abi_contract.dictify()
@then("the deserialized json should equal the original Contract object")
def deserialize_json_to_contract(context):
actual = abi.Contract.undictify(context.json_output)
assert actual == context.abi_contract
| 33.239292
| 650
| 0.676191
|
ddc85b0aa2b358aee2abb832ba0fe1a53a9fd1f9
| 1,159
|
py
|
Python
|
tests/utils.py
|
AlessandroVol23/city-scrapers
|
b1ac52357fe5974a6f12cada15f3833ffbe94748
|
[
"MIT"
] | 1
|
2020-03-28T22:50:50.000Z
|
2020-03-28T22:50:50.000Z
|
tests/utils.py
|
AlessandroVol23/city-scrapers
|
b1ac52357fe5974a6f12cada15f3833ffbe94748
|
[
"MIT"
] | 1
|
2019-10-05T04:05:48.000Z
|
2019-10-05T04:05:48.000Z
|
tests/utils.py
|
firejava/city-scrapers
|
749f40bf1bd933726768d7d67e5211aef13af547
|
[
"MIT"
] | 1
|
2020-10-01T15:29:24.000Z
|
2020-10-01T15:29:24.000Z
|
import os
from scrapy.http import HtmlResponse, Request, TextResponse
def file_response(file_name, url=None):
"""
Create a Scrapy fake HTTP response from a HTML file
@param file_name: The relative filename from the tests directory,
but absolute paths are also accepted.
@param url: The URL of the response.
returns: A scrapy HTTP response which can be used for unittesting.
Based on https://stackoverflow.com/a/12741030, a nice bit of hacking.
"""
if not url:
url = 'http://www.example.com'
request = Request(url=url)
file_content = read_test_file_content(file_name)
    if file_name.endswith('.json'):
body = file_content
return TextResponse(url=url, body=body, encoding='utf-8')
body = str.encode(file_content)
return HtmlResponse(url=url, request=request, body=body)
def read_test_file_content(file_name):
    if not file_name.startswith('/'):
        tests_dir = os.path.dirname(os.path.realpath(__file__))
        file_path = os.path.join(tests_dir, file_name)
    else:
        file_path = file_name
    with open(file_path, 'r', encoding='utf-8') as f:
        return f.read()
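# Illustrative usage sketch (hypothetical fixture and spider names, not part
# of the original module):
#
#     response = file_response('files/some_agency_meeting.html',
#                              url='https://example.com/meetings')
#     items = list(MyAgencySpider().parse(response))
#
# file_response builds an HtmlResponse (or TextResponse for .json fixtures)
# that spider callbacks can consume directly in unit tests.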
| 30.5
| 73
| 0.672131
|
fbed19dd14123ff21f4d49d3653bdbf666d83f81
| 250
|
py
|
Python
|
astroML/linear_model/__init__.py
|
DinoBektesevic/astroML
|
b4e699bf45a65e233b40d60323c05eafa1d4955e
|
[
"BSD-2-Clause"
] | 1
|
2020-10-28T14:45:48.000Z
|
2020-10-28T14:45:48.000Z
|
astroML/linear_model/__init__.py
|
allenwangg/astroML
|
b4e699bf45a65e233b40d60323c05eafa1d4955e
|
[
"BSD-2-Clause"
] | 1
|
2018-05-18T19:32:15.000Z
|
2018-05-18T19:32:15.000Z
|
astroML/linear_model/__init__.py
|
DinoBektesevic/astroML
|
b4e699bf45a65e233b40d60323c05eafa1d4955e
|
[
"BSD-2-Clause"
] | null | null | null |
import warnings
from .linear_regression import LinearRegression, PolynomialRegression, BasisFunctionRegression
from .linear_regression_errors import LinearRegressionwithErrors
from .kernel_regression import NadarayaWatson
from .TLS import TLS_logL
| 31.25
| 94
| 0.888
|
2c812a9e788ec946e170261d327a103b297dcb7a
| 7,565
|
py
|
Python
|
src_rnn_rl/main.py
|
icdm2021submission/Continual-Neural-Network-Model-Retraining
|
7a84f211c7750b862fa5e31293d22d4d0dabed23
|
[
"MIT"
] | null | null | null |
src_rnn_rl/main.py
|
icdm2021submission/Continual-Neural-Network-Model-Retraining
|
7a84f211c7750b862fa5e31293d22d4d0dabed23
|
[
"MIT"
] | null | null | null |
src_rnn_rl/main.py
|
icdm2021submission/Continual-Neural-Network-Model-Retraining
|
7a84f211c7750b862fa5e31293d22d4d0dabed23
|
[
"MIT"
] | null | null | null |
# rm *.txt & ./bash.sh
# experiments/base_model/params.json
# cd /Users/xiaofengzhu/Documents/continual_learning/src
# tensorboard --logdir
import argparse
import logging
import os
import time
import glob
import tensorflow as tf
from model.utils import Params
from model.utils import set_logger
from model.utils import cal_train_size
from model.training import train_and_evaluate
from model.reader import load_dataset_from_tfrecords
from model.reader import input_fn
from model.modeling import model_fn
from model.evaluation import evaluate
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default='experiments/base_model',
help="Directory containing params.json")
# loss functions
# cnn, boost, retrain_regu
parser.add_argument('--loss_fn', default='cnn', help="model loss function")
# tf data folder for
# mnist, cifar-10
parser.add_argument('--data_dir', default='../data/mnist',
help="Directory containing the dataset")
# test.tfrecords
parser.add_argument('--tfrecords_filename', default='.tfrecords',
help="Dataset-filename for the tfrecords")
# usage: python main.py --restore_dir experiments/base_model/best_weights
parser.add_argument('--restore_dir', default=None, # experimens/base_model/best_weights
help="Optional, directory containing weights to reload")
parser.add_argument('--train_range', default='[0-4]',
help="training tf range")
# train on datasets A and B
parser.add_argument('--combine', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \
help="try on augmented test dataset")
parser.add_argument('--finetune', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \
help="try on dataset")
parser.add_argument('--use_kfac', default=False, type=lambda x: (str(x).lower() in ['true','1', 'yes']), \
help="usek fac true gradient")
parser.add_argument('--log', default='',
help="train log postfix")
parser.add_argument('--rl', default='EXP3',
help="rl algorithm")
if __name__ == '__main__':
# Train the model
tf.reset_default_graph()
    # Set the random seed for the whole graph for reproducible experiments
tf.set_random_seed(230)
# Load the parameters from the experiment params.json file in model_dir
args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
# Set the logger
set_logger(os.path.join(args.model_dir, 'train{}.log'.format(args.log)))
params = Params(json_path)
params.dict['loss_fn'] = args.loss_fn
params.dict['finetune'] = args.finetune
params.dict['collect'] = False
params.dict['use_kfac'] = args.use_kfac
params.dict['data_dir'] = args.data_dir
if 'reuters' in args.data_dir:
params.dict['num_classes'] = 46
if args.rl:
params.dict['rl'] = args.rl
# Load the parameters from the dataset, that gives the size etc. into params
json_path = os.path.join(args.data_dir, 'dataset_params.json')
assert os.path.isfile(json_path), "No json file found at {}, please generate tfrecords".format(json_path)
params.update(json_path)
# print(params.dict)
params.dict['sample_size'] = cal_train_size(params.train_size, '1')
if args.train_range == '[0-4]':
params.dict['train_size'] = cal_train_size(params.train_size, args.train_range)
else:
params.dict['train_size'] = cal_train_size(params.train_size, args.train_range) + cal_sample_size(params.train_size, args.cal_sample_ratio) # sample.tfrecords is around the same size
last_global_epoch, global_epoch = 0, 0
if params.num_learners <= 1:# not args.retrain or args.combine:
if args.combine:
# train from scratch
path_train_tfrecords = os.path.join(args.data_dir, 'train-{}'.format(args.train_range) + args.tfrecords_filename)
# path_eval_tfrecords = os.path.join(args.data_dir, 'validation' + args.tfrecords_filename)
path_eval_tfrecords = os.path.join(args.data_dir, 'validation-{}'.format(args.train_range) + args.tfrecords_filename)
# Create the input data pipeline
logging.info("Creating the datasets...")
train_dataset = load_dataset_from_tfrecords(glob.glob(path_train_tfrecords))
eval_dataset = load_dataset_from_tfrecords(glob.glob(path_eval_tfrecords))
elif args.finetune:
args.restore_dir = 'best_weights'
path_train_tfrecords = os.path.join(args.data_dir, 'train-{}'.format(args.train_range) + args.tfrecords_filename)
path_sample_train_tfrecords = os.path.join(args.data_dir, 'sample' + args.tfrecords_filename)
# print('path_train_tfrecords: {} ~~~~~~'.format(path_train_tfrecords))
#
path_eval_tfrecords = os.path.join(args.data_dir, 'validation-{}'.format(args.train_range) + args.tfrecords_filename)
# Create the input data pipeline
logging.info("Creating the datasets...")
training_files = glob.glob(path_train_tfrecords)
training_files.append(path_sample_train_tfrecords)
print('glob.glob(path_train_tfrecords): {} ~~~~~~'.format(training_files))
train_dataset = load_dataset_from_tfrecords(glob.glob(path_train_tfrecords))
# eval_dataset = load_dataset_from_tfrecords(path_eval_tfrecords)
eval_dataset = load_dataset_from_tfrecords(glob.glob(path_eval_tfrecords))
else:
# initial ~ [1-5]
path_train_tfrecords = os.path.join(args.data_dir, 'train-{}'.format(args.train_range) + args.tfrecords_filename)
# path_eval_tfrecords = os.path.join(args.data_dir, 'validation' + args.tfrecords_filename)
path_eval_tfrecords = os.path.join(args.data_dir, 'validation-{}'.format(args.train_range) + args.tfrecords_filename)
print(path_train_tfrecords)
# Create the input data pipeline
logging.info("Creating the datasets...")
train_dataset = load_dataset_from_tfrecords(glob.glob(path_train_tfrecords))
# eval_dataset = load_dataset_from_tfrecords(path_eval_tfrecords)
eval_dataset = load_dataset_from_tfrecords(glob.glob(path_eval_tfrecords))
# Specify other parameters for the dataset and the model
# Create the two iterators over the two datasets
logging.info('train_size: {}'.format(params.train_size))
train_inputs = input_fn('train', train_dataset, params)
eval_inputs = input_fn('vali', eval_dataset, params)
logging.info("- done.")
# Define the models (2 different set of nodes that share weights for train and validation)
logging.info("Creating the model...")
train_model_spec = model_fn('train', train_inputs, params)
eval_model_spec = model_fn('vali', eval_inputs, params, reuse=True)
logging.info("- done.")
logging.info("Starting training for at most {} epoch(s) for the initial learner".format(params.num_epochs))
start_time = time.time()
global_epoch = train_and_evaluate(train_model_spec, eval_model_spec, args.model_dir, params, \
restore_from=args.restore_dir)
logging.info("global_epoch: {} epoch(s) at learner 0".format(global_epoch))
logging.info("total time: %s seconds ---" % (time.time() - start_time))
| 54.818841 | 190 | 0.689623 |
c9b763f3631c164af1a523bc44a3bce63bbba5b1 | 17,778 | py | Python | src/dataset/mscoco.py | jielyu/ml-models | 162f4dae2e4e66642c9cf995827c90cf0be0311e | ["MIT"] | 1 | 2022-03-27T06:58:38.000Z | 2022-03-27T06:58:38.000Z | src/dataset/mscoco.py | jielyu/ml-models | 162f4dae2e4e66642c9cf995827c90cf0be0311e | ["MIT"] | null | null | null | src/dataset/mscoco.py | jielyu/ml-models | 162f4dae2e4e66642c9cf995827c90cf0be0311e | ["MIT"] | null | null | null |
# encoding: utf-8
import os
import io
import itertools
import json
import tempfile
import time
import cv2
import numpy as np
import torch
from torch.utils.data.dataloader import default_collate
import torchvision
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from loguru import logger
import contextlib
from tqdm import tqdm
import matplotlib.pyplot as plt
from dataset.datasets_wrapper import Dataset
class COCODataset(Dataset):
"""
COCO dataset class.
"""
COCO_CLASSES = (
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
)
def __init__(
self,
data_dir=None,
json_file="instances_train2017.json",
name="train2017",
img_size=(416, 416),
preproc=None,
cache=False,
dataset_name="MSCOCO-dataset",
):
"""
COCO dataset initialization. Annotation data are read into memory by COCO API.
Args:
data_dir (str): dataset root directory
json_file (str): COCO json file name
name (str): COCO data name (e.g. 'train2017' or 'val2017')
img_size (int): target image size after pre-processing
preproc: data augmentation strategy
dataset_name: name of dataset folder
"""
super().__init__(img_size)
if data_dir is None:
data_dir = os.path.join("dataset", dataset_name)
else:
data_dir = os.path.join(data_dir, dataset_name)
self.data_dir = data_dir
self.json_file = json_file
self.coco = COCO(os.path.join(self.data_dir, "annotations", self.json_file))
self.ids = self.coco.getImgIds()
self.class_ids = sorted(self.coco.getCatIds())
cats = self.coco.loadCats(self.coco.getCatIds())
self._classes = tuple([c["name"] for c in cats])
self.imgs = None
self.name = name
self.img_size = img_size
self.preproc = preproc
self.annotations = self._load_coco_annotations()
if cache:
self._cache_images()
def __len__(self):
return len(self.ids)
def __del__(self):
del self.imgs
def _load_coco_annotations(self):
return [self.load_anno_from_ids(_ids) for _ids in self.ids]
def _cache_images(self):
logger.warning(
"\n********************************************************************************\n"
"You are using cached images in RAM to accelerate training.\n"
"This requires large system RAM.\n"
"Make sure you have 200G+ RAM and 136G available disk space for training COCO.\n"
"********************************************************************************\n"
)
max_h = self.img_size[0]
max_w = self.img_size[1]
cache_file = self.data_dir + "/img_resized_cache_" + self.name + ".array"
if not os.path.exists(cache_file):
logger.info(
"Caching images for the first time. This might take about 20 minutes for COCO"
)
self.imgs = np.memmap(
cache_file,
shape=(len(self.ids), max_h, max_w, 3),
dtype=np.uint8,
mode="w+",
)
from tqdm import tqdm
from multiprocessing.pool import ThreadPool
NUM_THREADs = min(8, os.cpu_count())
loaded_images = ThreadPool(NUM_THREADs).imap(
lambda x: self.load_resized_img(x),
range(len(self.annotations)),
)
pbar = tqdm(enumerate(loaded_images), total=len(self.annotations))
for k, out in pbar:
self.imgs[k][: out.shape[0], : out.shape[1], :] = out.copy()
self.imgs.flush()
pbar.close()
else:
logger.warning(
"You are using cached imgs! Make sure your dataset is not changed!!\n"
"Everytime the self.input_size is changed in your exp file, you need to delete\n"
"the cached data and re-generate them.\n"
)
logger.info("Loading cached imgs...")
self.imgs = np.memmap(
cache_file,
shape=(len(self.ids), max_h, max_w, 3),
dtype=np.uint8,
mode="r+",
)
def load_anno_from_ids(self, id_):
im_ann = self.coco.loadImgs(id_)[0]
width = im_ann["width"]
height = im_ann["height"]
anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False)
annotations = self.coco.loadAnns(anno_ids)
objs = []
for obj in annotations:
x1 = np.max((0, obj["bbox"][0]))
y1 = np.max((0, obj["bbox"][1]))
x2 = np.min((width, x1 + np.max((0, obj["bbox"][2]))))
y2 = np.min((height, y1 + np.max((0, obj["bbox"][3]))))
if obj["area"] > 0 and x2 >= x1 and y2 >= y1:
obj["clean_bbox"] = [x1, y1, x2, y2]
objs.append(obj)
num_objs = len(objs)
res = np.zeros((num_objs, 5))
for ix, obj in enumerate(objs):
cls = self.class_ids.index(obj["category_id"])
res[ix, 0:4] = obj["clean_bbox"]
res[ix, 4] = cls
r = min(self.img_size[0] / height, self.img_size[1] / width)
res[:, :4] *= r
img_info = (height, width)
resized_info = (int(height * r), int(width * r))
file_name = (
im_ann["file_name"]
if "file_name" in im_ann
else "{:012}".format(id_) + ".jpg"
)
return (res, img_info, resized_info, file_name)
def load_anno(self, index):
return self.annotations[index][0]
def load_resized_img(self, index):
img = self.load_image(index)
r = min(self.img_size[0] / img.shape[0], self.img_size[1] / img.shape[1])
resized_img = cv2.resize(
img,
(int(img.shape[1] * r), int(img.shape[0] * r)),
interpolation=cv2.INTER_LINEAR,
).astype(np.uint8)
return resized_img
def load_image(self, index):
file_name = self.annotations[index][3]
img_file = os.path.join(self.data_dir, self.name, file_name)
img = cv2.imread(img_file)
assert img is not None
return img
def pull_item(self, index):
id_ = self.ids[index]
res, img_info, resized_info, _ = self.annotations[index]
if self.imgs is not None:
pad_img = self.imgs[index]
img = pad_img[: resized_info[0], : resized_info[1], :].copy()
else:
img = self.load_resized_img(index)
return img, res.copy(), img_info, np.array([id_])
@Dataset.mosaic_getitem
def __getitem__(self, index):
"""
One image / label pair for the given index is picked up and pre-processed.
Args:
index (int): data index
Returns:
img (numpy.ndarray): pre-processed image
padded_labels (torch.Tensor): pre-processed label data.
The shape is :math:`[max_labels, 5]`.
each label consists of [class, xc, yc, w, h]:
class (float): class index.
xc, yc (float) : center of bbox whose values range from 0 to 1.
w, h (float) : size of bbox whose values range from 0 to 1.
info_img : tuple of h, w.
h, w (int): original shape of the image
img_id (int): same as the input index. Used for evaluation.
"""
img, target, img_info, img_id = self.pull_item(index)
if self.preproc is not None:
img, target = self.preproc(img, target, self.input_dim)
return img, target, img_info, img_id
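# Minimal usage sketch for the class above (not part of the original module); the
# annotation file and image folder are assumed to exist under the default data_dir.
def _coco_dataset_sketch():
    dataset = COCODataset(
        data_dir=None,
        json_file="instances_val2017.json",
        name="val2017",
        img_size=(416, 416),
        preproc=None,
        cache=False,
    )
    # pull_item returns the resized BGR image, the [x1, y1, x2, y2, class] boxes,
    # the original (height, width) and the COCO image id.
    img, boxes, img_info, img_id = dataset.pull_item(0)
    return img.shape, boxes.shape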
def time_synchronized():
"""pytorch-accurate time"""
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.time()
def postprocess(
prediction, num_classes, conf_thre=0.7, nms_thre=0.45, class_agnostic=False
):
box_corner = prediction.new(prediction.shape)
box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
prediction[:, :, :4] = box_corner[:, :, :4]
output = [None for _ in range(len(prediction))]
for i, image_pred in enumerate(prediction):
# If none are remaining => process next image
if not image_pred.size(0):
continue
# Get score and class with highest confidence
class_conf, class_pred = torch.max(
image_pred[:, 5 : 5 + num_classes], 1, keepdim=True
)
conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= conf_thre).squeeze()
# Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
detections = torch.cat((image_pred[:, :5], class_conf, class_pred.float()), 1)
detections = detections[conf_mask]
if not detections.size(0):
continue
if class_agnostic:
nms_out_index = torchvision.ops.nms(
detections[:, :4],
detections[:, 4] * detections[:, 5],
nms_thre,
)
else:
nms_out_index = torchvision.ops.batched_nms(
detections[:, :4],
detections[:, 4] * detections[:, 5],
detections[:, 6],
nms_thre,
)
detections = detections[nms_out_index]
if output[i] is None:
output[i] = detections
else:
output[i] = torch.cat((output[i], detections))
return output
def xyxy2xywh(bboxes):
bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]
bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]
return bboxes
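# Tiny worked example of the conversion above (illustrative, not used elsewhere):
# a corner-format box (x1, y1, x2, y2) = (10, 20, 50, 80) becomes
# (x, y, w, h) = (10, 20, 40, 60). Note the conversion happens in place.
def _xyxy2xywh_example():
    boxes = torch.tensor([[10.0, 20.0, 50.0, 80.0]])
    return xyxy2xywh(boxes)  # tensor([[10., 20., 40., 60.]])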
class COCOEvaluator:
"""
COCO AP Evaluation class. All the data in the val2017 dataset are processed
and evaluated by COCO API.
"""
def __init__(
self, dataloader, img_size, confthre, nmsthre, num_classes, testdev=False
):
"""
Args:
dataloader (Dataloader): evaluate dataloader.
img_size (int): image size after preprocess. images are resized
to squares whose shape is (img_size, img_size).
confthre (float): confidence threshold ranging from 0 to 1, which
is defined in the config file.
            nmsthre (float): IoU threshold of non-max suppression ranging from 0 to 1.
"""
self.dataloader = dataloader
self.img_size = img_size
self.confthre = confthre
self.nmsthre = nmsthre
self.num_classes = num_classes
self.testdev = testdev
def evaluate(
self,
model,
distributed=False,
half=False,
trt_file=None,
decoder=None,
test_size=None,
):
"""
COCO average precision (AP) Evaluation. Iterate inference on the test dataset
and the results are evaluated by COCO API.
NOTE: This function will change training mode to False, please save states if needed.
Args:
model : model to evaluate.
Returns:
ap50_95 (float) : COCO AP of IoU=50:95
ap50 (float) : COCO AP of IoU=50
            summary (str): summary info of evaluation.
"""
# TODO half to amp_test
tensor_type = torch.FloatTensor
if torch.cuda.is_available():
tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
model = model.eval()
if half:
model = model.half()
ids = []
data_list = []
progress_bar = tqdm
inference_time = 0
nms_time = 0
n_samples = max(len(self.dataloader) - 1, 1)
for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
progress_bar(self.dataloader)
):
with torch.no_grad():
imgs = imgs.type(tensor_type)
                # skip the last iters since the batch size might not be enough for batch inference
is_time_record = cur_iter < len(self.dataloader) - 1
if is_time_record:
start = time.time()
outputs = model(imgs)[0]
if decoder is not None:
outputs = decoder(outputs, dtype=outputs.type())
if is_time_record:
infer_end = time_synchronized()
inference_time += infer_end - start
outputs = postprocess(
outputs, self.num_classes, self.confthre, self.nmsthre
)
if is_time_record:
nms_end = time_synchronized()
nms_time += nms_end - infer_end
data_list.extend(self.convert_to_coco_format(outputs, info_imgs, ids))
statistics = torch.FloatTensor([inference_time, nms_time, n_samples])
if torch.cuda.is_available():
statistics = torch.cuda.FloatTensor([inference_time, nms_time, n_samples])
eval_results = self.evaluate_prediction(data_list, statistics)
return eval_results
def convert_to_coco_format(self, outputs, info_imgs, ids):
data_list = []
for (output, img_h, img_w, img_id) in zip(
outputs, info_imgs[0], info_imgs[1], ids
):
if output is None:
continue
output = output.cpu()
bboxes = output[:, 0:4]
# preprocessing: resize
scale = min(
self.img_size[0] / float(img_h), self.img_size[1] / float(img_w)
)
bboxes /= scale
bboxes = xyxy2xywh(bboxes)
cls = output[:, 6]
scores = output[:, 4] * output[:, 5]
for ind in range(bboxes.shape[0]):
label = self.dataloader.dataset.class_ids[int(cls[ind])]
pred_data = {
"image_id": int(img_id),
"category_id": label,
"bbox": bboxes[ind].numpy().tolist(),
"score": scores[ind].numpy().item(),
"segmentation": [],
} # COCO json format
data_list.append(pred_data)
return data_list
def evaluate_prediction(self, data_dict, statistics):
logger.info("Evaluate in main process...")
annType = ["segm", "bbox", "keypoints"]
inference_time = statistics[0].item()
nms_time = statistics[1].item()
n_samples = statistics[2].item()
a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)
a_nms_time = 1000 * nms_time / (n_samples * self.dataloader.batch_size)
time_info = ", ".join(
[
"Average {} time: {:.2f} ms".format(k, v)
for k, v in zip(
["forward", "NMS", "inference"],
[a_infer_time, a_nms_time, (a_infer_time + a_nms_time)],
)
]
)
info = time_info + "\n"
# Evaluate the Dt (detection) json comparing with the ground truth
if len(data_dict) > 0:
cocoGt = self.dataloader.dataset.coco
# TODO: since pycocotools can't process dict in py36, write data to json file.
if self.testdev:
json.dump(data_dict, open("./yolox_testdev_2017.json", "w"))
cocoDt = cocoGt.loadRes("./yolox_testdev_2017.json")
else:
_, tmp = tempfile.mkstemp()
json.dump(data_dict, open(tmp, "w"))
cocoDt = cocoGt.loadRes(tmp)
cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
cocoEval.evaluate()
cocoEval.accumulate()
redirect_string = io.StringIO()
with contextlib.redirect_stdout(redirect_string):
cocoEval.summarize()
info += redirect_string.getvalue()
return cocoEval.stats[0], cocoEval.stats[1], info
else:
return 0, 0, info
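# Minimal sketch of running the evaluator above (not part of the original module);
# the model, dataloader and thresholds here are placeholders chosen for illustration.
def _evaluator_sketch(model, dataloader):
    evaluator = COCOEvaluator(
        dataloader=dataloader,
        img_size=(640, 640),
        confthre=0.01,
        nmsthre=0.65,
        num_classes=80,
    )
    ap50_95, ap50, summary = evaluator.evaluate(model)
    logger.info("AP50:95={:.4f} AP50={:.4f}\n{}".format(ap50_95, ap50, summary))
    return ap50_95, ap50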
| 32.090253 | 98 | 0.533018 |
750e236d873f31ec54e51ab034bc9b9ea2d8103b | 17,916 | py | Python | src/lib/parameters/px4params/srcparser.py | qperrotElistair/Firmware | 60d613ea049bc04cf37487f0226e3e754cd20572 | ["BSD-3-Clause"] | 2 | 2020-09-22T00:07:16.000Z | 2020-09-25T18:51:27.000Z | src/lib/parameters/px4params/srcparser.py | qperrotElistair/Firmware | 60d613ea049bc04cf37487f0226e3e754cd20572 | ["BSD-3-Clause"] | 1 | 2018-09-19T16:59:35.000Z | 2018-09-28T11:45:37.000Z | src/lib/parameters/px4params/srcparser.py | qperrotElistair/Firmware | 60d613ea049bc04cf37487f0226e3e754cd20572 | ["BSD-3-Clause"] | 2 | 2018-08-24T15:43:01.000Z | 2018-08-24T15:59:13.000Z |
import sys
import re
import math
global default_var
default_var = {}
class ParameterGroup(object):
"""
Single parameter group
"""
def __init__(self, name):
self.name = name
self.no_code_generation = False #for injected parameters
self.params = []
def AddParameter(self, param):
"""
Add parameter to the group
"""
self.params.append(param)
def GetName(self):
"""
Get parameter group name
"""
return self.name
def GetParams(self):
"""
Returns the parsed list of parameters. Every parameter is a Parameter
object. Note that returned object is not a copy. Modifications affect
state of the parser.
"""
return sorted(self.params, key=lambda param: param.name)
class Parameter(object):
"""
Single parameter
"""
# Define sorting order of the fields
priority = {
"board": 9,
"short_desc": 8,
"long_desc": 7,
"min": 5,
"max": 4,
"unit": 3,
"decimal": 2,
# all others == 0 (sorted alphabetically)
}
def __init__(self, name, type, default = ""):
self.fields = {}
self.values = {}
self.bitmask = {}
self.name = name
self.type = type
self.default = default
self.volatile = "false"
self.category = ""
self.boolean = False
def GetName(self):
return self.name
def GetType(self):
return self.type
def GetDefault(self):
return self.default
def GetCategory(self):
return self.category.title()
def GetVolatile(self):
return self.volatile
def GetBoolean(self):
return self.boolean
def SetField(self, code, value):
"""
Set named field value
"""
self.fields[code] = value
def SetEnumValue(self, code, value):
"""
Set named enum value
"""
self.values[code] = value
def SetBitmaskBit(self, index, bit):
"""
        Set named bitmask bit
"""
self.bitmask[index] = bit
def SetVolatile(self):
"""
Set volatile flag
"""
self.volatile = "true"
def SetBoolean(self):
"""
Set boolean flag
"""
self.boolean = True
def SetCategory(self, category):
"""
Set param category
"""
self.category = category
def GetFieldCodes(self):
"""
Return list of existing field codes in convenient order
"""
keys = self.fields.keys()
keys = sorted(keys)
keys = sorted(keys, key=lambda x: self.priority.get(x, 0), reverse=True)
return keys
def GetFieldValue(self, code):
"""
Return value of the given field code or None if not found.
"""
fv = self.fields.get(code)
if not fv:
# required because python 3 sorted does not accept None
return ""
return fv
def GetEnumCodes(self):
"""
Return list of existing value codes in convenient order
"""
return sorted(self.values.keys(), key=float)
def GetEnumValue(self, code):
"""
Return value of the given enum code or None if not found.
"""
fv = self.values.get(code)
if not fv:
# required because python 3 sorted does not accept None
return ""
return fv
def GetBitmaskList(self):
"""
Return list of existing bitmask codes in convenient order
"""
keys = self.bitmask.keys()
return sorted(keys, key=float)
def GetBitmaskBit(self, index):
"""
Return value of the given bitmask code or None if not found.
"""
fv = self.bitmask.get(index)
if not fv:
# required because python 3 sorted does not accept None
return ""
return fv
class SourceParser(object):
"""
Parses provided data and stores all found parameters internally.
"""
re_split_lines = re.compile(r'[\r\n]+')
re_comment_start = re.compile(r'^\/\*\*')
re_comment_content = re.compile(r'^\*\s*(.*)')
re_comment_tag = re.compile(r'@([a-zA-Z][a-zA-Z0-9_]*)\s*(.*)')
re_comment_end = re.compile(r'(.*?)\s*\*\/')
re_parameter_definition = re.compile(r'PARAM_DEFINE_([A-Z_][A-Z0-9_]*)\s*\(([A-Z_][A-Z0-9_]*)\s*,\s*([^ ,\)]+)\s*\)\s*;')
re_px4_parameter_definition = re.compile(r'PX4_PARAM_DEFINE_([A-Z_][A-Z0-9_]*)\s*\(([A-Z_][A-Z0-9_]*)\s*\)\s*;')
re_px4_param_default_definition = re.compile(r'#define\s*PARAM_([A-Z_][A-Z0-9_]*)\s*([^ ,\)]+)\s*')
re_cut_type_specifier = re.compile(r'[a-z]+$')
re_is_a_number = re.compile(r'^-?[0-9\.]')
re_remove_dots = re.compile(r'\.+$')
re_remove_carriage_return = re.compile('\n+')
valid_tags = set(["group", "board", "min", "max", "unit", "decimal", "increment", "reboot_required", "value", "boolean", "bit", "category", "volatile"])
# Order of parameter groups
priority = {
# All other groups = 0 (sort alphabetically)
"Miscellaneous": -10
}
def __init__(self):
self.param_groups = {}
def Parse(self, contents):
"""
Incrementally parse program contents and append all found parameters
to the list.
"""
# This code is essentially a comment-parsing grammar. "state"
# represents parser state. It contains human-readable state
# names.
state = None
for line in self.re_split_lines.split(contents):
line = line.strip()
# Ignore empty lines
if line == "":
continue
if self.re_comment_start.match(line):
state = "wait-short"
short_desc = None
long_desc = None
tags = {}
def_values = {}
def_bitmask = {}
elif state is not None and state != "comment-processed":
m = self.re_comment_end.search(line)
if m:
line = m.group(1)
last_comment_line = True
else:
last_comment_line = False
m = self.re_comment_content.match(line)
if m:
comment_content = m.group(1)
if comment_content == "":
# When short comment ends with empty comment line,
# start waiting for the next part - long comment.
if state == "wait-short-end":
state = "wait-long"
else:
m = self.re_comment_tag.match(comment_content)
if m:
tag, desc = m.group(1, 2)
if (tag == "value"):
# Take the meta info string and split the code and description
metainfo = desc.split(" ", 1)
def_values[metainfo[0]] = metainfo[1]
elif (tag == "bit"):
# Take the meta info string and split the code and description
metainfo = desc.split(" ", 1)
def_bitmask[metainfo[0]] = metainfo[1]
else:
tags[tag] = desc
current_tag = tag
state = "wait-tag-end"
elif state == "wait-short":
# Store first line of the short description
short_desc = comment_content
state = "wait-short-end"
elif state == "wait-short-end":
# Append comment line to the short description
short_desc += "\n" + comment_content
elif state == "wait-long":
# Store first line of the long description
long_desc = comment_content
state = "wait-long-end"
elif state == "wait-long-end":
# Append comment line to the long description
long_desc += "\n" + comment_content
elif state == "wait-tag-end":
# Append comment line to the tag text
tags[current_tag] += "\n" + comment_content
else:
raise AssertionError(
"Invalid parser state: %s" % state)
elif not last_comment_line:
                    # Invalid comment line (inside a comment, but not starting
                    # with "*" or "*/"). Reset parsed content.
state = None
if last_comment_line:
state = "comment-processed"
else:
tp = None
name = None
defval = ""
# Non-empty line outside the comment
m = self.re_px4_param_default_definition.match(line)
# Default value handling
if m:
name_m, defval_m = m.group(1,2)
default_var[name_m] = defval_m
m = self.re_parameter_definition.match(line)
if m:
tp, name, defval = m.group(1, 2, 3)
else:
m = self.re_px4_parameter_definition.match(line)
if m:
tp, name = m.group(1, 2)
if (name+'_DEFAULT') in default_var:
defval = default_var[name+'_DEFAULT']
if tp is not None:
# Remove trailing type specifier from numbers: 0.1f => 0.1
if defval != "" and self.re_is_a_number.match(defval):
defval = self.re_cut_type_specifier.sub('', defval)
param = Parameter(name, tp, defval)
param.SetField("short_desc", name)
# If comment was found before the parameter declaration,
# inject its data into the newly created parameter.
group = "Miscellaneous"
if state == "comment-processed":
if short_desc is not None:
param.SetField("short_desc", self.re_remove_dots.sub('', short_desc))
if long_desc is not None:
long_desc = self.re_remove_carriage_return.sub(' ', long_desc)
param.SetField("long_desc", long_desc)
for tag in tags:
if tag == "group":
group = tags[tag]
elif tag == "volatile":
param.SetVolatile()
elif tag == "category":
param.SetCategory(tags[tag])
elif tag == "boolean":
param.SetBoolean()
elif tag not in self.valid_tags:
sys.stderr.write("Skipping invalid documentation tag: '%s'\n" % tag)
return False
else:
param.SetField(tag, tags[tag])
for def_value in def_values:
param.SetEnumValue(def_value, def_values[def_value])
for def_bit in def_bitmask:
param.SetBitmaskBit(def_bit, def_bitmask[def_bit])
# Store the parameter
if group not in self.param_groups:
self.param_groups[group] = ParameterGroup(group)
self.param_groups[group].AddParameter(param)
state = None
return True
def IsNumber(self, numberString):
try:
float(numberString)
return True
except ValueError:
return False
def Validate(self):
"""
Validates the parameter meta data.
"""
seenParamNames = []
#allowedUnits should match set defined in /Firmware/validation/module_schema.yaml
allowedUnits = set ([
'%', 'Hz', 'mAh',
'rad', '%/rad', 'rad/s', 'rad/s^2', '%/rad/s', 'rad s^2/m','rad s/m',
'bit/s', 'B/s',
'deg', 'deg*1e7', 'deg/s',
'celcius', 'gauss', 'gauss/s', 'mgauss', 'mgauss^2',
'hPa', 'kg', 'kg/m^2', 'kg m^2',
'mm', 'm', 'm/s', 'm^2', 'm/s^2', 'm/s^3', 'm/s^2/sqrt(Hz)', 'm/s/rad',
'Ohm', 'V',
'us', 'ms', 's',
'S', 'A/%', '(m/s^2)^2', 'm/m', 'tan(rad)^2', '(m/s)^2', 'm/rad',
'm/s^3/sqrt(Hz)', 'm/s/sqrt(Hz)', 's/(1000*PWM)', '%m/s', 'min', 'us/C',
'N/(m/s)', 'Nm/(rad/s)', 'Nm', 'N',
'normalized_thrust/s', 'normalized_thrust', 'norm', 'SD',''])
for group in self.GetParamGroups():
for param in group.GetParams():
name = param.GetName()
if len(name) > 16:
sys.stderr.write("Parameter Name {0} is too long (Limit is 16)\n".format(name))
return False
board = param.GetFieldValue("board")
# Check for duplicates
name_plus_board = name + "+" + board
for seenParamName in seenParamNames:
if seenParamName == name_plus_board:
sys.stderr.write("Duplicate parameter definition: {0}\n".format(name_plus_board))
return False
seenParamNames.append(name_plus_board)
# Validate values
default = param.GetDefault()
min = param.GetFieldValue("min")
max = param.GetFieldValue("max")
units = param.GetFieldValue("unit")
if units not in allowedUnits:
sys.stderr.write("Invalid unit in {0}: {1}\n".format(name, units))
return False
#sys.stderr.write("{0} default:{1} min:{2} max:{3}\n".format(name, default, min, max))
if default != "" and not self.IsNumber(default):
sys.stderr.write("Default value not number: {0} {1}\n".format(name, default))
return False
# if default != "" and "." not in default:
# sys.stderr.write("Default value does not contain dot (e.g. 10 needs to be written as 10.0): {0} {1}\n".format(name, default))
# return False
if min != "":
if not self.IsNumber(min):
sys.stderr.write("Min value not number: {0} {1}\n".format(name, min))
return False
if default != "" and float(default) < float(min):
sys.stderr.write("Default value is smaller than min: {0} default:{1} min:{2}\n".format(name, default, min))
return False
if max != "":
if not self.IsNumber(max):
sys.stderr.write("Max value not number: {0} {1}\n".format(name, max))
return False
if default != "" and float(default) > float(max):
sys.stderr.write("Default value is larger than max: {0} default:{1} max:{2}\n".format(name, default, max))
return False
for code in param.GetEnumCodes():
if not self.IsNumber(code):
sys.stderr.write("Min value not number: {0} {1}\n".format(name, code))
return False
if param.GetEnumValue(code) == "":
sys.stderr.write("Description for enum value is empty: {0} {1}\n".format(name, code))
return False
for index in param.GetBitmaskList():
if not self.IsNumber(index):
sys.stderr.write("bit value not number: {0} {1}\n".format(name, index))
return False
if not int(min) <= math.pow(2, int(index)) <= int(max):
sys.stderr.write("Bitmask bit must be between {0} and {1}: {2} {3}\n".format(min, max, name, math.pow(2, int(index))))
return False
if param.GetBitmaskBit(index) == "":
sys.stderr.write("Description for bitmask bit is empty: {0} {1}\n".format(name, index))
return False
return True
def GetParamGroups(self):
"""
Returns the parsed list of parameters. Every parameter is a Parameter
object. Note that returned object is not a copy. Modifications affect
state of the parser.
"""
groups = self.param_groups.values()
groups = sorted(groups, key=lambda x: x.GetName())
groups = sorted(groups, key=lambda x: self.priority.get(x.GetName(), 0), reverse=True)
return groups
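# Minimal usage sketch (illustrative only, not part of the original module): feed one
# documented PARAM_DEFINE block through the parser above and validate the result.
def _srcparser_sketch():
    src = (
        "/**\n"
        " * Example parameter\n"
        " *\n"
        " * Longer description of the example parameter.\n"
        " *\n"
        " * @unit m\n"
        " * @min 0\n"
        " * @max 10\n"
        " * @group Miscellaneous\n"
        " */\n"
        "PARAM_DEFINE_FLOAT(EXAMPLE_P, 1.0f);\n"
    )
    parser = SourceParser()
    if parser.Parse(src) and parser.Validate():
        for group in parser.GetParamGroups():
            for param in group.GetParams():
                print(group.GetName(), param.GetName(), param.GetDefault(),
                      param.GetFieldValue("unit"))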
| 41.091743 | 156 | 0.473934 |
ef71936d2fe24f3bd2e45de20501988562e07721 | 63,126 | py | Python | lib/pyz80.py | simonowen/vscode-pyz80 | 762eaeb2f83dc6646d14fba2cfdf1a1f4d0dd6c5 | ["MIT"] | 1 | 2017-12-18T23:47:35.000Z | 2017-12-18T23:47:35.000Z | lib/pyz80.py | simonowen/vscode-pyz80 | 762eaeb2f83dc6646d14fba2cfdf1a1f4d0dd6c5 | ["MIT"] | 2 | 2021-11-10T20:38:11.000Z | 2022-01-06T23:30:39.000Z | lib/pyz80.py | simonowen/vscode-pyz80 | 762eaeb2f83dc6646d14fba2cfdf1a1f4d0dd6c5 | ["MIT"] | 1 | 2021-11-09T15:02:48.000Z | 2021-11-09T15:02:48.000Z |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
# TODO: define and assemble macro blocks
# added FILESIZE("filename")
# defs doesn't cause bytes to be written to output unless real data follows
def printusage():
print("pyz80 by Andrew Collier, modified by Simon Owen")
print(" https://github.com/simonowen/pyz80/")
print("Usage:")
print(" pyz80 (options) inputfile(s)")
print("Options:")
print("-o outputfile")
print(" save the resulting disk image at the given path")
print("--nozip")
print(" do not compress the resulting disk image")
print("-I filepath")
print(" Add this file to the disk image before assembling")
print(" May be used multiple times to add multiple files")
print("--obj=outputfile")
print(" save the output code as a raw binary file at the given path")
print("-D symbol")
print("-D symbol=value")
print(" Define a symbol before parsing the source")
print(" (value is integer; if omitted, assume 1)")
print("--exportfile=filename")
print(" Save all symbol information into the given file")
print("--importfile=filename")
print(" Define symbols before assembly, from a file previously exported")
print("--mapfile=filename")
print(" Save address-to-symbol map into the given file")
print("--lstfile=filename")
print(" Produce assembly listing into given file")
print("--case")
print(" treat source labels as case sensitive (as COMET itself did)")
print("--nobodmas")
print(" treat arithmetic operators without precedence (as COMET itself did)")
print("--intdiv")
print(" force all division to give an integer result (as COMET itself did)")
print("-s regexp")
print(" print the value of any symbols matching the given regular expression")
print(" This may be used multiple times to output more than one subset")
print("-e")
print(" use python's own error handling instead of trying to catch parse errors")
def printlicense():
print("This program is free software; you can redistribute it and/or modify")
print("it under the terms of the GNU General Public License as published by")
print("the Free Software Foundation; either version 2 of the License, or")
print("(at your option) any later version.")
print(" ")
print("This program is distributed in the hope that it will be useful,")
print("but WITHOUT ANY WARRANTY; without even the implied warranty of")
print("MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the")
print("GNU General Public License for more details.")
print(" ")
print("You should have received a copy of the GNU General Public License")
print("along with this program; if not, write to the Free Software")
print("Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA")
import getopt
import sys, os, datetime
import array
import fileinput
import re
import gzip
import math # for use by expressions in source files
import random
# Try for native pickle (2.x), fall back on Python version (3.x)
try:
import cPickle as pickle
except ImportError:
import pickle
def new_disk_image():
image = array.array('B')
image.append(0)
targetsize = 80*10*2*512
# disk image is arranged as: tr 0 s 1-10, tr 128 s 1-10, tr 1 s 1-10, tr 129 s 1-10 etc
while len(image) < targetsize:
image.extend(image)
while len(image) > targetsize:
image.pop()
return image
def dsk_at(track,side,sector):
return (track*20+side*10+(sector-1))*512
# uses numbering 1-10 for sectors, because SAMDOS internal format also does
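# Worked offsets for the layout described above (illustrative, not used by the assembler):
def _dsk_at_examples():
    assert dsk_at(0, 0, 1) == 0          # track 0, side 0, first sector
    assert dsk_at(0, 1, 1) == 10 * 512   # side 1 of the same track starts 10 sectors later
    assert dsk_at(1, 0, 1) == 20 * 512   # track 1 follows both sides of track 0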
def add_file_to_disk_image(image, filename, codestartpage, codestartoffset, execpage=0, execoffset=0, filelength=None, fromfile=None ):
global firstpageoffset
if fromfile != None:
modified = datetime.datetime.fromtimestamp(os.path.getmtime(fromfile))
fromfilefile = open(fromfile,'rb')
fromfilefile.seek(0,2)
filelength = fromfilefile.tell()
fromfilefile.seek(0)
fromfile = array.array('B')
fromfile.fromfile(fromfilefile, filelength)
else:
modified = datetime.datetime.now()
sectors_already_used = 0
# we're writing the whole image, so we can take a bit of a shortcut
# instead of reading the entire sector map to find unused space, we can assume all files are contiguous
# and place new files just at the end of the used space
#find an unused directory entry
for direntry in range(80):
dirpos = dsk_at(direntry//20,0,1+(direntry%20)//2) + 256*(direntry%2)
if image[dirpos] == 0:
break
else:
sectors_already_used += image[dirpos+11]*256 +image[dirpos+12]
else:
fatal ("Too many files for dsk format")
image[dirpos] = 19 # code file
for i in range(10):
image[dirpos+1+i] = ord((filename+" ")[i])
nsectors = 1 + (filelength+9)//510
image[dirpos+11] = nsectors // 256 # MSB number of sectors used
image[dirpos+12] = nsectors % 256 # LSB number of sectors used
starting_side = (4 + sectors_already_used//10)//80
starting_track = (4 + sectors_already_used//10)%80
starting_sector = sectors_already_used%10 + 1
image[dirpos+13] = starting_track + 128*starting_side # starting track
image[dirpos+14] = starting_sector # starting sector
# 15 - 209 sector address map
# write table of used sectors (can precalculate from number of used bits)
while nsectors > 0:
image[dirpos+15 + sectors_already_used//8] |= (1 << (sectors_already_used & 7))
sectors_already_used += 1
nsectors -= 1
# 210-219 MGT future and past (reserved)
image[dirpos+220] = 0 # flags (reserved)
# 221-231 File type information (n/a for code files)
# 232-235 reserved
image[dirpos+236] = codestartpage # start page number
image[dirpos+237] = (codestartoffset%256) # page offset (in section C, 0x8000 - 0xbfff)
image[dirpos+238] = 128 + (codestartoffset // 256)
image[dirpos+239] = filelength//16384 # pages in length
image[dirpos+240] = filelength%256 # file length % 16384
image[dirpos+241] = (filelength%16384)//256
if (execpage>0) :
image[dirpos+242] = execpage # execution address or 255 255 255 (basicpage, L, H - offset in page C)
image[dirpos+243] = execoffset % 256
image[dirpos+244] = (execoffset%16384)//256 + 128
else:
image[dirpos+242] = 255 # execution address or 255 255 255 (basicpage, L, H - offset in page C)
image[dirpos+243] = 255
image[dirpos+244] = 255
image[dirpos+245] = modified.day
image[dirpos+246] = modified.month
image[dirpos+247] = modified.year % 100 + 100
image[dirpos+248] = modified.hour
image[dirpos+249] = modified.minute
side = starting_side
track = starting_track
sector = starting_sector
fpos = 0
# write file's 9 byte header and file
imagepos = dsk_at(track,side,sector)
# 0 File type 19 for a code file
image[imagepos + 0] = 19
# 1-2 Modulo length Length of file % 16384
image[imagepos + 1] = filelength%256
image[imagepos + 2] = (filelength%16384)//256
# 3-4 Offset start Start address
image[imagepos + 3] = (codestartoffset%256)
image[imagepos + 4] = 128 + (codestartoffset // 256)
# 5-6 Unused
# 7 Number of pages
image[imagepos + 7] = filelength//16384
# 8 Starting page number
image[imagepos + 8] = codestartpage
start_of_file = True
while fpos < filelength:
imagepos = dsk_at(track,side,sector)
unadjustedimagepos = imagepos
if start_of_file:
if filelength > 500:
copylen = 501
else:
copylen = filelength
imagepos += 9
start_of_file = False
else:
if (filelength-fpos) > 509:
copylen = 510
else:
copylen = (filelength-fpos)
if fromfile != None:
image[imagepos:imagepos+copylen] = fromfile[fpos:fpos+copylen]
else:
if ((fpos+firstpageoffset)//16384) == (((fpos+codestartoffset)+copylen-1)//16384):
if memory[codestartpage+(fpos+codestartoffset)//16384] != '':
image[imagepos:imagepos+copylen] = memory[codestartpage+(fpos+firstpageoffset)//16384][(fpos+codestartoffset)%16384 : (fpos+codestartoffset)%16384+copylen]
else:
copylen1 = 16384 - ((fpos+codestartoffset)%16384)
page1 = (codestartpage+(fpos+codestartoffset)//16384)
if memory[page1] != '':
image[imagepos:imagepos+copylen1] = memory[page1][(fpos+codestartoffset)%16384 : ((fpos+codestartoffset)%16384)+copylen1]
if (page1 < 31) and memory[page1+1] != '':
image[imagepos+copylen1:imagepos+copylen] = memory[page1+1][0 : ((fpos+codestartoffset)+copylen)%16384]
fpos += copylen
sector += 1
if sector == 11:
sector = 1
track += 1
if track == 80:
track = 0
side += 1
if side==2:
fatal("Disk full writing "+filename)
# pointers to next sector and track
if (fpos < filelength):
image[unadjustedimagepos+510] = track + 128*side
image[unadjustedimagepos+511] = sector
def array_bytes(arr):
return arr.tobytes() if hasattr(arr, "tobytes") else arr.tostring()
def save_disk_image(image, pathname):
imagestr = array_bytes(image)
if ZIP:
dskfile = gzip.open(pathname, 'wb')
else:
dskfile = open(pathname, 'wb')
dskfile.write(imagestr)
dskfile.close()
def save_memory(memory, image=None, filename=None):
global firstpage,firstpageoffset
if firstpage==32:
# code was assembled without using a DUMP directive
firstpage = 1
firstpageoffset = 0
if memory[firstpage] != '':
# check that something has been assembled at all
filelength = (lastpage - firstpage + 1) * 16384
filelength -= firstpageoffset
filelength -= 16384-lastpageoffset
if (autoexecpage>0) :
savefilename = ("AUTO" + filename + " ")[:8]+".O"
else:
savefilename = (filename + " ")[:8]+".O"
if image:
add_file_to_disk_image(image,savefilename,firstpage, firstpageoffset, autoexecpage, autoexecorigin, filelength)
else:
save_memory_to_file(filename, firstpage, firstpageoffset, filelength)
def save_file_to_image(image, pathname):
sam_filename = os.path.basename(pathname)
if len(sam_filename)>10:
if sam_filename.count("."):
extpos = sam_filename.rindex(".")
extlen = len(sam_filename)-extpos
sam_filename = sam_filename[:10-extlen] + sam_filename[extpos:]
else:
sam_filename = sam_filename[:10]
add_file_to_disk_image(image,sam_filename, 1, 0, fromfile=pathname)
def save_memory_to_file(filename, firstusedpage, firstpageoffset, filelength):
objfile = open(filename, 'wb')
flen = filelength
page = firstusedpage
offset = firstpageoffset
while flen:
wlen = min(16384-offset, flen)
if memory[page] != "":
pagestr = array_bytes(memory[page])
objfile.write(pagestr[offset:offset+wlen])
else:
# write wlen nothings into the file
objfile.seek(wlen,1)
flen -= wlen
page += 1
offset=0
objfile.close()
def warning(message):
print(global_currentfile, 'warning:', message)
print('\t', global_currentline.strip())
def fatal(message):
print(global_currentfile, 'error:', message)
print ('\t', global_currentline.strip())
sys.exit(1)
def expand_symbol(sym):
while 1:
match = re.search(r'\{([^\{\}]*)\}', sym)
if match:
value = parse_expression(match.group(1))
sym = sym.replace(match.group(0),str(value))
else:
break
return sym
def file_and_stack(explicit_currentfile=None):
if explicit_currentfile==None:
explicit_currentfile = global_currentfile
f,l = explicit_currentfile.rsplit(':', 1)
s=''
for i in forstack:
s=s+"^"+str(i[2])
return f+s+':'+l
def set_symbol(sym, value, explicit_currentfile=None, is_label=False):
symorig = expand_symbol(sym)
sym = symorig if CASE else symorig.upper()
if sym[0]=='@':
sym = sym + '@' + file_and_stack(explicit_currentfile=explicit_currentfile)
symboltable[sym] = value
if sym != symorig:
symbolcase[sym] = symorig
if is_label:
labeltable[sym] = value
def get_symbol(sym):
symorig = expand_symbol(sym)
sym = symorig if CASE else symorig.upper()
if sym[0]=='@':
if (sym + '@' + file_and_stack()) in symboltable:
return symboltable[sym + '@' + file_and_stack()]
else:
if len(sym) > 1 and (sym[1]=='-' or sym[1]=='+'):
directive = sym[1]
sym = sym[0]+sym[2:]
else:
directive=''
reqfile,reqline = file_and_stack().rsplit(':', 1)
reqline = int(reqline)
closestKey = None
for key in symboltable:
if (sym+'@'+reqfile+":").startswith(key.rsplit(":",1)[0]+":") or (sym+'@'+reqfile+":").startswith(key.rsplit(":",1)[0]+"^"):
# key is allowed fewer layers of FOR stack, but any layers it has must match
                    # ensure a whole number (ie 1 doesn't match 11) by forcing a colon or hat
symfile,symline = key.rsplit(':', 1)
symline=int(symline)
difference = reqline - symline
if (difference < 0 or directive != '+') and (difference > 0 or directive != '-') and ((closestKey == None) or (abs(difference) < closest)):
closest = abs(difference)
closestKey = key
if (not closestKey) and (directive == '-'):
global include_stack
use_include_stack = include_stack
use_include_stack.reverse()
# try searching up the include stack
for include_item in use_include_stack:
include_file, include_line = include_item[1].rsplit(":",1)
if not closestKey:
for key in symboltable:
if (sym+'@'+include_file+":").startswith(key.rsplit(":",1)[0]+":") or (sym+'@'+include_file+":").startswith(key.rsplit(":",1)[0]+"^"):
# key is allowed fewer layers of FOR stack, but any layers it has must match
                                # ensure a whole number (ie 1 doesn't match 11) by forcing a colon or hat
symfile,symline = key.rsplit(':', 1)
symline=int(symline)
difference = int(include_line) - symline
if (difference < 0 or directive != '+') and (difference > 0 or directive != '-') and ((closestKey == None) or (abs(difference) < closest)):
closest = abs(difference)
closestKey = key
if closestKey != None:
sym = closestKey
if sym in symboltable:
symusetable[sym] = symusetable.get(sym,0)+1
return symboltable[sym]
return None
def parse_expression(arg, signed=0, byte=0, word=0, silenterror=0):
if ',' in arg:
if silenterror:
return ''
fatal("Erroneous comma in expression"+arg)
while 1:
match = re.search('"(.)"', arg)
if match:
arg = arg.replace('"'+match.group(1)+'"',str(ord(match.group(1))))
else:
break
while 1:
match = re.search(r'defined\s*\(\s*(.*?)\s*\)', arg, re.IGNORECASE)
if match:
result = (get_symbol(match.group(1)) != None)
arg = arg.replace(match.group(0),str(int(result)))
else:
break
arg = arg.replace('$','('+str(origin)+')')
arg = arg.replace('%','0b') # COMET syntax for binary literals (parsed later, change to save confusion with modulus)
arg = arg.replace('\\','%') # COMET syntax for modulus
arg = re.sub('&([0-9a-fA-F]+\\b)', '0x\g<1>', arg) # COMET syntax for hex numbers
if INTDIV:
arg = re.sub(r'(?<!/)/(?!/)', r'//', arg) # COMET integer division
# don't do these except at the start of a token:
arg = re.sub('\\b0X', '0x', arg) # darnit, this got capitalized
arg = re.sub('\\b0B', '0b', arg) # darnit, this got capitalized
# if the argument contains letters at this point,
# it's a symbol which needs to be replaced
testsymbol=''
argcopy = ''
incurly = 0
inquotes = False
for c in arg+' ':
if c.isalnum() or c in '"_.@{}' or (c=="+" and testsymbol=='@') or (c=="-" and testsymbol=='@') or incurly or inquotes:
testsymbol += c
if c=='{':
incurly += 1
elif c=='}':
incurly -= 1
elif c=='"':
inquotes = not inquotes
else:
if (testsymbol != ''):
if not testsymbol[0].isdigit():
result = get_symbol(testsymbol)
if (result != None):
testsymbol = str(result)
elif testsymbol[0] == '"' and testsymbol[-1]=='"':
# string literal used in some expressions
pass
else:
understood = 0
# some of python's math expressions should be available to the parser
if not understood and testsymbol.lower() != 'e':
parsestr = 'math.'+testsymbol.lower()
try:
eval(parsestr)
understood = 1
except:
understood = 0
if not understood:
parsestr = 'random.'+testsymbol.lower()
try:
eval(parsestr)
understood = 1
except:
understood = 0
if testsymbol in ["FILESIZE"]:
parsestr = 'os.path.getsize'
understood = 1
if not understood :
if silenterror:
return ''
fatal("Error in expression "+arg+": Undefined symbol "+expand_symbol(testsymbol))
testsymbol = parsestr
elif testsymbol[0]=='0' and len(testsymbol)>2 and testsymbol[1]=='b':
# binary literal
literal = 0
for digit in testsymbol[2:]:
literal *= 2
if digit == '1':
literal += 1
elif digit != '0':
fatal("Invalid binary digit '"+digit+"'")
testsymbol = str(literal)
elif testsymbol[0]=='0' and len(testsymbol)>1 and testsymbol[1]!='x':
# literals with leading zero would be treated as octal,
# COMET source files do not expect this
decimal = testsymbol
while decimal[0] == '0' and len(decimal)>1:
decimal = decimal[1:]
testsymbol = decimal
argcopy += testsymbol
testsymbol = ''
argcopy += c
if NOBODMAS:
# add bracket pairs at interesting locations to simulate left-to-right evaluation
aslist = list(argcopy) # turn it into a list so that we can add characters without affecting indexes
bracketstack=[0]
symvalid = False
for c in range (len(aslist)):
if aslist[c] == "(":
bracketstack = [c]+bracketstack
elif aslist[c] == ")":
bracketstack = bracketstack[1:]
elif (not aslist[c].isalnum()) and (not aslist[c]=='.') and (not aslist[c].isspace()) and symvalid:
aslist[c] = ")"+aslist[c]
aslist[bracketstack[0]] = '('+aslist[bracketstack[0]]
symvalid = False
elif aslist[c].isalnum():
symvalid = True
argcopy2=""
for entry in aslist:
argcopy2 += entry
# print(argcopy,"->",argcopy2)
argcopy = argcopy2
narg = int(eval(argcopy))
# print(arg, " -> ",argcopy," == ",narg)
if not signed:
if byte:
if narg < -128 or narg > 255:
warning ("Unsigned byte value truncated from "+str(narg))
narg %= 256
elif word:
if narg < -32768 or narg > 65535:
warning ("Unsigned word value truncated from "+str(narg))
narg %= 65536
return narg
def double(arg, allow_af_instead_of_sp=0, allow_af_alt=0, allow_index=1):
# decode double register [bc, de, hl, sp][ix,iy] --special: af af'
double_mapping = {'BC':([],0), 'DE':([],1), 'HL':([],2), 'SP':([],3), 'IX':([0xdd],2), 'IY':([0xfd],2), 'AF':([],5), "AF'":([],4) }
rr = double_mapping.get(arg.strip().upper(),([],-1))
if (rr[1]==3) and allow_af_instead_of_sp:
rr = ([],-1)
if rr[1]==5:
if allow_af_instead_of_sp:
rr = ([],3)
else:
rr = ([],-1)
if (rr[1]==4) and not allow_af_alt:
rr = ([],-1)
if (rr[0] != []) and not allow_index:
rr = ([],-1)
return rr
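# Quick reference for the decoder above (illustrative helper, not called anywhere):
def _double_examples():
    assert double("HL") == ([], 2)       # plain register pair, no prefix byte
    assert double("IX") == ([0xdd], 2)   # HL slot behind the 0xDD index prefix
    assert double("AF") == ([], -1)      # rejected unless allow_af_instead_of_sp=1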
def single(arg, allow_i=0, allow_r=0, allow_index=1, allow_offset=1, allow_half=1):
#decode single register [b,c,d,e,h,l,(hl),a][(ix {+c}),(iy {+c})]
single_mapping = {'B':0, 'C':1, 'D':2, 'E':3, 'H':4, 'L':5, 'A':7, 'I':8, 'R':9, 'IXH':10, 'IXL':11, 'IYH':12, 'IYL':13 }
m = single_mapping.get(arg.strip().upper(),-1)
prefix=[]
postfix=[]
if m==8 and not allow_i:
m = -1
if m==9 and not allow_r:
m = -1
if allow_half:
if m==10:
prefix = [0xdd]
m = 4
if m==11:
prefix = [0xdd]
m = 5
if m==12:
prefix = [0xfd]
m = 4
if m==13:
prefix = [0xfd]
m = 5
else:
if m >= 10 and m <= 13:
m = -1
if m==-1 and re.search(r"\A\s*\(\s*HL\s*\)\s*\Z", arg, re.IGNORECASE):
m = 6
if m==-1 and allow_index:
match = re.search(r"\A\s*\(\s*(I[XY])\s*\)\s*\Z", arg, re.IGNORECASE)
if match:
m = 6
prefix = [0xdd] if match.group(1).lower() == 'ix' else [0xfd]
postfix = [0]
elif allow_offset:
match = re.search(r"\A\s*\(\s*(I[XY])\s*([+-].*)\s*\)\s*\Z", arg, re.IGNORECASE)
if match:
m = 6
prefix = [0xdd] if match.group(1).lower() == 'ix' else [0xfd]
if p==2:
offset = parse_expression(match.group(2), byte=1, signed=1)
if offset < -128 or offset > 127:
fatal ("invalid index offset: "+str(offset))
postfix = [(offset + 256) % 256]
else:
postfix = [0]
return prefix,m,postfix
def condition(arg):
# decode condition [nz, z, nc, c, po, pe, p, m]
condition_mapping = {'NZ':0, 'Z':1, 'NC':2, 'C':3, 'PO':4, 'PE':5, 'P':6, 'M':7 }
return condition_mapping.get(arg.upper(),-1)
def dump(bytes):
def initpage(page):
memory[page] = array.array('B')
memory[page].append(0)
while len(memory[page]) < 16384:
memory[page].extend(memory[page])
global dumppage, dumporigin, dumpspace_pending, lstcode, listingfile
if (p==2):
if dumpspace_pending > 0:
if memory[dumppage]=='':
initpage(dumppage)
dumporigin += dumpspace_pending
dumppage += dumporigin // 16384
dumporigin %= 16384
dumpspace_pending = 0
if memory[dumppage]=='':
initpage(dumppage)
lstcode = ""
for b in bytes:
# if b<0 or b>255:
# warning("Dump byte out of range")
memory[dumppage][dumporigin] = b
if listingfile != None:
lstcode=lstcode+"%02X "%(b)
dumporigin += 1
if dumporigin == 16384:
dumporigin = 0
dumppage += 1
if memory[dumppage]=='':
initpage(dumppage)
def check_args(args,expected):
if args=='':
received = 0
else:
received = len(args.split(','))
if expected!=received:
fatal("Opcode wrong number of arguments, expected "+str(expected)+" received "+str(args))
def op_ORG(p,opargs):
global origin
check_args(opargs,1)
origin = parse_expression(opargs, word=1)
return 0
def op_DUMP(p,opargs):
global dumppage, dumporigin, dumpused, firstpage, firstpageoffset, dumpspace_pending
if dumpused:
check_lastpage()
dumpused = True
dumpspace_pending = 0
if ',' in opargs:
page,offset = opargs.split(',',1)
offset = parse_expression(offset, word=1)
dumppage = parse_expression(page) + (offset//16384)
dumporigin = offset % 16384
else:
offset = parse_expression(opargs)
if (offset<16384):
fatal("DUMP value out of range")
dumppage = (offset//16384) - 1
dumporigin = offset % 16384
if ((dumppage*16384 + dumporigin) < (firstpage*16384 + firstpageoffset)):
firstpage = dumppage
firstpageoffset = dumporigin
return 0
def op_PRINT(p, opargs):
text = []
for expr in opargs.split(","):
if expr.strip().startswith('"'):
text.append(expr.strip().rstrip()[1:-1])
else:
a = parse_expression(expr, silenterror=True)
if a:
text.append(str(a))
else:
text.append("?")
print(global_currentfile, "PRINT: ", ",".join(text))
return 0
def check_lastpage():
global lastpage, lastpageoffset
if dumppage > lastpage:
lastpage = dumppage
lastpageoffset = dumporigin
elif (dumppage == lastpage) and (dumporigin > lastpageoffset):
lastpageoffset = dumporigin
def op_AUTOEXEC(p,opargs):
global autoexecpage, autoexecorigin
check_args(opargs,0)
if (p==2):
if (autoexecpage>0 or autoexecorigin>0):
fatal("AUTOEXEC may only be used once.")
autoexecpage = dumppage + 1 # basic type page numbering
autoexecorigin = dumporigin
return 0
def op_EQU(p,opargs):
global symboltable
check_args(opargs,1)
if (symbol):
if opargs.upper().startswith("FOR") and (opargs[3].isspace() or opargs[3]=='('):
set_symbol(symbol, 0)
limit = parse_expression(opargs[4:].strip())
if limit < 1:
fatal("FOR range < 1 not allowed")
forstack.append( [symbol,global_currentfile,0,limit] )
else:
if p==1:
set_symbol(symbol, parse_expression(opargs, signed=1, silenterror=1))
else:
expr_result = parse_expression(opargs, signed=1)
existing = get_symbol(symbol)
if existing == '':
set_symbol(symbol, expr_result)
elif existing != expr_result:
fatal("Symbol "+expand_symbol(symbol)+": expected "+str(existing)+" but calculated "+str(expr_result)+", has this symbol been used twice?")
else:
warning("EQU without symbol name")
return 0
def op_NEXT(p,opargs):
global global_currentfile
check_args(opargs,1)
foritem = forstack.pop()
if opargs != foritem[0]:
fatal("NEXT symbol "+opargs+" doesn't match FOR: expected "+foritem[0])
foritem[2] += 1
set_symbol(foritem[0], foritem[2], explicit_currentfile=foritem[1])
if foritem[2] < foritem[3]:
global_currentfile = foritem[1]
forstack.append(foritem)
return 0
def op_ALIGN(p,opargs):
global dumpspace_pending
check_args(opargs,1)
align = parse_expression(opargs)
if align<1:
fatal("Invalid alignment")
elif (align & (-align)) != align:
fatal("Alignment is not a power of 2")
s = (align - origin%align)%align
dumpspace_pending += s
return s
def op_DS(p,opargs):
return op_DEFS(p,opargs)
def op_DEFS(p,opargs):
global dumppage, dumporigin, dumpspace_pending
check_args(opargs,1)
if opargs.upper().startswith("ALIGN") and (opargs[5].isspace() or opargs[5]=='('):
return op_ALIGN(p,opargs[5:].strip())
s = parse_expression(opargs)
if s<0:
fatal("Allocated invalid space < 0 bytes ("+str(s)+")")
dumpspace_pending += s
return s
def op_DB(p,opargs):
return op_DEFB(p,opargs)
def op_DEFB(p,opargs):
s = opargs.split(',')
if (p==2):
for b in s:
byte=(parse_expression(b, byte=1, silenterror=1))
if byte=='':
fatal("Didn't understand DB or character constant "+b)
else:
dump([byte])
return len(s)
def op_DW(p,opargs):
return op_DEFW(p,opargs)
def op_DEFW(p,opargs):
s = opargs.split(',')
if (p==2):
for b in s:
b=(parse_expression(b, word=1))
dump([b%256, b//256])
return 2*len(s)
def op_DM(p,opargs):
return op_DEFM(p,opargs)
def op_DEFM(p,opargs):
messagelen = 0
if opargs.strip()=="44" or opargs=="(44)":
dump ([44])
messagelen = 1
else:
matchstr = opargs
while matchstr.strip():
match = re.match(r'\s*\"(.*)\"(\s*,)?(.*)', matchstr)
if not match:
match = re.match(r'\s*([^,]*)(\s*,)?(.*)', matchstr)
byte=(parse_expression(match.group(1), byte=1, silenterror=1))
if byte=='':
fatal("Didn't understand DM character constant "+match.group(1))
elif p==2:
dump([byte])
messagelen += 1
else:
message = list(match.group(1))
if p==2:
for i in message:
dump ([ord(i)])
messagelen += len(message)
matchstr = match.group(3)
if match.group(3) and not match.group(2):
matchstr = '""' + matchstr
# For cases such as DEFM "message with a "" in it"
# I can only apologise for this, this is an artefact of my parsing quotes
# badly at the top level but it's too much for me to go back and refactor it all.
# Of course, it would have helped if Comet had had sane quoting rules in the first place.
return messagelen
def op_MDAT(p,opargs):
global dumppage, dumporigin
match = re.search(r'\A\s*\"(.*)\"\s*\Z', opargs)
filename = os.path.join(global_path, match.group(1))
try:
mdatfile = open(filename,'rb')
except:
fatal("Unable to open file for reading: "+filename)
mdatfile.seek(0,2)
filelength = mdatfile.tell()
if p==1:
dumporigin += filelength
dumppage += dumporigin // 16384
dumporigin %= 16384
elif p==2:
mdatfile.seek(0)
mdatafilearray = array.array('B')
mdatafilearray.fromfile(mdatfile, filelength)
dump(mdatafilearray)
mdatfile.close()
return filelength
def op_INCLUDE(p,opargs):
global global_path, global_currentfile
global include_stack
match = re.search(r'\A\s*\"(.*)\"\s*\Z', opargs)
filename = match.group(1)
include_stack.append((global_path, global_currentfile))
assembler_pass(p, filename)
global_path, global_currentfile = include_stack.pop()
return 0
# global origin has already been updated
def op_FOR(p,opargs):
args = opargs.split(',',1)
limit = parse_expression(args[0])
bytes = 0
for iterate in range(limit):
symboltable['FOR'] = iterate
if CASE:
symboltable['for'] = iterate
bytes += assemble_instruction(p,args[1].strip())
if limit != 0:
del symboltable['FOR']
if CASE:
del symboltable['for']
return bytes
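# As parsed above, FOR <count>,<instruction> assembles <instruction> that many
# times, with the symbol FOR holding the current iteration index 0..count-1.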
def op_noargs_type(p,opargs,instr):
check_args(opargs,0)
if (p==2):
dump(instr)
return len(instr)
def op_ASSERT(p,opargs):
check_args(opargs,1)
if (p==2):
value = parse_expression(opargs)
if value == 0:
fatal("Assertion failed ("+opargs+")")
return 0
def op_NOP(p,opargs):
return op_noargs_type(p,opargs,[0x00])
def op_RLCA(p,opargs):
return op_noargs_type(p,opargs,[0x07])
def op_RRCA(p,opargs):
return op_noargs_type(p,opargs,[0x0F])
def op_RLA(p,opargs):
return op_noargs_type(p,opargs,[0x17])
def op_RRA(p,opargs):
return op_noargs_type(p,opargs,[0x1F])
def op_DAA(p,opargs):
return op_noargs_type(p,opargs,[0x27])
def op_CPL(p,opargs):
return op_noargs_type(p,opargs,[0x2F])
def op_SCF(p,opargs):
return op_noargs_type(p,opargs,[0x37])
def op_CCF(p,opargs):
return op_noargs_type(p,opargs,[0x3F])
def op_HALT(p,opargs):
return op_noargs_type(p,opargs,[0x76])
def op_DI(p,opargs):
return op_noargs_type(p,opargs,[0xf3])
def op_EI(p,opargs):
return op_noargs_type(p,opargs,[0xfb])
def op_EXX(p,opargs):
return op_noargs_type(p,opargs,[0xd9])
def op_NEG(p,opargs):
return op_noargs_type(p,opargs,[0xed,0x44])
def op_RETN(p,opargs):
return op_noargs_type(p,opargs,[0xed,0x45])
def op_RETI(p,opargs):
return op_noargs_type(p,opargs,[0xed,0x4d])
def op_RRD(p,opargs):
return op_noargs_type(p,opargs,[0xed,0x67])
def op_RLD(p,opargs):
return op_noargs_type(p,opargs,[0xed,0x6F])
def op_LDI(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xa0])
def op_CPI(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xa1])
def op_INI(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xa2])
def op_OUTI(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xa3])
def op_LDD(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xa8])
def op_CPD(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xa9])
def op_IND(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xaa])
def op_OUTD(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xab])
def op_LDIR(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xb0])
def op_CPIR(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xb1])
def op_INIR(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xb2])
def op_OTIR(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xb3])
def op_LDDR(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xb8])
def op_CPDR(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xb9])
def op_INDR(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xba])
def op_OTDR(p,opargs):
return op_noargs_type(p,opargs,[0xed,0xbb])
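# CB-prefixed rotates/shifts: the final byte is the group offset (0x00 RLC ... 0x38 SRL)
# plus the register index 0-7 (B,C,D,E,H,L,(HL),A), e.g. RLC B -> CB 00, SRL A -> CB 3F.
# Indexed operands get a DD/FD prefix before CB and the displacement byte before
# the final opcode (DD CB dd op), supplied by single() as pre/post below.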
def op_cbshifts_type(p,opargs,offset,step_per_register=1):
args = opargs.split(',',1)
if len(args) == 2:
# compound instruction of the form RLC B,(IX+c)
pre1,r1,post1 = single(args[0], allow_half=0, allow_index=0)
pre2,r2,post2 = single(args[1], allow_half=0, allow_index=1)
if r1==-1 or r2==-1:
fatal("Registers not recognized for compound instruction")
if r1==6:
fatal("(HL) not allowed as target of compound instruction")
if len(pre2)==0:
fatal("Must use index register as operand of compound instruction")
instr=pre2
instr.extend([0xcb])
instr.extend(post2)
instr.append(offset + step_per_register*r1)
else:
check_args(opargs,1)
pre,r,post = single(opargs,allow_half=0)
instr = pre
instr.extend([0xcb])
instr.extend(post)
if r==-1:
fatal ("Invalid argument")
else:
instr.append(offset + step_per_register*r)
if (p==2):
dump(instr)
return len(instr)
def op_RLC(p,opargs):
return op_cbshifts_type(p,opargs,0x00)
def op_RRC(p,opargs):
return op_cbshifts_type(p,opargs,0x08)
def op_RL(p,opargs):
return op_cbshifts_type(p,opargs,0x10)
def op_RR(p,opargs):
return op_cbshifts_type(p,opargs,0x18)
def op_SLA(p,opargs):
return op_cbshifts_type(p,opargs,0x20)
def op_SRA(p,opargs):
return op_cbshifts_type(p,opargs,0x28)
def op_SLL(p,opargs):
if (p==1):
warning("SLL doesn't do what you probably expect on z80b! Use SL1 if you know what you're doing.")
return op_cbshifts_type(p,opargs,0x30)
def op_SL1(p,opargs):
return op_cbshifts_type(p,opargs,0x30)
def op_SRL(p,opargs):
return op_cbshifts_type(p,opargs,0x38)
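# 8-bit ALU group (SUB/AND/XOR/OR/CP below): a register operand encodes as
# offset + r (e.g. SUB B -> 0x90, CP A -> 0xBF); an immediate operand uses the
# n-form opcode followed by the byte (e.g. SUB 5 -> 0xD6 0x05).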
def op_register_arg_type(p,opargs,offset,ninstr,step_per_register=1):
check_args(opargs,1)
pre,r,post = single(opargs,allow_half=1)
instr = pre
if r==-1:
match = re.search(r"\A\s*\(\s*(.*)\s*\)\s*\Z", opargs)
if match:
fatal ("Illegal indirection")
instr.extend(ninstr)
if (p==2):
n = parse_expression(opargs, byte=1)
else:
n = 0
instr.append(n)
else:
instr.append(offset + step_per_register*r)
instr.extend(post)
if (p==2):
dump(instr)
return len(instr)
def op_SUB(p,opargs):
return op_register_arg_type(p,opargs, 0x90, [0xd6])
def op_AND(p,opargs):
return op_register_arg_type(p,opargs, 0xa0, [0xe6])
def op_XOR(p,opargs):
return op_register_arg_type(p,opargs, 0xa8, [0xee])
def op_OR(p,opargs):
return op_register_arg_type(p,opargs, 0xb0, [0xf6])
def op_CP(p,opargs):
return op_register_arg_type(p,opargs, 0xb8, [0xfe])
def op_registerorpair_arg_type(p,opargs,rinstr,rrinstr,step_per_register=8,step_per_pair=16):
check_args(opargs,1)
pre,r,post = single(opargs)
if r==-1:
pre,rr = double(opargs)
if rr==-1:
fatal ("Invalid argument")
instr = pre
instr.append(rrinstr + step_per_pair*rr)
else:
instr = pre
instr.append(rinstr + step_per_register*r)
instr.extend(post)
if (p==2):
dump(instr)
return len(instr)
def op_INC(p,opargs):
# Oh dear - COMET also used "INC" for INClude source file
if '"' in opargs:
return op_INCLUDE(p,opargs)
return op_registerorpair_arg_type(p,opargs, 0x04, 0x03)
def op_DEC(p,opargs):
return op_registerorpair_arg_type(p,opargs, 0x05, 0x0b)
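# ADD/ADC/SBC handle both the 8-bit forms (register: rinstr + r, immediate:
# ninstr followed by n) and the 16-bit forms with HL/IX/IY (rrinstr + 16*rr),
# e.g. ADD A,B -> 0x80, ADD HL,DE -> 0x19, ADC HL,BC -> ED 4A.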
def op_add_type(p,opargs,rinstr,ninstr,rrinstr,step_per_register=1,step_per_pair=16):
args = opargs.split(',',1)
r=-1
if len(args) == 2:
pre,r,post = single(args[0])
if (len(args) == 1) or r==7:
pre,r,post = single(args[-1])
instr = pre
if r==-1:
match = re.search(r"\A\s*\(\s*(.*)\s*\)\s*\Z", args[-1])
if match:
fatal ("Illegal indirection")
instr.extend(ninstr)
if (p==2):
n = parse_expression(args[-1], byte=1)
else:
n = 0
instr.append(n)
else:
instr.extend(rinstr)
instr[-1] += step_per_register*r
instr.extend(post)
else:
pre,rr1 = double(args[0])
dummy,rr2 = double(args[1])
if (rr1 == rr2) and (pre != dummy):
fatal ("Can't mix index registers and HL")
if (len(rrinstr) > 1) and pre:
fatal ("Can't use index registers in this instruction")
if (len(args) != 2) or (rr1 != 2) or (rr2 == -1):
fatal("Invalid argument")
instr = pre
instr.extend(rrinstr)
instr[-1] += step_per_pair*rr2
if (p==2):
dump(instr)
return len(instr)
def op_ADD(p,opargs):
return op_add_type(p,opargs,[0x80], [0xc6],[0x09])
def op_ADC(p,opargs):
return op_add_type(p,opargs,[0x88], [0xce],[0xed,0x4a])
def op_SBC(p,opargs):
return op_add_type(p,opargs,[0x98], [0xde],[0xed,0x42])
def op_bit_type(p,opargs,offset):
check_args(opargs,2)
arg1,arg2 = opargs.split(',',1)
b = parse_expression(arg1)
if b>7 or b<0:
fatal ("argument out of range")
pre,r,post = single(arg2,allow_half=0)
if r==-1:
fatal ("Invalid argument")
instr = pre
instr.append(0xcb)
instr.extend(post)
instr.append(offset + r + 8*b)
if (p==2):
dump(instr)
return len(instr)
def op_BIT(p,opargs):
return op_bit_type(p,opargs, 0x40)
def op_RES(p,opargs):
return op_bit_type(p,opargs, 0x80)
def op_SET(p,opargs):
return op_bit_type(p,opargs, 0xc0)
def op_pushpop_type(p,opargs,offset):
check_args(opargs,1)
prefix, rr = double(opargs, allow_af_instead_of_sp=1)
instr = prefix
if rr==-1:
fatal ("Invalid argument")
else:
instr.append(offset + 16 * rr)
if (p==2):
dump(instr)
return len(instr)
def op_POP(p,opargs):
return op_pushpop_type(p,opargs, 0xc1)
def op_PUSH(p,opargs):
return op_pushpop_type(p,opargs, 0xc5)
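# JP/CALL: the unconditional form uses the base opcode; a conditional form adds
# 8*cc to the conditional base, assuming the usual Z80 ordering from condition()
# (NZ=0, Z=1, NC=2, C=3, PO=4, PE=5, P=6, M=7), e.g. JP NZ,nn -> C2, CALL Z,nn -> CC.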
def op_jumpcall_type(p,opargs,offset, condoffset):
args = opargs.split(',',1)
if len(args) == 1:
instr = [offset]
else:
cond = condition(args[0])
if cond == -1:
fatal ("Expected condition, received "+opargs)
instr = [condoffset + 8*cond]
match = re.search(r"\A\s*\(\s*(.*)\s*\)\s*\Z", args[-1])
if match:
fatal ("Illegal indirection")
if (p==2):
nn = parse_expression(args[-1],word=1)
instr.extend([nn%256, nn//256])
dump(instr)
return 3
def op_JP(p,opargs):
if (len(opargs.split(',',1)) == 1):
prefix, r, postfix = single(opargs, allow_offset=0,allow_half=0)
if r==6:
instr = prefix
instr.append(0xe9)
if (p==2):
dump(instr)
return len(instr)
return op_jumpcall_type(p,opargs, 0xc3, 0xc2)
def op_CALL(p,opargs):
return op_jumpcall_type(p,opargs, 0xcd, 0xc4)
def op_DJNZ(p,opargs):
check_args(opargs,1)
if (p==2):
target = parse_expression(opargs,word=1)
displacement = target - (origin + 2)
if displacement > 127 or displacement < -128:
fatal ("Displacement from "+str(origin)+" to "+str(target)+" is out of range")
dump([0x10,(displacement+256)%256])
return 2
def op_JR(p,opargs):
args = opargs.split(',',1)
if len(args) == 1:
instr = 0x18
else:
cond = condition(args[0])
if cond == -1:
fatal ("Expected condition, received "+opargs)
elif cond >= 4:
fatal ("Invalid condition for JR")
instr = 0x20 + 8*cond
if (p==2):
target = parse_expression(args[-1],word=1)
displacement = target - (origin + 2)
if displacement > 127 or displacement < -128:
fatal ("Displacement from "+str(origin)+" to "+str(target)+" is out of range")
dump([instr,(displacement+256)%256])
return 2
def op_RET(p,opargs):
if opargs=='':
if (p==2):
dump([0xc9])
else:
check_args(opargs,1)
cond = condition(opargs)
if cond == -1:
fatal ("Expected condition, received "+opargs)
if (p==2):
dump([0xc0 + 8*cond])
return 1
def op_IM(p,opargs):
check_args(opargs,1)
if (p==2):
mode = parse_expression(opargs)
if (mode>2) or (mode<0):
fatal ("argument out of range")
if mode > 0:
mode += 1
dump([0xed, 0x46 + 8*mode])
return 2
def op_RST(p,opargs):
check_args(opargs,1)
if (p==2):
vector = parse_expression(opargs)
if (vector>0x38) or (vector<0) or ((vector%8) != 0):
fatal ("argument out of range or doesn't divide by 8")
dump([0xc7 + vector])
return 1
def op_EX(p,opargs):
check_args(opargs,2)
args = opargs.split(',',1)
if re.search(r"\A\s*\(\s*SP\s*\)\s*\Z", args[0], re.IGNORECASE):
pre2,rr2 = double(args[1],allow_af_instead_of_sp=1, allow_af_alt=1, allow_index=1)
if rr2==2:
instr = pre2
instr.append(0xe3)
else:
fatal("Can't exchange "+args[0]+" with "+args[1])
else:
pre1,rr1 = double(args[0],allow_af_instead_of_sp=1, allow_index=0)
pre2,rr2 = double(args[1],allow_af_instead_of_sp=1, allow_af_alt=1, allow_index=0)
if rr1==1 and rr2==2:
# EX DE,HL
instr = pre1
instr.extend(pre2)
instr.append(0xeb)
elif (rr1==3 and rr2==4):
instr=[0x08]
else:
fatal("Can't exchange "+args[0]+" with "+args[1])
if (p==2):
dump(instr)
return len(instr)
def op_IN(p,opargs):
check_args(opargs,2)
args = opargs.split(',',1)
if (p==2):
pre,r,post = single(args[0],allow_index=0,allow_half=0)
if r!=-1 and r!=6 and re.search(r"\A\s*\(\s*C\s*\)\s*\Z", args[1], re.IGNORECASE):
dump([0xed, 0x40+8*r])
elif r==7:
match = re.search(r"\A\s*\(\s*(.*)\s*\)\s*\Z", args[1])
if match==None:
fatal("No expression in "+args[1])
n = parse_expression(match.group(1))
dump([0xdb, n])
else:
fatal("Invalid argument")
return 2
def op_OUT(p,opargs):
check_args(opargs,2)
args = opargs.split(',',1)
if (p==2):
pre,r,post = single(args[1],allow_index=0,allow_half=0)
if r!=-1 and r!=6 and re.search(r"\A\s*\(\s*C\s*\)\s*\Z", args[0], re.IGNORECASE):
dump([0xed, 0x41+8*r])
elif r==7:
match = re.search(r"\A\s*\(\s*(.*)\s*\)\s*\Z", args[0])
n = parse_expression(match.group(1))
dump([0xd3, n])
else:
fatal("Invalid argument")
return 2
def op_LD(p,opargs):
check_args(opargs,2)
arg1,arg2 = opargs.split(',',1)
prefix, rr1 = double(arg1)
if rr1 != -1:
prefix2, rr2 = double(arg2)
if rr1==3 and rr2==2:
instr = prefix2
instr.append(0xf9)
dump(instr)
return len(instr)
match = re.search(r"\A\s*\(\s*(.*)\s*\)\s*\Z", arg2)
if match:
# ld rr, (nn)
if p==2:
nn = parse_expression(match.group(1),word=1)
else:
nn = 0
instr = prefix
if rr1==2:
instr.extend([0x2a, nn%256, nn//256])
else:
instr.extend([0xed, 0x4b + 16*rr1, nn%256, nn//256])
dump(instr)
return len (instr)
else:
#ld rr, nn
if p==2:
nn = parse_expression(arg2,word=1)
else:
nn = 0
instr = prefix
instr.extend([0x01 + 16*rr1, nn%256, nn//256])
dump(instr)
return len (instr)
prefix, rr2 = double(arg2)
if rr2 != -1:
match = re.search(r"\A\s*\(\s*(.*)\s*\)\s*\Z", arg1)
if match:
# ld (nn), rr
if p==2:
nn = parse_expression(match.group(1))
else:
nn = 0
instr = prefix
if rr2==2:
instr.extend([0x22, nn%256, nn//256])
else:
instr.extend([0xed, 0x43 + 16*rr2, nn%256, nn//256])
dump(instr)
return len (instr)
prefix1,r1,postfix1 = single(arg1, allow_i=1, allow_r=1)
prefix2,r2,postfix2 = single(arg2, allow_i=1, allow_r=1)
if r1 != -1 :
if r2 != -1:
if (r1 > 7) or (r2 > 7):
if r1==7:
if r2==8:
dump([0xed,0x57])
return 2
elif r2==9:
dump([0xed,0x5f])
return 2
if r2==7:
if r1==8:
dump([0xed,0x47])
return 2
elif r1==9:
dump([0xed,0x4f])
return 2
fatal("Invalid argument")
if r1==6 and r2==6:
fatal("Ha - nice try. That's a HALT.")
if (r1==4 or r1==5) and (r2==4 or r2==5) and prefix1 != prefix2:
fatal("Illegal combination of operands")
if r1==6 and (r2==4 or r2==5) and len(prefix2) != 0:
fatal("Illegal combination of operands")
if r2==6 and (r1==4 or r1==5) and len(prefix1) != 0:
fatal("Illegal combination of operands")
instr = prefix1
if len(prefix1) == 0:
instr.extend(prefix2)
instr.append(0x40 + 8*r1 + r2)
instr.extend(postfix1)
instr.extend(postfix2)
dump(instr)
return len(instr)
else:
if r1 > 7:
fatal("Invalid argument")
if r1==7 and re.search(r"\A\s*\(\s*BC\s*\)\s*\Z", arg2, re.IGNORECASE):
dump([0x0a])
return 1
if r1==7 and re.search(r"\A\s*\(\s*DE\s*\)\s*\Z", arg2, re.IGNORECASE):
dump([0x1a])
return 1
match = re.search(r"\A\s*\(\s*(.*)\s*\)\s*\Z", arg2)
if match:
if r1 != 7:
fatal("Illegal indirection")
if p==2:
nn = parse_expression(match.group(1), word=1)
dump([0x3a, nn%256, nn//256])
return 3
instr = prefix1
instr.append(0x06 + 8*r1)
instr.extend(postfix1)
if (p==2):
n = parse_expression(arg2, byte=1)
else:
n = 0
instr.append(n)
dump(instr)
return len(instr)
elif r2==7:
# ld (bc/de/nn),a
if re.search(r"\A\s*\(\s*BC\s*\)\s*\Z", arg1, re.IGNORECASE):
dump([0x02])
return 1
if re.search(r"\A\s*\(\s*DE\s*\)\s*\Z", arg1, re.IGNORECASE):
dump([0x12])
return 1
match = re.search(r"\A\s*\(\s*(.*)\s*\)\s*\Z", arg1)
if match:
if p==2:
nn = parse_expression(match.group(1), word=1)
dump([0x32, nn%256, nn//256])
return 3
fatal("LD args not understood - "+arg1+", "+arg2)
return 1
#ifstate=0: parse all code
#ifstate=1: parse this code, but stop at ELSE
#ifstate=2: do not parse this code, but might start at ELSE
#ifstate=3: do not parse any code until ENDIF
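# Illustrative source fragment (hypothetical symbol and routine names) showing
# how the states above drive conditional assembly:
#     IF DEBUG
#         CALL trace_init      ; assembled only when DEBUG evaluates non-zero
#     ELSE IF TESTING
#         NOP                  ; considered only if the IF above was false
#     ENDIF                    ; restores the enclosing state from ifstack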
def op_IF(p,opargs):
global ifstate, ifstack
check_args(opargs,1)
ifstack.append( (global_currentfile,ifstate) )
if ifstate < 2:
cond = parse_expression(opargs)
if cond:
ifstate = 1
else:
ifstate = 2
else:
ifstate = 3
return 0
def op_ELSE(p,opargs):
global ifstate, ifstack
if ifstate==1 or ifstate==3:
ifstate = 3
elif ifstate==2:
if opargs.upper().startswith("IF"):
cond = parse_expression(opargs[2:].strip())
if cond:
ifstate = 1
else:
ifstate = 2
else:
ifstate = 1
else:
fatal("Mismatched ELSE")
return 0
def op_ENDIF(p,opargs):
global ifstate, ifstack
check_args(opargs,0)
if len(ifstack) == 0:
fatal("Mismatched ENDIF")
ifline,state = ifstack.pop()
ifstate = state
return 0
def assemble_instruction(p, line):
    match = re.match(r'^(\w+)(.*)', line)
if not match:
fatal("Expected opcode or directive")
inst = match.group(1).upper()
args = match.group(2).strip()
if (ifstate < 2) or inst in ('IF', 'ELSE', 'ENDIF'):
functioncall = 'op_'+inst+'(p,args)'
if PYTHONERRORS:
return eval(functioncall)
else:
try:
return eval(functioncall)
except SystemExit as e:
sys.exit(e)
except:
fatal("Opcode not recognised")
else:
return 0
def assembler_pass(p, inputfile):
global memory, symboltable, symusetable, labeltable, origin, dumppage, dumporigin, symbol
global global_currentfile, global_currentline, lstcode, listingfile
    # file references are local, so assembler_pass can be called recursively (e.g. by op_INCLUDE)
# but copied to a global identifier for warning printouts
global global_path
global_currentfile="command line"
global_currentline="0"
# just read the whole file into memory, it's not going to be huge (probably)
# I'd prefer not to, but assembler_pass can be called recursively
# (by op_INCLUDE for example) and fileinput does not support two files simultaneously
this_currentfilename = os.path.join(global_path, inputfile)
if os.sep in this_currentfilename:
global_path = os.path.dirname(this_currentfilename)
try:
currentfile = open(this_currentfilename,'r')
wholefile=currentfile.readlines()
wholefile.insert(0, '') # prepend blank so line numbers are 1-based
currentfile.close()
except:
fatal("Couldn't open file "+this_currentfilename+" for reading")
consider_linenumber=0
while consider_linenumber < len(wholefile):
currentline = wholefile[consider_linenumber]
global_currentline = currentline
global_currentfile = this_currentfilename+":"+str(consider_linenumber)
# write these every instruction because an INCLUDE may have overwritten them
symbol = ''
opcode = ''
inquotes = ''
inquoteliteral = False
i = ""
for nexti in currentline+" ":
if (i==';' or i=='#') and not inquotes:
break
if i==':' and not inquotes:
symbol = opcode
opcode=''
i = ''
if i == '"':
if not inquotes:
inquotes = i
else:
if (not inquoteliteral) and nexti=='"':
inquoteliteral = True
elif inquoteliteral:
inquoteliteral = False
inquotes += i
else:
inquotes += i
if inquotes == '""':
inquotes = '"""'
elif inquotes == '","':
inquotes = " 44 "
i = ""
opcode += inquotes
inquotes = ""
elif inquotes:
inquotes += i
else:
opcode += i
i = nexti
symbol = symbol.strip()
opcode = opcode.strip()
if inquotes:
fatal("Mismatched quotes")
if len( symbol.split()) > 1:
fatal("Whitespace not allowed in symbol name")
if (symbol and (opcode[0:3].upper() !="EQU") and (ifstate < 2)):
if p==1:
set_symbol(symbol, origin, is_label=True)
elif get_symbol(symbol) != origin:
fatal("Symbol "+symbol+": expected "+str(get_symbol(symbol))+" but calculated "+str(origin)+", has this symbol been used twice?")
if (opcode):
bytes = assemble_instruction(p,opcode)
if p>1 and listingfile != None:
lstout="%04X %-13s\t%s"%(origin,lstcode,wholefile[consider_linenumber].rstrip())
lstcode=""
writelisting(lstout)
origin = (origin + bytes) % 65536
else:
if p>1 and listingfile != None:
lstout=" %-13s\t%s"%("",wholefile[consider_linenumber].rstrip())
lstcode=""
writelisting(lstout)
if global_currentfile.startswith(this_currentfilename+":") and int(global_currentfile.rsplit(':',1)[1]) != consider_linenumber:
consider_linenumber = int(global_currentfile.rsplit(':', 1)[1])
consider_linenumber += 1
###########################################################################
try:
option_args, file_args = getopt.getopt(sys.argv[1:], 'ho:s:eD:I:', ['version','help','nozip','obj=','case','nobodmas','intdiv','exportfile=','importfile=','mapfile=','lstfile='])
file_args = [os.path.normpath(x) for x in file_args]
except getopt.GetoptError:
printusage()
sys.exit(2)
inputfile = ''
outputfile = ''
objectfile = ''
PYTHONERRORS = False
ZIP = True
CASE = False
NOBODMAS = False
INTDIV = False
lstcode=""
listsymbols=[]
predefsymbols=[]
includefiles=[]
importfiles=[]
exportfile = None
mapfile = None
listingfile = None
def writelisting(line):
if listingfile != None:
listingfile.write(line+"\n")
for option,value in option_args:
if option in ['--version']:
printusage()
print("")
printlicense()
sys.exit(0)
if option in ['--help','-h']:
printusage()
sys.exit(0)
if option in ['-o']:
outputfile=value
if option in ['--obj']:
objectfile=value
if option in ['-s']:
listsymbols.append(value)
if option in ['-e']:
PYTHONERRORS = True # let python do its own error handling
if option in ['--nozip']:
ZIP = False # save the disk image without compression
if option in ['--nobodmas']:
NOBODMAS = True # use no operator precedence
if option in ['--case']:
CASE = True
if option in ['--intdiv']:
INTDIV = True
if option in ['--exportfile']:
if exportfile == None:
exportfile = value
else:
print("Export file specified twice")
printusage()
sys.exit(2)
if option in ['--importfile']:
importfiles.append(value)
if option in ['--mapfile']:
if mapfile == None:
mapfile = value
else:
print("Map file specified twice")
printusage()
sys.exit(2)
if option in ['--lstfile']:
if listingfile == None:
listingfile=open(value,"wt")
else:
print("List file specified twice")
printusage()
sys.exit(2)
if option in ['-D']:
predefsymbols.append(value)
if option in ['-I']:
includefiles.append(value)
if len(file_args) == 0 and len(includefiles) == 0:
print("No input file specified")
printusage()
sys.exit(2)
if (objectfile != '') and (len(file_args) != 1):
print("Object file output supports only a single source file")
printusage()
sys.exit(2)
if (outputfile == '') and (objectfile == ''):
outputfile = os.path.splitext(file_args[0])[0] + ".dsk"
image = new_disk_image()
for pathname in includefiles:
save_file_to_image(image,pathname)
for inputfile in file_args:
if (inputfile == outputfile) or (inputfile == objectfile):
print("Output file and input file are the same!")
printusage()
sys.exit(2)
symboltable = {}
symbolcase = {}
symusetable = {}
labeltable = {}
memory = []
forstack=[]
ifstack = []
ifstate = 0
for value in predefsymbols:
sym=value.split('=',1)
if len(sym)==1:
sym.append("1")
if not CASE:
sym[0]=sym[0].upper()
if PYTHONERRORS:
val = int(sym[1])
else:
try:
val = int(sym[1])
except:
print("Error: Invalid value for symbol predefined on command line, "+value)
sys.exit(1)
set_symbol(sym[0], int(sym[1]))
for picklefilename in importfiles:
with open(picklefilename, "rb") as f:
ImportSymbols = pickle.load(f)
for sym,val in list(ImportSymbols.items()):
symkey = sym if CASE else sym.upper()
symboltable[symkey]=val
if symkey != sym:
symbolcase[symkey] = sym
firstpage=32
firstpageoffset=16384
lastpage=-1
lastpageoffset=0
# always 32 memory pages, each a 16k array allocate-on-write
for initmemorypage in range(32):
memory.append('')
for p in 1,2:
print("pass ",p,"...")
global_path=''
include_stack=[]
origin = 32768
dumppage = 1
dumporigin = 0
dumpspace_pending = 0
dumpused = False
autoexecpage = 0
autoexecorigin = 0
assembler_pass(p, inputfile)
check_lastpage()
if len(ifstack) > 0:
print("Error: Mismatched IF and ENDIF statements, too many IF")
for item in ifstack:
print(item[0])
sys.exit(1)
if len(forstack) > 0:
print("Error: Mismatched EQU FOR and NEXT statements, too many EQU FOR")
for item in forstack:
print(item[1])
sys.exit(1)
printsymbols = {}
for symreg in listsymbols:
# add to printsymbols any pair from symboltable whose key matches symreg
for sym in symboltable:
if re.search(symreg, sym, 0 if CASE else re.IGNORECASE):
printsymbols[symbolcase.get(sym, sym)] = symboltable[sym]
if printsymbols != {}:
print(printsymbols)
if exportfile:
with open(exportfile, 'wb') as f:
pickle.dump({ symbolcase.get(k, k):v for k, v in symboltable.items() }, f, protocol=0)
if mapfile:
addrmap = {}
for sym,count in sorted(list(symusetable.items()), key=lambda x: x[1]):
if sym in labeltable:
symkey = sym if CASE else sym.upper()
symorig = symbolcase.get(sym, sym)
if symorig[0] == '@':
symorig += ':' + sym.rsplit(':', 1)[1]
addrmap[labeltable[sym]] = symorig
with open(mapfile,'w') as f:
for addr,sym in sorted(addrmap.items()):
f.write("%04X=%s\n" % (addr,sym))
save_memory(memory, image=image, filename=os.path.splitext(os.path.basename(inputfile))[0])
if objectfile != "":
save_memory(memory, filename=objectfile)
if outputfile != '':
save_disk_image(image, outputfile)
print("Finished")
| 31.484289
| 182
| 0.55527
|
288e84154549b57311ee51af8e7397e326b9aff0
| 13,448
|
py
|
Python
|
train.py
|
wwydmanski/ViT-pytorch
|
bdf59d11b97f3343785b1b13c547f21675a4c5d6
|
[
"MIT"
] | null | null | null |
train.py
|
wwydmanski/ViT-pytorch
|
bdf59d11b97f3343785b1b13c547f21675a4c5d6
|
[
"MIT"
] | null | null | null |
train.py
|
wwydmanski/ViT-pytorch
|
bdf59d11b97f3343785b1b13c547f21675a4c5d6
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from __future__ import absolute_import, division, print_function
import logging
import argparse
import os
import random
import numpy as np
from datetime import timedelta
import torch
import torch.distributed as dist
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
from models.modeling import VisionTransformer, CONFIGS
from utils.scheduler import WarmupLinearSchedule, WarmupCosineSchedule
from utils.data_utils import get_loader
from utils.dist_util import get_world_size
logger = logging.getLogger(__name__)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def save_model(args, model):
model_to_save = model.module if hasattr(model, 'module') else model
model_checkpoint = os.path.join(args.output_dir, "%s_checkpoint.bin" % args.name)
torch.save(model_to_save.state_dict(), model_checkpoint)
logger.info("Saved model checkpoint to [DIR: %s]", args.output_dir)
def setup(args):
# Prepare model
config = CONFIGS[args.model_type]
num_classes = 10 if args.dataset == "cifar10" else 100
model = VisionTransformer(config, args.img_size, zero_head=True, num_classes=num_classes)
# model.load_from(np.load(args.pretrained_dir))
model.to(args.device)
num_params = count_parameters(model)
logger.info("{}".format(config))
logger.info("Training parameters %s", args)
logger.info("Total Parameter: \t%2.1fM" % num_params)
print(num_params)
return args, model
def count_parameters(model):
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params/1000000
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def valid(args, model, writer, test_loader, global_step):
# Validation!
eval_losses = AverageMeter()
logger.info("***** Running Validation *****")
logger.info(" Num steps = %d", len(test_loader))
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
all_preds, all_label = [], []
epoch_iterator = tqdm(test_loader,
desc="Validating... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
loss_fct = torch.nn.CrossEntropyLoss()
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch)
x, y = batch
with torch.no_grad():
logits = model(x)[0]
eval_loss = loss_fct(logits, y)
eval_losses.update(eval_loss.item())
preds = torch.argmax(logits, dim=-1)
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
all_preds[0] = np.append(
all_preds[0], preds.detach().cpu().numpy(), axis=0
)
all_label[0] = np.append(
all_label[0], y.detach().cpu().numpy(), axis=0
)
epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
all_preds, all_label = all_preds[0], all_label[0]
accuracy = simple_accuracy(all_preds, all_label)
logger.info("\n")
logger.info("Validation Results")
logger.info("Global Steps: %d" % global_step)
logger.info("Valid Loss: %2.5f" % eval_losses.avg)
logger.info("Valid Accuracy: %2.5f" % accuracy)
writer.add_scalar("test/accuracy", scalar_value=accuracy, global_step=global_step)
return accuracy
def train(args, model):
""" Train the model """
if args.local_rank in [-1, 0]:
os.makedirs(args.output_dir, exist_ok=True)
writer = SummaryWriter(log_dir=os.path.join("logs", args.name))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
# Prepare dataset
train_loader, test_loader = get_loader(args)
# Prepare optimizer and scheduler
optimizer = torch.optim.SGD(model.parameters(),
lr=args.learning_rate,
momentum=0.9,
weight_decay=args.weight_decay)
t_total = args.num_steps
if args.decay_type == "cosine":
scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
else:
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
model, optimizer = amp.initialize(models=model,
optimizers=optimizer,
opt_level=args.fp16_opt_level)
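        # Start the dynamic loss scaler at 2**20 (an Apex private API) instead of
        # letting amp choose its own initial scale.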
amp._amp_state.loss_scalers[0]._loss_scale = 2**20
# Distributed training
if args.local_rank != -1:
model = DDP(model, message_size=250000000, gradient_predivide_factor=get_world_size())
# Train!
logger.info("***** Running training *****")
logger.info(" Total optimization steps = %d", args.num_steps)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
model.zero_grad()
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
losses = AverageMeter()
global_step, best_acc = 0, 0
while True:
model.train()
epoch_iterator = tqdm(train_loader,
desc="Training (X / X Steps) (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch)
x, y = batch
loss = model(x, y)
if args.gradient_accumulation_steps > 1:
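                # Scale the loss so gradients accumulated over several
                # micro-batches average out to one effective batch.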
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
losses.update(loss.item()*args.gradient_accumulation_steps)
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scheduler.step()
optimizer.step()
optimizer.zero_grad()
global_step += 1
epoch_iterator.set_description(
"Training (%d / %d Steps) (loss=%2.5f)" % (global_step, t_total, losses.val)
)
if args.local_rank in [-1, 0]:
writer.add_scalar("train/loss", scalar_value=losses.val, global_step=global_step)
writer.add_scalar("train/lr", scalar_value=scheduler.get_lr()[0], global_step=global_step)
if global_step % args.eval_every == 0 and args.local_rank in [-1, 0]:
accuracy = valid(args, model, writer, test_loader, global_step)
if best_acc < accuracy:
save_model(args, model)
best_acc = accuracy
model.train()
if global_step % t_total == 0:
break
losses.reset()
if global_step % t_total == 0:
break
if args.local_rank in [-1, 0]:
writer.close()
logger.info("Best Accuracy: \t%f" % best_acc)
logger.info("End Training!")
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--name", required=True,
help="Name of this run. Used for monitoring.")
parser.add_argument("--dataset", choices=["cifar10", "cifar100", "fashion-mnist"], default="cifar10",
help="Which downstream task.")
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16", "ViT-fmnist",
"ViT-L_32", "ViT-H_14", "R50-ViT-B_16"],
default="ViT-B_16",
help="Which variant to use.")
parser.add_argument("--pretrained_dir", type=str, default="checkpoint/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
parser.add_argument("--output_dir", default="output", type=str,
help="The output directory where checkpoints will be written.")
parser.add_argument("--img_size", default=224, type=int,
help="Resolution size")
parser.add_argument("--train_batch_size", default=512, type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=64, type=int,
help="Total batch size for eval.")
parser.add_argument("--eval_every", default=100, type=int,
help="Run prediction on validation set every so many steps."
"Will always run one evaluation at the end of training.")
parser.add_argument("--learning_rate", default=3e-2, type=float,
help="The initial learning rate for SGD.")
parser.add_argument("--weight_decay", default=0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--num_steps", default=10000, type=int,
help="Total number of training epochs to perform.")
parser.add_argument("--decay_type", choices=["cosine", "linear"], default="cosine",
help="How to decay the learning rate.")
parser.add_argument("--warmup_steps", default=500, type=int,
help="Step of training to perform learning rate warmup for.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O2',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--loss_scale', type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl',
timeout=timedelta(minutes=60))
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" %
(args.local_rank, args.device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
# Set seed
set_seed(args)
# Model & Tokenizer Setup
args, model = setup(args)
# Training
train(args, model)
if __name__ == "__main__":
main()
| 40.87538
| 113
| 0.601428
|
b77ca9ab6e85d7c9a66e7a01299bd35f88c76905
| 3,649
|
py
|
Python
|
test/09-text-sort-test.py
|
cloudant/mango
|
3da3110a6ee169c4d6991c00b8f8d59d20b7fe1f
|
[
"Apache-2.0"
] | 177
|
2015-01-12T08:59:11.000Z
|
2021-12-08T08:41:08.000Z
|
test/09-text-sort-test.py
|
cloudant/mango
|
3da3110a6ee169c4d6991c00b8f8d59d20b7fe1f
|
[
"Apache-2.0"
] | 31
|
2015-01-09T13:36:39.000Z
|
2016-04-07T18:30:13.000Z
|
test/09-text-sort-test.py
|
cloudant/mango
|
3da3110a6ee169c4d6991c00b8f8d59d20b7fe1f
|
[
"Apache-2.0"
] | 6
|
2015-01-09T17:28:55.000Z
|
2019-01-11T03:47:39.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import mango
import user_docs
class SortTests(mango.UserDocsTextTests):
def test_number_sort(self):
q = {"age": {"$gt": 0}}
docs = self.db.find(q, sort=["age:number"])
assert len(docs) == 15
assert docs[0]["age"] == 22
def test_number_sort_desc(self):
q = {"age": {"$gt": 0}}
docs = self.db.find(q, sort=[{"age": "desc"}])
assert len(docs) == 15
assert docs[0]["age"] == 79
q = {"manager": True}
docs = self.db.find(q, sort=[{"age:number": "desc"}])
assert len(docs) == 11
assert docs[0]["age"] == 79
def test_string_sort(self):
q = {"email": {"$gt": None}}
docs = self.db.find(q, sort=["email:string"])
assert len(docs) == 15
assert docs[0]["email"] == "abbottwatson@talkola.com"
def test_notype_sort(self):
q = {"email": {"$gt": None}}
try:
self.db.find(q, sort=["email"])
except Exception, e:
assert e.response.status_code == 400
else:
raise AssertionError("Should have thrown error for sort")
def test_array_sort(self):
q = {"favorites": {"$exists": True}}
docs = self.db.find(q, sort=["favorites.[]:string"])
assert len(docs) == 15
assert docs[0]["user_id"] == 8
def test_multi_sort(self):
q = {"name": {"$exists": True}}
docs = self.db.find(q, sort=["name.last:string", "age:number"])
assert len(docs) == 15
assert docs[0]["name"] == {"last":"Ewing","first":"Shelly"}
assert docs[1]["age"] == 22
def test_guess_type_sort(self):
q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}}]}
docs = self.db.find(q, sort=["age"])
assert len(docs) == 15
assert docs[0]["age"] == 22
def test_guess_dup_type_sort(self):
q = {"$and": [{"age":{"$gt": 0}}, {"email": {"$gt": None}},
{"age":{"$lte": 100}}]}
docs = self.db.find(q, sort=["age"])
assert len(docs) == 15
assert docs[0]["age"] == 22
def test_ambiguous_type_sort(self):
q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}},
{"age": "34"}]}
try:
self.db.find(q, sort=["age"])
except Exception, e:
assert e.response.status_code == 400
else:
raise AssertionError("Should have thrown error for sort")
def test_guess_multi_sort(self):
q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}},
{"name.last": "Harvey"}]}
docs = self.db.find(q, sort=["name.last", "age"])
assert len(docs) == 15
assert docs[0]["name"] == {"last":"Ewing","first":"Shelly"}
assert docs[1]["age"] == 22
def test_guess_mix_sort(self):
q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}},
{"name.last": "Harvey"}]}
docs = self.db.find(q, sort=["name.last:string", "age"])
assert len(docs) == 15
assert docs[0]["name"] == {"last":"Ewing","first":"Shelly"}
assert docs[1]["age"] == 22
| 36.128713
| 79
| 0.539326
|
13f350addedcfa64e971397a2017af2c5bb4d227
| 4,821
|
py
|
Python
|
SoftLayer/managers/metadata.py
|
dvzrv/softlayer-python
|
9a5f6c6981bcc370084537b4d1769383499ce90d
|
[
"MIT"
] | 126
|
2015-01-05T05:09:22.000Z
|
2021-07-02T00:16:35.000Z
|
SoftLayer/managers/metadata.py
|
dvzrv/softlayer-python
|
9a5f6c6981bcc370084537b4d1769383499ce90d
|
[
"MIT"
] | 969
|
2015-01-05T15:55:31.000Z
|
2022-03-31T19:55:20.000Z
|
SoftLayer/managers/metadata.py
|
dvzrv/softlayer-python
|
9a5f6c6981bcc370084537b4d1769383499ce90d
|
[
"MIT"
] | 176
|
2015-01-22T11:23:40.000Z
|
2022-02-11T13:16:58.000Z
|
"""
SoftLayer.metadata
~~~~~~~~~~~~~~~~~~
Metadata Manager/helpers
:license: MIT, see LICENSE for more details.
"""
from SoftLayer.API import BaseClient
from SoftLayer import consts
from SoftLayer import exceptions
from SoftLayer import transports
METADATA_MAPPING = {
'backend_mac': {'call': 'getBackendMacAddresses'},
'datacenter': {'call': 'getDatacenter'},
'datacenter_id': {'call': 'getDatacenterId'},
'domain': {'call': 'getDomain'},
'frontend_mac': {'call': 'getFrontendMacAddresses'},
'fqdn': {'call': 'getFullyQualifiedDomainName'},
'hostname': {'call': 'getHostname'},
'id': {'call': 'getId'},
'primary_backend_ip': {'call': 'getPrimaryBackendIpAddress'},
'primary_ip': {'call': 'getPrimaryIpAddress'},
'primary_frontend_ip': {'call': 'getPrimaryIpAddress'},
'provision_state': {'call': 'getProvisionState'},
'router': {'call': 'getRouter', 'param_req': True},
'tags': {'call': 'getTags'},
'user_data': {'call': 'getUserMetadata'},
'user_metadata': {'call': 'getUserMetadata'},
'vlan_ids': {'call': 'getVlanIds', 'param_req': True},
'vlans': {'call': 'getVlans', 'param_req': True},
}
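# Entries flagged with 'param_req' need an extra argument when queried, e.g. a
# MAC address for 'router'/'vlans'/'vlan_ids' (see MetadataManager._get_network).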
METADATA_ATTRIBUTES = METADATA_MAPPING.keys()
class MetadataManager(object):
"""Provides an interface for the SoftLayer metadata service.
See product information here:
http://sldn.softlayer.com/reference/services/SoftLayer_Resource_Metadata
    This provides metadata about the resource it is called from.
See `METADATA_ATTRIBUTES` for full list of attributes.
Usage:
>>> import SoftLayer
>>> client = SoftLayer.create_client_from_env()
>>> from SoftLayer import MetadataManager
>>> meta = MetadataManager(client)
>>> meta.get('datacenter')
'dal05'
>>> meta.get('fqdn')
'test.example.com'
:param SoftLayer.API.BaseClient client: the client instance
"""
attribs = METADATA_MAPPING
def __init__(self, client=None, timeout=5):
if client is None:
transport = transports.RestTransport(
timeout=timeout,
endpoint_url=consts.API_PRIVATE_ENDPOINT_REST,
)
client = BaseClient(transport=transport)
self.client = client
def get(self, name, param=None):
"""Retreive a metadata attribute.
:param string name: name of the attribute to retrieve. See `attribs`
:param param: Required parameter for some attributes
"""
if name not in self.attribs:
raise exceptions.SoftLayerError('Unknown metadata attribute.')
call_details = self.attribs[name]
if call_details.get('param_req'):
if not param:
raise exceptions.SoftLayerError(
'Parameter required to get this attribute.')
params = tuple()
if param is not None:
params = (param,)
try:
return self.client.call('Resource_Metadata',
self.attribs[name]['call'],
*params)
except exceptions.SoftLayerAPIError as ex:
if ex.faultCode == 404:
return None
raise ex
def _get_network(self, kind, router=True, vlans=True, vlan_ids=True):
"""Wrapper for getting details about networks.
:param string kind: network kind. Typically 'public' or 'private'
:param boolean router: flag to include router information
:param boolean vlans: flag to include vlan information
:param boolean vlan_ids: flag to include vlan_ids
"""
network = {}
macs = self.get('%s_mac' % kind)
network['mac_addresses'] = macs
if len(macs) == 0:
return network
if router:
network['router'] = self.get('router', macs[0])
if vlans:
network['vlans'] = self.get('vlans', macs[0])
if vlan_ids:
network['vlan_ids'] = self.get('vlan_ids', macs[0])
return network
def public_network(self, **kwargs):
"""Returns details about the public network.
:param boolean router: True to return router details
:param boolean vlans: True to return vlan details
:param boolean vlan_ids: True to return vlan_ids
"""
return self._get_network('frontend', **kwargs)
def private_network(self, **kwargs):
"""Returns details about the private network.
:param boolean router: True to return router details
:param boolean vlans: True to return vlan details
:param boolean vlan_ids: True to return vlan_ids
"""
return self._get_network('backend', **kwargs)
| 32.574324
| 77
| 0.606513
|
d94512b982a41644d86012c081d85771f60b080e
| 2,448
|
py
|
Python
|
bin/check-assembly-accession.py
|
simone-pignotti/bactopia
|
19267101de927589ed222d5655fef171c7e68a4f
|
[
"MIT"
] | null | null | null |
bin/check-assembly-accession.py
|
simone-pignotti/bactopia
|
19267101de927589ed222d5655fef171c7e68a4f
|
[
"MIT"
] | null | null | null |
bin/check-assembly-accession.py
|
simone-pignotti/bactopia
|
19267101de927589ed222d5655fef171c7e68a4f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
"""
"""
PROGRAM = "check-assembly-accession"
VERSION = "1.5.3"
def check_assembly_version(accession):
from Bio import Entrez
import time
import json
Entrez.email = "robert.petit@emory.edu"
Entrez.tool = "BactopiaSelectReferences"
handle = Entrez.esearch(db="assembly", term=accession, retmax="500")
record = Entrez.read(handle, validate=False)
time.sleep(1) # Be kind to NCBI
if len(record["IdList"]):
handle = Entrez.esummary(db="assembly", id=",".join(record["IdList"]))
record = Entrez.read(handle, validate=False)
time.sleep(1) # Be kind to NCBI
records = []
excluded = set()
for assembly in record['DocumentSummarySet']["DocumentSummary"]:
if assembly["ExclFromRefSeq"]:
# PGAP can cause some Assemblies to eventually become excluded from RefSeq
# https://www.ncbi.nlm.nih.gov/assembly/help/anomnotrefseq/
for reason in assembly["ExclFromRefSeq"]:
excluded.add(reason)
else:
records.append(assembly["AssemblyAccession"])
if excluded:
return [','.join(list(excluded)), True]
else:
return [sorted(records, reverse=True)[0], False]
else:
return [f'No records found for {accession}', True]
if __name__ == '__main__':
import argparse as ap
from collections import defaultdict
import random
import sys
parser = ap.ArgumentParser(
prog=PROGRAM,
conflict_handler='resolve',
description=(
f'{PROGRAM} (v{VERSION}) - Verifies NCBI Assembly accession is latest and still available'
)
)
parser.add_argument(
'reference', metavar="STR", type=str,
help='NCBI Assembly accession to be tested.'
)
parser.add_argument('--version', action='version',
version=f'{PROGRAM} {VERSION}')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
reference = args.reference.split('.')[0]
current_accession, excluded = check_assembly_version(reference)
if excluded:
print(
f'Skipping {reference}. Reason: {current_accession}',
file=sys.stderr
)
else:
print(f'Using {current_accession} for {args.reference}', file=sys.stderr)
print(current_accession)
| 31.384615
| 102
| 0.609069
|
9feab2f73cda8cfb2f0c9cf6e291a8cd8a6c479a
| 5,655
|
py
|
Python
|
tools/port_controller.py
|
iPlantCollaborativeOpenSource/iPlant-Atmosphere
|
d67b953561e813dd30ffa52c8440af7cc2d990cf
|
[
"Unlicense"
] | 1
|
2017-10-05T08:03:37.000Z
|
2017-10-05T08:03:37.000Z
|
tools/port_controller.py
|
iPlantCollaborativeOpenSource/iPlant-Atmosphere
|
d67b953561e813dd30ffa52c8440af7cc2d990cf
|
[
"Unlicense"
] | null | null | null |
tools/port_controller.py
|
iPlantCollaborativeOpenSource/iPlant-Atmosphere
|
d67b953561e813dd30ffa52c8440af7cc2d990cf
|
[
"Unlicense"
] | null | null | null |
#!/usr/local/bin/python
import sys
import os
from euca2ools import Euca2ool, AddressValidationError, ProtocolValidationError, Util
import subprocess
# python ./port_controller.py [esteve] [22,80,443,5900,5901,5902,5903,5904,5905,5906,5907,5908,5909,5910,1247] [1247]
class PortControllerException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
def openPort(user_list,protocol,port_list):
#euca-authorize -p tcp -p 22 -s 0.0.0.0/0 default
euca = None
try:
euca = Euca2ool('P:p:o:u:s:t:', ['protocol=', 'port-range=', 'source-group=', 'source-group-user=', 'source-subnet=', 'icmp-type-code='], compat=True)
except Exception, e:
print e
for user in user_list:
for port in port_list:
group_name = None
protocol = protocol
from_port = port
to_port = port
source_group_name = None
source_group_owner_id = None
cidr_ip = None
euca_conn = euca.make_connection()
def main():
if len(sys.argv) != 4 :
print "Usage: port_controller.py [user list] [TCP port list] [UDP port list]"
print "ex) port_controller [user1,user2] [22,80,443,5900,5901,5902,5903,5904,5905,5906,5907,5908,5909,5910,1247] [1247]"
print "ex) port_controller * [22,80,90,100] [23,24]"
sys.exit()
if (sys.argv[1][0] != "[" or sys.argv[1][-1] != "]") or (sys.argv[2][0] != "[" or sys.argv[2][-1] != "]") or (sys.argv[3][0] != "[" or sys.argv[3][-1] != "]"):
print "invalid argument format"
sys.exit()
userList = (lambda x: map(str, x[1:-1].split(',')))(sys.argv[1])
try:
tcpPortList = (lambda x: map(int, x[1:-1].split(',')))(sys.argv[2])
except ValueError:
tcpPortList = []
try:
udpPortList = (lambda x: map(int, x[1:-1].split(',')))(sys.argv[3])
except ValueError:
udpPortList = []
if len(filter(lambda x: not os.path.exists(os.getcwd()+"/odin-"+x+"-x509/eucarc"),userList)) > 0 :
print "User %s does/do not exit" % str(filter(lambda x: not os.path.exists(os.getcwd()+"/odin-"+x+"-x509/eucarc"),userList))
sys.exit()
run_cmd = lambda c : subprocess.Popen(c.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=False).stdout.read()
for user in userList:
print user
euca = None
my_EC2_ACCESS_KEY = None
my_EC2_SECRET_KEY = None
my_EC2_URL = None
my_S3_URL = None
eucarc_file = None
eucarc_file = open(os.getcwd()+"/odin-"+user+"-x509/eucarc",'r')
for line in eucarc_file.readlines():
if line[0] != "#" and line.split()[0] == "export":
#setattr(self,line.split()[1].split("=",1)[0],line.split()[1].split("=",1)[1])
#globals()[ "my_"+line.split()[1].split("=",1)[0] = line.split()[1].split("=",1)[1]]
#locals()[ "my_%s" % line.split()[1].split("=",1)[0] ] = line.split()[1].split("=",1)[1]
#exec 'my_%s = %s' % (line.split()[1].split("=",1)[0], line.split()[1].split("=",1)[1])
#setattr(euca,line.split()[1].split("=",1)[0],line.split()[1].split("=",1)[1])
if line.split()[1].split("=",1)[0] == "EC2_ACCESS_KEY" :
my_EC2_ACCESS_KEY = line.split()[1].split("=",1)[1][1:-1]
if line.split()[1].split("=",1)[0] == "EC2_SECRET_KEY" :
my_EC2_SECRET_KEY = line.split()[1].split("=",1)[1][1:-1]
if line.split()[1].split("=",1)[0] == "EC2_URL" :
my_EC2_URL = line.split()[1].split("=",1)[1]
if line.split()[1].split("=",1)[0] == "S3_URL" :
my_S3_URL = line.split()[1].split("=",1)[1]
try:
euca = Euca2ool('P:p:o:u:s:t:', ['protocol=', 'port-range=', 'source-group=', 'source-group-user=', 'source-subnet=', 'icmp-type-code='], compat=True)
#euca = Euca2ool()
euca.ec2_user_access_key = my_EC2_ACCESS_KEY
euca.ec2_user_secret_key = my_EC2_SECRET_KEY
euca.ec2_url = my_EC2_URL
euca.s3_url = my_S3_URL
euca_conn = euca.make_connection()
except Exception, e:
print "\neuca conn error\n" + str(e)
for tcp_port in tcpPortList:
group_name = "default"
protocol = "tcp"
from_port = tcp_port
to_port = tcp_port
source_group_name = None
source_group_owner_id = None
cidr_ip = "0.0.0.0/0"
euca.validate_address(cidr_ip)
euca.validate_protocol(protocol)
#euca_conn = euca.make_connection()
try:
return_code = euca_conn.authorize_security_group(group_name = group_name,
src_security_group_name = source_group_name,
src_security_group_owner_id = source_group_owner_id,
ip_protocol = protocol,
from_port = tcp_port,
to_port = tcp_port,
cidr_ip = cidr_ip)
print "\t%s %s %s" % (str(return_code), protocol, from_port)
except Exception, ex:
print "\tauth cmd error\n" + str(ex)
for udp_port in udpPortList:
group_name = "default"
protocol = "udp"
from_port = udp_port
to_port = udp_port
source_group_name = []
source_group_owner_id = []
cidr_ip = "0.0.0.0/0"
#euca_conn = euca.make_connection()
try:
return_code = euca_conn.authorize_security_group(group_name = group_name,src_security_group_name = source_group_name,src_security_group_owner_id = source_group_owner_id,ip_protocol = protocol,from_port = from_port,to_port = to_port,cidr_ip = cidr_ip)
print "\t%s %s %s" % (str(return_code), protocol, from_port)
except Exception, ex:
print ex
if __name__ == "__main__":
main()
#python ./port_controller.py [esteve] [22,80,443,5900,5901,5902,5903,5904,5905,5906,5907,5908,5909,5910,1247] [1247]
| 37.7
| 258
| 0.618568
|
26e8412ba9314fb8db387f0f3499115e6e4bcc1a
| 3,990
|
py
|
Python
|
Source/chrome/tools/perf/metrics/speedindex_unittest.py
|
yury-s/v8-inspector
|
0ab4779e0909d387f243f41ca2621237cdb0c7fe
|
[
"BSD-3-Clause"
] | 20
|
2015-08-26T06:46:00.000Z
|
2019-02-27T09:05:58.000Z
|
Source/chrome/tools/perf/metrics/speedindex_unittest.py
|
yury-s/v8-inspector
|
0ab4779e0909d387f243f41ca2621237cdb0c7fe
|
[
"BSD-3-Clause"
] | null | null | null |
Source/chrome/tools/perf/metrics/speedindex_unittest.py
|
yury-s/v8-inspector
|
0ab4779e0909d387f243f41ca2621237cdb0c7fe
|
[
"BSD-3-Clause"
] | 2
|
2015-08-26T05:49:35.000Z
|
2020-02-03T20:22:43.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# These tests access private methods in the speedindex module.
# pylint: disable=W0212
import unittest
from telemetry.image_processing import histogram
from telemetry.image_processing import rgba_color
from metrics import speedindex
class FakeImageUtil(object):
# pylint: disable=W0613
def GetColorHistogram(self, image, ignore_color=None, tolerance=None):
return image.ColorHistogram()
class FakeVideo(object):
def __init__(self, frames):
self._frames = frames
def GetVideoFrameIter(self):
for frame in self._frames:
yield frame
class FakeBitmap(object):
def __init__(self, r, g, b):
self._histogram = histogram.ColorHistogram(r, g, b, rgba_color.WHITE)
# pylint: disable=W0613
def ColorHistogram(self, ignore_color=None, tolerance=None):
return self._histogram
class FakeTab(object):
def __init__(self, video_capture_result=None):
self._javascript_result = None
self._video_capture_result = FakeVideo(video_capture_result)
@property
def video_capture_supported(self):
return self._video_capture_result is not None
def SetEvaluateJavaScriptResult(self, result):
self._javascript_result = result
def EvaluateJavaScript(self, _):
return self._javascript_result
def StartVideoCapture(self, min_bitrate_mbps=1):
assert self.video_capture_supported
assert min_bitrate_mbps > 0
def StopVideoCapture(self):
assert self.video_capture_supported
return self._video_capture_result
def Highlight(self, _):
pass
class SpeedIndexImplTest(unittest.TestCase):
def testVideoCompleteness(self):
frames = [
(0.0, FakeBitmap([ 0, 0, 0,10], [ 0, 0, 0,10], [ 0, 0, 0,10])),
(0.1, FakeBitmap([10, 0, 0, 0], [10, 0, 0, 0], [10, 0, 0, 0])),
(0.2, FakeBitmap([ 0, 0, 2, 8], [ 0, 0, 4, 6], [ 0, 0, 1, 9])),
(0.3, FakeBitmap([ 0, 3, 2, 5], [ 2, 1, 0, 7], [ 0, 3, 0, 7])),
(0.4, FakeBitmap([ 0, 0, 1, 0], [ 0, 0, 1, 0], [ 0, 0, 1, 0])),
(0.5, FakeBitmap([ 0, 4, 6, 0], [ 0, 4, 6, 0], [ 0, 4, 6, 0])),
]
max_distance = 42.
tab = FakeTab(frames)
impl = speedindex.VideoSpeedIndexImpl(FakeImageUtil())
impl.Start(tab)
impl.Stop(tab)
time_completeness = impl.GetTimeCompletenessList(tab)
self.assertEqual(len(time_completeness), 6)
self.assertEqual(time_completeness[0], (0.0, 0))
self.assertTimeCompleteness(
time_completeness[1], 0.1, 1 - (16 + 16 + 16) / max_distance)
self.assertTimeCompleteness(
time_completeness[2], 0.2, 1 - (12 + 10 + 13) / max_distance)
self.assertTimeCompleteness(
time_completeness[3], 0.3, 1 - (6 + 10 + 8) / max_distance)
self.assertTimeCompleteness(
time_completeness[4], 0.4, 1 - (4 + 4 + 4) / max_distance)
self.assertEqual(time_completeness[5], (0.5, 1))
def testBlankPage(self):
frames = [
(0.0, FakeBitmap([0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1])),
(0.1, FakeBitmap([0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1])),
(0.2, FakeBitmap([1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1])),
(0.3, FakeBitmap([0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1])),
]
tab = FakeTab(frames)
impl = speedindex.VideoSpeedIndexImpl(FakeImageUtil())
impl.Start(tab)
impl.Stop(tab)
time_completeness = impl.GetTimeCompletenessList(tab)
self.assertEqual(len(time_completeness), 4)
self.assertEqual(time_completeness[0], (0.0, 1.0))
self.assertEqual(time_completeness[1], (0.1, 1.0))
self.assertEqual(time_completeness[2], (0.2, 0.0))
self.assertEqual(time_completeness[3], (0.3, 1.0))
def assertTimeCompleteness(self, time_completeness, time, completeness):
self.assertEqual(time_completeness[0], time)
self.assertAlmostEqual(time_completeness[1], completeness)
if __name__ == "__main__":
unittest.main()
| 32.975207
| 74
| 0.660652
|
7c443b73f0c5a083f4ac3d4422fd9110af79bee7
| 5,074
|
py
|
Python
|
docs/conf.py
|
davidr/python-timemarker
|
16fc4d4f9d6b534b94005169c977ed143f4f9dc5
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
davidr/python-timemarker
|
16fc4d4f9d6b534b94005169c977ed143f4f9dc5
|
[
"MIT"
] | 1
|
2017-10-31T03:54:26.000Z
|
2017-11-01T03:01:43.000Z
|
docs/conf.py
|
davidr/python-timemarker
|
16fc4d4f9d6b534b94005169c977ed143f4f9dc5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# timemarker documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 30 22:24:39 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinxcontrib.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'timemarker'
copyright = '2017, David Ressman'
author = 'David Ressman'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'timemarkerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'timemarker.tex', 'timemarker Documentation',
'David Ressman', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'timemarker', 'timemarker Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'timemarker', 'timemarker Documentation',
author, 'timemarker', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 30.383234 | 79 | 0.683682 |
7b99ee94937f6463bdacd67dd7fbe730aa1fbea6 | 2,608 | py | Python | tests/test_get_deposit_history.py | xiaohuid/huobi_Python | ebc84b2fc560f77fd77457f36ff91906c43646e3 | ["Apache-2.0"] | 1 | 2020-12-28T07:04:45.000Z | 2020-12-28T07:04:45.000Z | tests/test_get_deposit_history.py | xiaohuid/huobi_Python | ebc84b2fc560f77fd77457f36ff91906c43646e3 | ["Apache-2.0"] | null | null | null | tests/test_get_deposit_history.py | xiaohuid/huobi_Python | ebc84b2fc560f77fd77457f36ff91906c43646e3 | ["Apache-2.0"] | 1 | 2022-03-27T10:36:04.000Z | 2022-03-27T10:36:04.000Z |
import unittest
from huobi.impl.utils import *
from huobi.model import *
from huobi.impl.restapirequestimpl import RestApiRequestImpl
from huobi.impl.restapirequestimpl import account_info_map
data = '''
{
"status": "ok",
"data": [
{
"id": 1171,
"type": "deposit",
"currency": "ht",
"tx-hash": "ed03094b84eafbe4bc16e7ef766ee959885ee5bcb265872baaa9c64e1cf86c2b",
"amount": 7.457467,
"address": "rae93V8d2mdoUQHwBDBdM4NHCMehRJAsbm",
"address-tag": "100040",
"fee": 345,
"chain":"abcd",
"state": "confirmed",
"created-at": 1510912472199,
"updated-at": 1511145876575
}
]
}
'''
class TestGetDepositHistory(unittest.TestCase):
@classmethod
def setUpClass(cls):
user = User()
account = Account()
account.account_type = AccountType.SPOT
account.id = 12345
accounts = list()
accounts.append(account)
user.accounts = accounts
account_info_map.user_map["12345"] = user
def test_request(self):
impl = RestApiRequestImpl("12345", "67890")
request = impl.get_deposit_history("btc", 24966984923, 1, "next")
self.assertEqual("GET", request.method)
self.assertTrue(request.url.find("/v1/query/deposit-withdraw") != -1)
self.assertTrue(request.url.find("Signature") != -1)
self.assertTrue(request.url.find("currency=btc") != -1)
self.assertTrue(request.url.find("from=24966984923") != -1)
self.assertTrue(request.url.find("size=1") != -1)
self.assertTrue(request.url.find("type=deposit") != -1)
self.assertTrue(request.url.find("direct=next") != -1)
def test_result(self):
impl = RestApiRequestImpl("12345", "67890")
request = impl.get_deposit_history("btc", 24966984923, 1, "next")
deposits = request.json_parser(parse_json_from_string(data))
self.assertEqual(1, len(deposits))
self.assertEqual(345, deposits[0].fee)
self.assertEqual(1171, deposits[0].id)
self.assertEqual(1510912472199, deposits[0].created_timestamp)
self.assertEqual(1511145876575, deposits[0].updated_timestamp)
self.assertTrue("rae93V8d2mdoUQHwBDBdM4NHCMehRJAsbm", deposits[0].address)
self.assertEqual("100040", deposits[0].address_tag)
self.assertEqual("ht", deposits[0].currency)
self.assertEqual("ed03094b84eafbe4bc16e7ef766ee959885ee5bcb265872baaa9c64e1cf86c2b", deposits[0].tx_hash)
self.assertEqual(WithdrawState.CONFIRMED, deposits[0].withdraw_state)
| 37.257143 | 113 | 0.653374 |
2c2173be8f54deb6fa2d720c48e1ce49d3876415 | 3,271 | py | Python | mqttclient/__init__.py | Lucaszw/mqttclient-python | b8886a5189aa661a4247810af2e67f49acf09e5f | ["BSD-2-Clause"] | null | null | null | mqttclient/__init__.py | Lucaszw/mqttclient-python | b8886a5189aa661a4247810af2e67f49acf09e5f | ["BSD-2-Clause"] | null | null | null | mqttclient/__init__.py | Lucaszw/mqttclient-python | b8886a5189aa661a4247810af2e67f49acf09e5f | ["BSD-2-Clause"] | null | null | null |
"""MQTT Client for Python"""
__version__ = '0.0.2'
import asyncio
import inspect
import json
import os
import time
import random
import re
import urllib
import paho.mqtt.client as mqtt
from wheezy.routing import PathRouter
from .mqtt_messages import MqttMessages
_underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
_underscorer2 = re.compile('([a-z0-9])([A-Z])')
def camelToSnake(s):
# https://gist.github.com/jaytaylor/3660565
subbed = _underscorer1.sub(r'\1_\2', s)
return _underscorer2.sub(r'\1_\2', subbed).lower()
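# Added illustration (not part of the original module): the two regex passes insert
# underscores at case boundaries before lowercasing, e.g.
#   camelToSnake("MqttClient") -> "mqtt_client"
#   camelToSnake("getHTTPResponseCode") -> "get_http_response_code"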
class MqttClient(MqttMessages):
"""
Example MqttClient for Application Frameworks (such as Microdrop)
Used with the following broker:
https://github.com/sci-bots/microdrop-3/blob/master/MoscaServer.js
"""
def __init__(self, host="localhost", port=1883, base="microdrop"):
super().__init__()
self.router = PathRouter()
self.host = host
self.port = port
self.base = base
self.subscriptions = []
self.client_id = self.ClientID()
self.client = self.Client()
self.connected = False
@property
def filepath(self):
try:
return os.path.dirname(inspect.getfile(self.__class__))
except:
return 'unknown'
@property
def name(self):
safe_chars = '~@#$&()*!+=:;,.?/\''
return urllib.parse.quote(camelToSnake(self.__class__.__name__), safe=safe_chars)
@property
def version(self):
return '0.0'
def send_message(self, topic, msg={}, retain=False, qos=0, dup=False):
message = json.dumps(msg)
self.client.publish(topic, message, retain=retain, qos=qos)
def on_connect(self, client, userdata, flags, rc):
self.connected = True
self.listen()
self.trigger('start', 'null')
def on_disconnect(self, client, userdata, rc):
self.connected = False
def listen(self):
print(f'No listen method implemented for {self.name}')
def on_message(self, client, userdata, msg):
method, args = self.router.match(msg.topic)
try:
payload = json.loads(msg.payload)
except ValueError:
print("Message contains invalid json")
print(f'topic: {msg.topic}')
payload = None
if method:
method(payload, args)
def wrap_data(self, key, val):
msg = {}
if isinstance(val, dict) and val != None:
msg = val
else:
msg[key] = val
msg['__head__'] = self.DefaultHeader()
return msg
def Client(self, keepalive=60):
client = mqtt.Client(self.client_id)
client.on_connect = self.on_connect
client.on_message = self.on_message
client.on_disconnect = self.on_disconnect
client.connect(host=self.host, port=self.port)
client.loop_start()
return client
def ClientID(self):
timestamp = str(time.time()).replace(".","")
randnum = random.randint(1,1000)
return f'{self.name}>>{self.filepath}>>{timestamp}.{randnum}'
def DefaultHeader(self):
header = {}
header['plugin_name'] = self.name
header['plugin_version'] = self.version
return header
| 28.198276 | 89 | 0.610822 |
dc3a87c1618a508b1c4d785858e856af06b3614a | 1,736 | py | Python | paddlespeech/s2t/exps/u2/bin/train.py | jerryuhoo/PaddleSpeech | 1eec7b5e042da294c7524af92f0fae4c32a71aa3 | ["Apache-2.0"] | 1,379 | 2021-11-10T02:42:21.000Z | 2022-03-31T13:34:25.000Z | paddlespeech/s2t/exps/u2/bin/train.py | jerryuhoo/PaddleSpeech | 1eec7b5e042da294c7524af92f0fae4c32a71aa3 | ["Apache-2.0"] | 268 | 2021-11-10T14:07:34.000Z | 2022-03-31T02:25:20.000Z | paddlespeech/s2t/exps/u2/bin/train.py | jerryuhoo/PaddleSpeech | 1eec7b5e042da294c7524af92f0fae4c32a71aa3 | ["Apache-2.0"] | 296 | 2021-11-15T02:37:11.000Z | 2022-03-31T12:14:46.000Z |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer for U2 model."""
import cProfile
import os
from yacs.config import CfgNode
from paddlespeech.s2t.exps.u2.model import U2Trainer as Trainer
from paddlespeech.s2t.training.cli import default_argument_parser
from paddlespeech.s2t.utils.utility import print_arguments
# from paddlespeech.s2t.exps.u2.trainer import U2Trainer as Trainer
def main_sp(config, args):
exp = Trainer(config, args)
exp.setup()
exp.run()
def main(config, args):
main_sp(config, args)
if __name__ == "__main__":
parser = default_argument_parser()
args = parser.parse_args()
print_arguments(args, globals())
# https://yaml.org/type/float.html
config = CfgNode(new_allowed=True)
if args.config:
config.merge_from_file(args.config)
if args.opts:
config.merge_from_list(args.opts)
config.freeze()
print(config)
if args.dump_config:
with open(args.dump_config, 'w') as f:
print(config, file=f)
# Setting for profiling
pr = cProfile.Profile()
pr.runcall(main, config, args)
pr.dump_stats(os.path.join(args.output, 'train.profile'))
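# Example launch (added illustration; flag names are inferred from the attribute
# accesses above and the paths are placeholders, not values from this repository):
#   python train.py --config conf/conformer.yaml --output exp/conformer
# The cProfile stats are then written to exp/conformer/train.profile.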
| 29.931034 | 74 | 0.722926 |
750429a9e5d392a455adc11a978eaa245bf0e5c5 | 1,854 | py | Python | samples/test/util.py | jackwhelpton/pipelines | 48216bc44c3d242cbba6a1f5e91493c6bc631536 | ["Apache-2.0"] | null | null | null | samples/test/util.py | jackwhelpton/pipelines | 48216bc44c3d242cbba6a1f5e91493c6bc631536 | ["Apache-2.0"] | null | null | null | samples/test/util.py | jackwhelpton/pipelines | 48216bc44c3d242cbba6a1f5e91493c6bc631536 | ["Apache-2.0"] | null | null | null |
import kfp
MINUTE = 60
def default_verify_func(run_id, run):
assert run.status == 'Succeeded'
def run_pipeline_func(
pipeline_func,
verify_func=default_verify_func,
mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE
):
"""Run a pipeline function and wait for its result.
:param pipeline_func: pipeline function to run
:type pipeline_func: function
"""
def main(
output_directory: str, # example gs://your-bucket/path/to/workdir
host: str = 'http://ml-pipeline:8888',
launcher_image: 'URI' = None,
experiment: str = 'v2_sample_test_samples',
):
"""Test file CLI entrypoint used by Fire.
:param output_directory: pipeline output directory that holds intermediate artifacts.
:type output_directory: str
:param launcher_image: override launcher image, only used in V2_COMPATIBLE mode
:type launcher_image: URI, optional
:param experiment: experiment the run is added to, defaults to 'v2_sample_test_samples'
:type experiment: str, optional
"""
client = kfp.Client(host=host)
run_result = client.create_run_from_pipeline_func(
pipeline_func,
mode=mode,
arguments={kfp.dsl.ROOT_PARAMETER_NAME: output_directory},
launcher_image=launcher_image,
experiment_name=experiment,
)
print("Run details page URL:")
print(f"{host}/#/runs/details/{run_result.run_id}")
run_response = run_result.wait_for_run_completion(10 * MINUTE)
run = run_response.run
from pprint import pprint
pprint(run_response.run)
print("Run details page URL:")
print(f"{host}/#/runs/details/{run_result.run_id}")
verify_func(run_id=run_result.run_id, run=run)
import fire
fire.Fire(main)
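# Added usage sketch (hypothetical; "my_pipeline" and the sample file name are
# placeholders, not part of the original file). A sample test would call
#   run_pipeline_func(my_pipeline)
# and Fire then exposes main()'s keyword arguments as CLI flags, e.g.
#   python my_sample_test.py --output_directory gs://your-bucket/path/to/workdir --host http://localhost:8888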
| 33.107143 | 95 | 0.661812 |
03ccdeeff00f4c04127d451b10f4d6a7ca8a368f | 3,363 | py | Python | aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/SubmitMediaCensorJobRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | ["Apache-2.0"] | null | null | null | aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/SubmitMediaCensorJobRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | ["Apache-2.0"] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/SubmitMediaCensorJobRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmts.endpoint import endpoint_data
class SubmitMediaCensorJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Mts', '2014-06-18', 'SubmitMediaCensorJob')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Title(self):
return self.get_query_params().get('Title')
def set_Title(self,Title):
self.add_query_param('Title',Title)
def get_VideoCensorConfig(self):
return self.get_query_params().get('VideoCensorConfig')
def set_VideoCensorConfig(self,VideoCensorConfig):
self.add_query_param('VideoCensorConfig',VideoCensorConfig)
def get_UserData(self):
return self.get_query_params().get('UserData')
def set_UserData(self,UserData):
self.add_query_param('UserData',UserData)
def get_CoverImages(self):
return self.get_query_params().get('CoverImages')
def set_CoverImages(self,CoverImages):
self.add_query_param('CoverImages',CoverImages)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_PipelineId(self):
return self.get_query_params().get('PipelineId')
def set_PipelineId(self,PipelineId):
self.add_query_param('PipelineId',PipelineId)
def get_Input(self):
return self.get_query_params().get('Input')
def set_Input(self,Input):
self.add_query_param('Input',Input)
def get_Barrages(self):
return self.get_query_params().get('Barrages')
def set_Barrages(self,Barrages):
self.add_query_param('Barrages',Barrages)
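# Minimal usage sketch (added illustration; the client setup and the Input payload
# values are placeholders, not taken from this file):
#   request = SubmitMediaCensorJobRequest()
#   request.set_Input('{"Bucket": "example-bucket", "Location": "oss-cn-shanghai", "Object": "video.mp4"}')
#   request.set_PipelineId("your-pipeline-id")
#   # response = acs_client.do_action_with_exception(request)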
| 32.650485 | 74 | 0.766875 |
ca32fbb656a1e046fa391e51f8d55e92df3dabcc | 310 | py | Python | app.py | cryptus-neoxys/news-api | 9010c776256f07279a104fed7499b112c071eeb5 | ["MIT"] | null | null | null | app.py | cryptus-neoxys/news-api | 9010c776256f07279a104fed7499b112c071eeb5 | ["MIT"] | null | null | null | app.py | cryptus-neoxys/news-api | 9010c776256f07279a104fed7499b112c071eeb5 | ["MIT"] | null | null | null |
from flask import Flask, render_template, request, jsonify, redirect
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
# @app.route("/bitcoin/")
# def bitcoin():
# return render_template("./data/bitcoin.json")
if __name__ == '__main__':
app.run(debug=True)
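# Added note (not part of the original app): running `python app.py` starts Flask's
# development server (by default on http://127.0.0.1:5000/) and serves index.html at "/".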
| 22.142857 | 68 | 0.687097 |
266f632fa7716f9a872dc381aa965c3cbb462f5b | 3,211 | py | Python | crops/command_line/crops-renumber.py | jjavier-bm/crops | 658a98f9c168cc27b3f967e7a60a0df896ef5ac6 | ["BSD-3-Clause"] | null | null | null | crops/command_line/crops-renumber.py | jjavier-bm/crops | 658a98f9c168cc27b3f967e7a60a0df896ef5ac6 | ["BSD-3-Clause"] | 5 | 2020-07-17T08:45:22.000Z | 2022-03-11T13:39:26.000Z | crops/command_line/crops-renumber.py | jjavier-bm/crops | 658a98f9c168cc27b3f967e7a60a0df896ef5ac6 | ["BSD-3-Clause"] | 1 | 2020-07-07T15:42:07.000Z | 2020-07-07T15:42:07.000Z |
"""==========
This script will renumber a structure file in agreement with the
residue positions in the sequence file corresponding to that structure.
Non-polymer elements are numbered starting right after the final (TER) residue.
IMPORTANT: If the input sequence and the input structure files are not from the
same source (e.g. RCSB PDB) a source conflict might occur making the
renumbering operation unsuccessful even if the program does not crash.
"""
from crops.about import __prog__, __description__, __author__, __date__, __version__
import argparse
import os
from crops.io import check_path
from crops.io import outpathgen
from crops.io import parsers as cin
from crops.core import ops as cop
from crops import command_line as ccl
logger=None
def create_argument_parser():
"""Create a parser for the command line arguments used in crops-renumber"""
parser = argparse.ArgumentParser(prog=__prog__, formatter_class=argparse.RawDescriptionHelpFormatter,
description=__description__+' ('+__prog__+') v.'+__version__+'\n'+__doc__)
parser.add_argument("input_seqpath",nargs=1, metavar="Sequence_filepath",
help="Input sequence filepath.")
parser.add_argument("input_strpath",nargs=1, metavar="Structure_filepath",
help="Input structure filepath or dir. If a directory is inserted, it will act on all structure files in such directory.")
parser.add_argument("-o","--outdir",nargs=1,metavar="Output_Directory",
help="Set output directory path. If not supplied, default is the one containing the input sequence.")
parser.add_argument('--version', action='version', version='%(prog)s '+ __version__)
return parser
def main():
parser = create_argument_parser()
args = parser.parse_args()
global logger
logger = ccl.crops_logger(level="info")
logger.info(ccl.welcome())
inseq=check_path(args.input_seqpath[0],'file')
instr=check_path(args.input_strpath[0])
if args.outdir is None:
outdir=check_path(os.path.dirname(inseq),'dir')
else:
outdir=check_path(os.path.join(args.outdir[0],''),'dir')
infixlbl=".crops.seq"
logger.info('Parsing sequence file '+inseq)
seqset=cin.parseseqfile(inseq)
logger.info('Done')
logger.info('Parsing structure file '+instr)
strset, fileset=cin.parsestrfile(instr)
logger.info('Done')
logger.info('Renumbering structure(s)...')
for pdbid, structure in strset.items():
if pdbid in seqset:
newstructure=cop.renumber_pdb(seqset[pdbid],structure)
outstr=outpathgen(outdir,subdir=pdbid,filename=pdbid+infixlbl+os.path.splitext(instr)[1],mksubdir=True)
#newstructure.write_pdb(outstr)
newstructure.write_minimal_pdb(outstr)
logger.info('Done\n')
return
if __name__ == "__main__":
import sys
import traceback
try:
main()
logger.info(ccl.ok())
sys.exit(0)
except Exception as e:
if not isinstance(e, SystemExit):
msg = "".join(traceback.format_exception(*sys.exc_info()))
logger.critical(msg)
sys.exit(1)
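# Typical invocation (added illustration; the file names are placeholders inferred
# from the argument parser above):
#   crops-renumber structure.fasta structure.pdb -o /path/to/output
# For a .pdb input, each renumbered structure is written as <outdir>/<pdbid>/<pdbid>.crops.seq.pdb.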
| 35.677778 | 146 | 0.690128 |
932e328a5dcade53e008ead756f0f221238049eb | 1,044 | py | Python | hackerrank/whatFlavors.py | stephanosterburg/coding_challenges | 601cf1360a7fdf068487106ba995955407365983 | ["MIT"] | null | null | null | hackerrank/whatFlavors.py | stephanosterburg/coding_challenges | 601cf1360a7fdf068487106ba995955407365983 | ["MIT"] | null | null | null | hackerrank/whatFlavors.py | stephanosterburg/coding_challenges | 601cf1360a7fdf068487106ba995955407365983 | ["MIT"] | null | null | null |
import os
# Sample Input
# 2
# 4
# 5
# 1 4 5 3 2
# 4
# 4
# 2 2 4 3
#
# Sample Output
# 1 4
# 1 2
#
# They pool together money = $4; there are five flavors available.
# The flavors 1 and 4 have a total cost of 1 + 3 = 4
#
# Print two space-separated integers denoting the respective indices for the two
# distinct flavors they choose to purchase in ascending order.
#
def whatFlavors(cost: list, money: int):
cost_dict = {}
for k, v in enumerate(cost):
if money - v in cost_dict:
print(str(cost_dict[money - v] + 1) + ' ' + str(k + 1))
return
else:
cost_dict[v] = k
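# Added illustration (mirrors the sample data in the header comment): for
# cost = [1, 4, 5, 3, 2] and money = 4 the complement lookup finds 1 + 3 = 4,
# so the function prints "1 4" (1-based indices, in ascending order).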
if __name__ == '__main__':
pwd = os.getcwd()
filename = open(pwd + "/ctci-ice-cream-parlor-testcases/input/input14.txt", 'r')
t = list(map(int, filename.readline().split()))[0]
for t_itr in range(t):
money = int(filename.readline().split()[0])
n = filename.readline().split()[0]
cost = list(map(int, filename.readline().rstrip().split()))
whatFlavors(cost, money)
| 23.727273 | 84 | 0.603448 |
e0e9e77359c916193a931d5b0a58d9f3a95fb631 | 147 | py | Python | Code_Challenges/print_array_pairs.py | fuse999/Python_Sandbox | 83d9c33a9c9e6e5cff40bbc6be525c9e604e9e41 | ["MIT"] | null | null | null | Code_Challenges/print_array_pairs.py | fuse999/Python_Sandbox | 83d9c33a9c9e6e5cff40bbc6be525c9e604e9e41 | ["MIT"] | null | null | null | Code_Challenges/print_array_pairs.py | fuse999/Python_Sandbox | 83d9c33a9c9e6e5cff40bbc6be525c9e604e9e41 | ["MIT"] | null | null | null |
array = ['Joe', 2, 'Ted', 4.98, 14, 'Sam', 'void *', '42', 'float', 'pointers', 5006]
# For each item in array
for x in array:
    # print each element on its own line
    print(x)
| 24.5 | 85 | 0.530612 |
c34d7b07e01b10fe851b321449913256d6930ac2 | 145 | py | Python | server/ums/tests/test_load.py | dealenx/hpccloud-kemsu | 42fc44b06385c6eb25a979477dcea53fe66cfbfa | ["Apache-2.0"] | 3 | 2020-11-06T10:59:51.000Z | 2022-01-23T18:10:27.000Z | server/ums/tests/test_load.py | dealenx/hpccloud-kemsu | 42fc44b06385c6eb25a979477dcea53fe66cfbfa | ["Apache-2.0"] | 11 | 2020-04-19T12:05:19.000Z | 2022-03-02T11:35:32.000Z | server/ums/tests/test_load.py | dealenx/hpccloud-kemsu | 42fc44b06385c6eb25a979477dcea53fe66cfbfa | ["Apache-2.0"] | null | null | null |
import pytest
from girder.plugin import loadedPlugins
@pytest.mark.plugin('ums')
def test_import(server):
assert 'ums' in loadedPlugins()
| 16.111111 | 39 | 0.758621 |
09db831cd5d590795a0587b4143063761cfd46b4 | 6,269 | py | Python | src/gemini3d/utils.py | qwsae10/pygemini | adc6b2401ac9fc8b7cb1fc8870322f730a3383a3 | ["Apache-2.0"] | null | null | null | src/gemini3d/utils.py | qwsae10/pygemini | adc6b2401ac9fc8b7cb1fc8870322f730a3383a3 | ["Apache-2.0"] | null | null | null | src/gemini3d/utils.py | qwsae10/pygemini | adc6b2401ac9fc8b7cb1fc8870322f730a3383a3 | ["Apache-2.0"] | null | null | null |
from __future__ import annotations
import subprocess
import os
import shutil
from pathlib import Path
from datetime import datetime, timedelta
import typing as T
import logging
import importlib
import imp
import xarray
import numpy as np
Pathlike = T.Union[str, Path]
__all__ = ["to_datetime", "git_meta", "datetime2ymd_hourdec"]
def str2func(name: str, path: Path = None) -> T.Callable:
"""
expects one of (in priority order):
0. file in "path" (if present)
1. os.getcwd()/name.py containing function name()
    2. gemini3d.<foo> <foo>/name.py module file containing function name()
3. gemini3d.<foo> <foo>/__init__.py containing function name()
Examples:
1. os.getcwd()/perturb.py with function perturb()
2. gemini3d.efield.Efield_erf returns function Efield_erf()
"""
mod_name = ".".join(name.split(".")[:-1])
func_name = name.split(".")[-1]
if mod_name:
try:
# file with function of same name
mod = importlib.import_module(name)
return getattr(mod, func_name)
except (ModuleNotFoundError, AttributeError):
# __init__.py with function
mod = importlib.import_module(mod_name)
return getattr(mod, func_name)
else:
if path is not None:
fid = imp.find_module(name, [path]) # type: ignore
try:
mod = imp.load_module(name, *fid) # type: ignore
finally:
# higher except: may catch, so avoid resource leaks
if fid[0]:
fid[0].close()
else:
# file in current working directory
mod = importlib.import_module(func_name)
return getattr(mod, func_name)
def to_datetime(times: xarray.DataArray | np.datetime64 | datetime) -> datetime:
"""
Parameters
----------
    times : xarray time, numpy.datetime64, or datetime.datetime value to convert
    Returns
    -------
    time : datetime.datetime
"""
if isinstance(times, datetime):
time = times
elif isinstance(times, xarray.DataArray):
time = times.data.squeeze()[()] # numpy.datetime64
elif isinstance(times, np.datetime64):
time = times.squeeze()[()]
else:
raise TypeError("expected datetime-like value")
if isinstance(time, np.datetime64):
time = time.astype("datetime64[us]").astype(datetime)
return time
def git_meta(path: Path = None) -> dict[str, str]:
"""
provide metadata about a Git repo in a dictionary
Dev note: use subprocess.run to avoid crashing program when Git meta is missing or broken (shallow clone)
empty init in case can't read Git info
this avoids needless if statements in consumers
"""
git = shutil.which("git")
meta = {
"version": "",
"remote": "",
"branch": "",
"commit": "",
"porcelain": "false",
}
if not git:
return meta
if not path:
if __file__:
path = Path(__file__).resolve().parent
else:
return meta
ret = subprocess.run([git, "-C", str(path), "--version"], stdout=subprocess.PIPE, text=True)
if ret.returncode != 0:
logging.error("Git was not available or is too old")
return meta
meta["version"] = ret.stdout.strip()
ret = subprocess.run([git, "-C", str(path), "rev-parse"])
if ret.returncode != 0:
logging.error(f"{path} is not a Git repo.")
return meta
ret = subprocess.run(
[git, "-C", str(path), "rev-parse", "--abbrev-ref", "HEAD"],
stdout=subprocess.PIPE,
text=True,
)
if ret.returncode != 0:
logging.error(f"{path} could not determine Git branch")
return meta
meta["branch"] = ret.stdout.strip()
ret = subprocess.run(
[git, "-C", str(path), "remote", "get-url", "origin"], stdout=subprocess.PIPE, text=True
)
if ret.returncode != 0:
logging.error(f"{path} could not determine Git remote")
return meta
meta["remote"] = ret.stdout.strip()
ret = subprocess.run(
[git, "-C", str(path), "describe", "--tags"], stdout=subprocess.PIPE, text=True
)
if ret.returncode != 0:
ret = subprocess.run(
[git, "-C", str(path), "rev-parse", "--short", "HEAD"],
stdout=subprocess.PIPE,
text=True,
)
if ret.returncode != 0:
logging.error(f"{path} could not determine Git commit")
return meta
meta["commit"] = ret.stdout.strip()
ret = subprocess.run(
[git, "-C", str(path), "status", "--porcelain"], stdout=subprocess.PIPE, text=True
)
if ret.returncode != 0:
logging.error(f"{path} could not determine Git status")
msg = ret.stdout.strip()
meta["porcelain"] = "false" if msg else "true"
return meta
def get_cpu_count() -> int:
"""get a physical CPU count
Note: len(os.sched_getaffinity(0)) and multiprocessing.cpu_count don't help either
PSUtil is the most reliable, so we strongly recommend it.
Returns
-------
count: int
detect number of physical CPU
"""
import psutil
extradiv = 1
max_cpu = None
# without psutil, hyperthreaded CPU may overestimate physical count by factor of 2 (or more)
if psutil is not None:
max_cpu = psutil.cpu_count(logical=False)
if max_cpu is None:
max_cpu = psutil.cpu_count()
extradiv = 2
if max_cpu is None:
max_cpu = os.cpu_count()
if max_cpu is not None:
extradiv = 2
else:
max_cpu = 1
return max_cpu // extradiv
def datetime2ymd_hourdec(dt: datetime) -> str:
"""
convert datetime to ymd_hourdec string for filename stem
"""
dt = to_datetime(dt)
assert isinstance(dt, datetime), "expect scalar datetime"
return (
dt.strftime("%Y%m%d")
+ f"_{dt.hour*3600 + dt.minute*60 + dt.second + dt.microsecond/1e6:12.6f}"
)
def filename2datetime(path: Path) -> datetime:
"""
Gemini3D datafiles use a file naming pattern that we translate to a datetime
path need not exist.
"""
name = path.name
return datetime.strptime(name[:8], "%Y%m%d") + timedelta(seconds=float(name[9:21]))
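# Added round-trip sketch (assumes the file naming produced by datetime2ymd_hourdec above):
#   datetime2ymd_hourdec(datetime(2013, 2, 20, 5, 0))      # -> "20130220_18000.000000"
#   filename2datetime(Path("20130220_18000.000000.h5"))    # -> datetime(2013, 2, 20, 5, 0)
# 18000 seconds past midnight is 05:00, so the two functions invert each other.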
| 27.138528 | 109 | 0.598182 |
736ae91a29017e07c90acad235bf6c70e93ca0b2 | 461 | py | Python | setup.py | ccollins/pest | 99cd6bf47e86b3ff2d17d4c83ed564d61989edbb | ["MIT"] | 1 | 2016-07-27T12:57:06.000Z | 2016-07-27T12:57:06.000Z | setup.py | ccollins/pest | 99cd6bf47e86b3ff2d17d4c83ed564d61989edbb | ["MIT"] | null | null | null | setup.py | ccollins/pest | 99cd6bf47e86b3ff2d17d4c83ed564d61989edbb | ["MIT"] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
setup(
name="pest",
version="1.0.3",
include_package_data=True,
package_data = {'images':['*.*']},
packages=['pest'],
scripts=['pest/pester'],
install_requires=['pyobjc-framework-FSEvents','growl-py'],
description="Auto tester for python",
license = "MIT",
author="Chuck Collins",
author_email="chuck.collins@gmail.com",
url="http://github.com/ccollins/pest",
)
| 27.117647 | 62 | 0.646421 |
be7c7212db8dc68e35d6ad6bc4f50ea04543e4c7 | 21,551 | py | Python | tools/autograd/gen_autograd_functions.py | xiaohanhuang/pytorch | a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | ["Intel"] | 183 | 2018-04-06T21:10:36.000Z | 2022-03-30T15:05:24.000Z | tools/autograd/gen_autograd_functions.py | xiaohanhuang/pytorch | a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | ["Intel"] | 818 | 2020-02-07T02:36:44.000Z | 2022-03-31T23:49:44.000Z | tools/autograd/gen_autograd_functions.py | xiaohanhuang/pytorch | a31aea8eaa99a5ff72b5d002c206cd68d5467a5e | ["Intel"] | 58 | 2018-06-05T16:40:18.000Z | 2022-03-16T15:37:29.000Z |
# Generates C++ autograd functions for the derivatives of ATen operations
#
# This writes two files:
# Functions.h/cpp: subclasses of autograd::Node
# python_functions.h/cpp: Python bindings for the above classes
#
from .gen_inplace_or_view_type import VIEW_FUNCTIONS
from typing import List, Sequence, Tuple
from tools.codegen.api.autograd import (Derivative, DifferentiabilityInfo,
SavedAttribute, uses_retain_variables,
uses_single_grad)
from tools.codegen.api.types import (Binding, BaseCType, OptionalCType, tensorT, longT,
doubleT, scalarT, stringT, boolT, intArrayRefT,
tensorListT, MutRefCType, ListCType, ArrayRefCType)
from tools.codegen.code_template import CodeTemplate
from tools.codegen.utils import FileManager
from tools.codegen.model import Argument
FUNCTION_DECLARATION = CodeTemplate("""\
struct TORCH_API ${op} : public ${superclass} {
using ${superclass}::${superclass};
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "${op}"; }
void release_variables() override {
${thread_lock}
${release_variables}
}
${will_release_variables}
${saved_variables}
${saved_list_sizes}
};
""")
WILL_RELEASE_VARIABLES = CodeTemplate("""\
bool retain_variables = true;
void will_release_variables() override {
retain_variables = false;
}
""")
FUNCTION_DEFINITION = CodeTemplate("""\
variable_list ${op}::apply(variable_list&& grads) {
${thread_lock}
${asserts}
IndexRangeGenerator gen;
${compute_index_ranges}
variable_list grad_inputs(gen.size());
${body}
return grad_inputs;
}
""")
GRAD_INPUT_MASK = CodeTemplate("""\
auto grad_input_mask = std::array<bool, ${n}>{
${masks}
};\
""")
DERIVATIVE_SINGLE = CodeTemplate("""\
if (should_compute_output({ ${name}_ix })) {
auto grad_result = ${derivative};
copy_range(grad_inputs, ${name}_ix, grad_result);
}
""")
DERIVATIVE_MULTI_COPY_RANGE = CodeTemplate("""\
if (should_compute_output({ ${name}_ix })) {
copy_range(grad_inputs, ${name}_ix, std::get<${i}>(grad_result));
}
""")
DERIVATIVE_MULTI = CodeTemplate("""\
if (should_compute_output({ ${idx_ranges} })) {
${grad_input_mask}
auto grad_result = ${derivative};
${copy_ranges}
}
""")
# Generates python bindings
#
# This generates the definitions for:
# (1) The PyTypeObject for each backward grad_fn subclassing Node
# (2) The entry for PyTypeObject's tp_getset slot (an array of PyGetSetDef structs)
# We generate one PyGetSetDef struct for each of grad_fn's saved inputs and outputs
# Each PyGetSetDef has a function ptr to a getter, also defined here (3).
# (3) Getters for each of grad_fn's saved inputs and outputs.
#
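# Added illustration (hypothetical operator name, obtained by substituting into the
# templates below): for op="MulBackward0" and a saved input named "self",
# PY_GETSETDEF_STRUCT expands to roughly
#   {(char*)"_saved_self", (getter)THPMulBackward0_self_getter, nullptr, nullptr, nullptr}
# and GETTER_DEFINITION_SAVEDVAR emits the matching THPMulBackward0_self_getter function.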
PY_FUNCTION_DEFINITION = CodeTemplate("""\
static PyTypeObject ${op}Class;
addClass<${op}>(${op}Class, "${op}", ${op}_properties);
""")
PY_FUNCTION_PROPS_AND_GETTERS = CodeTemplate("""\
${all_getter_definitions}
static struct PyGetSetDef ${op}_properties[] = {
THP_FUNCTION_DEFAULT_PROPERTIES,
${all_getsetdef_structs}
{nullptr} /* sentinel */
};
""")
PY_GETSETDEF_STRUCT = CodeTemplate("""\
{(char*)"_saved_${name}", (getter)THP${op}_${name}_getter, nullptr, nullptr, nullptr}""")
PY_RAW_GETSETDEF_STRUCT = CodeTemplate("""\
{(char*)"_raw_saved_${name}", (getter)THP${op}_${name}_raw_getter, nullptr, nullptr, nullptr}""")
# Getter templates
GETTER_DEFINITION = CodeTemplate("""\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
auto prop = static_cast<${op}*>(self->cdata.get())->${name};
${body}
END_HANDLE_TH_ERRORS
}
""")
GETTER_DEFINITION_SAVEDVAR = CodeTemplate("""\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
const auto& prop = static_cast<${op}*>(self->cdata.get())->${name}_;
${body}
END_HANDLE_TH_ERRORS
}
""")
GETTER_DEFINITION_RAW_SAVEDVAR = CodeTemplate("""\
PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
const auto& prop = static_cast<${op}*>(self->cdata.get())->${name}_;
${body}
END_HANDLE_TH_ERRORS
}
""")
GETTER_DEFINITION_VEC_SAVEDVAR = CodeTemplate("""\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
const auto *node = static_cast<${op}*>(self->cdata.get());
const auto& prop = node->${name}_;
if (node->${name}_released_) {
PyErr_SetString(PyExc_RuntimeError, ERR_BACKWARD_TWICE);
return nullptr;
}
${body}
END_HANDLE_TH_ERRORS
}
""")
GETTER_DEFINITION_RAW_VEC_SAVEDVAR = CodeTemplate("""\
PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
const auto *node = static_cast<${op}*>(self->cdata.get());
const auto& prop = node->${name}_;
if (node->${name}_released_) {
PyErr_SetString(PyExc_RuntimeError, ERR_BACKWARD_TWICE);
return nullptr;
}
${body}
END_HANDLE_TH_ERRORS
}
""")
GETTER_DEFINITION_OPT = CodeTemplate("""\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
auto opt_prop = static_cast<${op}*>(self->cdata.get())->${name};
if (!opt_prop.has_value()) {
Py_RETURN_NONE;
}
auto prop = opt_prop.value();
${body}
END_HANDLE_TH_ERRORS
}
""")
GETTER_DEFINITION_OPT_ARRAYREF = CodeTemplate("""\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
auto opt_prop = static_cast<${op}*>(self->cdata.get())->${name};
if (!opt_prop.list.has_value()) {
Py_RETURN_NONE;
}
auto prop = opt_prop.list.value();
${body}
END_HANDLE_TH_ERRORS
}
""")
# Getter body
GETTER_BODY_SAVEDVAR = """\
return THPVariable_Wrap(prop.unpack(self->cdata));
"""
GETTER_BODY_RAW_SAVEDVAR = """\
pybind11::object obj = pybind11::cast(prop, pybind11::return_value_policy::reference);
return obj.release().ptr();
"""
GETTER_BODY_VEC_SAVEDVAR = """\
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
for (int i = 0; i < prop.size(); i++) {
PyTuple_SetItem(tup, (Py_ssize_t) i, THPVariable_Wrap(prop[i].unpack(self->cdata)));
}
return tup;
"""
GETTER_BODY_RAW_VEC_SAVEDVAR = """\
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
for (int i = 0; i < prop.size(); i++) {
pybind11::object obj = pybind11::cast(prop[i], pybind11::return_value_policy::reference);
PyTuple_SetItem(tup, (Py_ssize_t) i, obj.release().ptr());
}
return tup;
"""
GETTER_BODY_ARRAYREF_LONG = """\
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
for (int i = 0; i < prop.size(); i++) {
PyTuple_SetItem(tup, (Py_ssize_t) i, PyLong_FromUnsignedLong((uint64_t) prop[i]));
}
return tup;
"""
GETTER_BODY_ARRAYREF_DOUBLE = """\
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
for (int i = 0; i < prop.size(); i++) {
PyTuple_SetItem(tup, (Py_ssize_t) i, PyFloat_FromDouble((double) prop[i]));
}
return tup;
"""
GETTER_BODY_INT64_T = """\
return PyLong_FromUnsignedLong((int64_t) prop);
"""
GETTER_BODY_DOUBLE = """\
return PyFloat_FromDouble((double) prop);
"""
GETTER_BODY_BOOL = """\
if (prop) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
"""
GETTER_BODY_STRING = """\
return PyUnicode_FromStringAndSize(prop.data(), prop.size());
"""
GETTER_BODY_SCALAR = """\
if (prop.isComplex()) {
auto cprop = prop.to<c10::complex<double>>();
return PyComplex_FromDoubles(cprop.real(), cprop.imag());
} else if (prop.isFloatingPoint()) {
return PyFloat_FromDouble(prop.to<double>());
} else if (prop.isIntegral(/*includeBool=*/false)) {
return PyLong_FromLong(prop.to<int64_t>());
} else if (prop.isBoolean()) {
if (prop.to<bool>()) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
} else {
PyErr_SetString(PyExc_RuntimeError, "Unknown scalar type");
return nullptr;
}
"""
MISC_GETTER_DEFS = {
OptionalCType(BaseCType(longT)): (GETTER_DEFINITION_OPT, GETTER_BODY_INT64_T),
BaseCType(doubleT): (GETTER_DEFINITION, GETTER_BODY_DOUBLE),
OptionalCType(BaseCType(doubleT)): (GETTER_DEFINITION_OPT, GETTER_BODY_DOUBLE),
BaseCType(boolT): (GETTER_DEFINITION, GETTER_BODY_BOOL),
BaseCType(scalarT): (GETTER_DEFINITION, GETTER_BODY_SCALAR),
OptionalCType(BaseCType(scalarT)): (GETTER_DEFINITION_OPT, GETTER_BODY_SCALAR),
}
# These functions have backwards which cannot be traced, and so must have
# their backward functions traced opaquely.
# VIEW_FUNCTIONS are not traceable because they use as_strided, which
# has an untraceable backwards, see
# https://github.com/pytorch/pytorch/issues/4250
# TODO: This is probably not exhaustive, but it's a start
UNTRACEABLE_FUNCTIONS = VIEW_FUNCTIONS
def gen_autograd_functions_lib(
out: str,
differentiability_infos: Sequence[DifferentiabilityInfo],
template_path: str,
) -> None:
"""Functions.h and Functions.cpp body
These contain the auto-generated subclasses of torch::autograd::Node
    for every differentiable torch function.
"""
# only create an autograd function if we are actually going to calculate a derivative
infos = list(filter(lambda info: info.args_with_derivatives, differentiability_infos))
declarations = list(map(lambda f: process_function(f, FUNCTION_DECLARATION), infos))
definitions = list(map(lambda f: process_function(f, FUNCTION_DEFINITION), infos))
file_basename = 'Functions'
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
for suffix in ['.h', '.cpp']:
fname = file_basename + suffix
fm.write_with_template(fname, fname, lambda: {
'generated_comment': '@' + f'generated from {fm.template_dir}/' + fname,
'autograd_function_declarations': declarations,
'autograd_function_definitions': definitions,
})
def gen_autograd_functions_python(
out: str,
differentiability_infos: Sequence[DifferentiabilityInfo],
template_path: str,
) -> None:
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
num_shards = 5
fm.write('python_functions.h', lambda: {
'generated_comment': f'@generated from {fm.template_dir}/python_functions.h',
'shard_forward_declare': [
f"void initialize_autogenerated_functions_{i}();"
for i in range(num_shards)
],
'shard_call': [
f"initialize_autogenerated_functions_{i}();"
for i in range(num_shards)
]
})
infos = list(filter(lambda info: info.args_with_derivatives, differentiability_infos))
fm.write_sharded(
'python_functions.cpp',
infos,
key_fn=lambda info: info.name,
base_env={
'generated_comment': f'@generated from {fm.template_dir}/python_functions.cpp',
},
env_callable=lambda info: {
'py_function_initializers': [process_function(info, PY_FUNCTION_DEFINITION)],
'py_function_props_and_getters': [process_function(info, PY_FUNCTION_PROPS_AND_GETTERS)],
},
num_shards=num_shards,
sharded_keys={'py_function_initializers', 'py_function_props_and_getters'}
)
def process_function(info: DifferentiabilityInfo, template: CodeTemplate) -> str:
saved_variables: List[str] = []
release_variables: List[str] = []
saved_list_sizes: List[str] = []
unpack: List[str] = []
asserts: List[str] = []
compute_index_ranges: List[str] = []
getter_definitions: List[str] = []
py_getsetdef_structs: List[str] = []
for arg in info.args_with_derivatives:
if arg.type == 'at::TensorList' or arg.type == 'const c10::List<c10::optional<at::Tensor>> &':
size = f'{arg.name}_size_'
saved_list_sizes.append(f'size_t {arg.name}_size_;')
else:
size = '1'
compute_index_ranges.append(f'auto {arg.name}_ix = gen.range({size});')
def save_var(var: SavedAttribute, is_output: bool) -> None:
name = var.nctype.name
type = var.nctype.type
should_append_getsetdef = True
should_append_raw_getsetdef = False
if type == BaseCType(tensorT) or type == OptionalCType(BaseCType(tensorT)) or \
type == MutRefCType(OptionalCType(BaseCType(tensorT))) or \
(type == BaseCType(scalarT) and is_output):
saved_variables.append(f'SavedVariable {name}_;')
release_variables.append(f'{name}_.reset_data();')
ptr = 'shared_from_this()' if is_output else ''
unpack.append(f'auto {name} = {name}_.unpack({ptr});')
getter_definitions.append(GETTER_DEFINITION_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_SAVEDVAR))
getter_definitions.append(GETTER_DEFINITION_RAW_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_RAW_SAVEDVAR))
should_append_raw_getsetdef = True
elif type == BaseCType(tensorListT):
saved_variables.append(f'std::vector<SavedVariable> {name}_;')
saved_variables.append(f'bool {name}_released_ = false;')
# Just clear() is sufficient, we don't need to loop and clear each variable.
# Because the SavedVariable owns a tensor and a grad_fn, removing the SavedVariable makes them go away as well.
release_variables.append(f'{name}_.clear();')
release_variables.append(f'{name}_released_ = true;')
unpack.append(f'auto {name} = unpack_list({name}_);')
asserts.append(f'TORCH_CHECK(!{name}_released_, ERR_BACKWARD_TWICE);')
getter_definitions.append(GETTER_DEFINITION_VEC_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_VEC_SAVEDVAR))
getter_definitions.append(GETTER_DEFINITION_RAW_VEC_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_RAW_VEC_SAVEDVAR))
should_append_raw_getsetdef = True
elif type == ListCType(OptionalCType(BaseCType(tensorT))):
saved_variables.append(f'std::vector<SavedVariable> {name}_;')
saved_variables.append(f'bool {name}_released_ = false;')
# Just clear() is sufficient, we don't need to loop and clear each variable.
# Because the SavedVariable owns a tensor and a grad_fn, removing the SavedVariable makes them go away as well.
release_variables.append(f'{name}_.clear();')
release_variables.append(f'{name}_released_ = true;')
unpack.append(f'auto {name} = unpack_opt_list({name}_);')
asserts.append(f'TORCH_CHECK(!{name}_released_, ERR_BACKWARD_TWICE);')
getter_definitions.append(GETTER_DEFINITION_VEC_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_VEC_SAVEDVAR))
getter_definitions.append(GETTER_DEFINITION_RAW_VEC_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_RAW_VEC_SAVEDVAR))
should_append_raw_getsetdef = True
elif type == BaseCType(intArrayRefT):
saved_variables.append(f'std::vector<int64_t> {name};')
getter_definitions.append(GETTER_DEFINITION.substitute(
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG))
elif type == OptionalCType(BaseCType(intArrayRefT)):
saved_variables.append(f'c10::OptionalArray<int64_t> {name};')
getter_definitions.append(GETTER_DEFINITION_OPT_ARRAYREF.substitute(
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG))
elif type == OptionalCType(ArrayRefCType(BaseCType(doubleT))):
saved_variables.append(f'c10::OptionalArray<double> {name};')
getter_definitions.append(GETTER_DEFINITION_OPT_ARRAYREF.substitute(
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_DOUBLE))
elif type == BaseCType(longT):
saved_variables.append(f'{type.cpp_type()} {name} = 0;')
getter_definitions.append(GETTER_DEFINITION.substitute(
op=info.op, name=name, body=GETTER_BODY_INT64_T))
elif type == BaseCType(stringT):
saved_variables.append(f'std::string {name};')
getter_definitions.append(GETTER_DEFINITION.substitute(
op=info.op, name=name, body=GETTER_BODY_STRING))
elif type == OptionalCType(BaseCType(stringT)):
saved_variables.append(f'c10::optional<std::string> {name};')
getter_definitions.append(GETTER_DEFINITION_OPT.substitute(
op=info.op, name=name, body=GETTER_BODY_STRING))
else:
saved_variables.append(f'{type.cpp_type()} {name};')
if type in MISC_GETTER_DEFS:
getter_def, body = MISC_GETTER_DEFS[type]
getter_definitions.append(getter_def.substitute(op=info.op, name=name, body=body))
else:
# Types we don't expose python bindings to yet:
# TypeAndSize, at::ScalarType, TensorOptions, TensorGeometry,
# std::vector<std::vector<int64_t>>, std::vector<at::ScalarType>
should_append_getsetdef = False
if should_append_getsetdef:
py_getsetdef_structs.append(PY_GETSETDEF_STRUCT.substitute(op=info.op, name=name))
if should_append_raw_getsetdef:
py_getsetdef_structs.append(PY_RAW_GETSETDEF_STRUCT.substitute(op=info.op, name=name))
for var in info.all_saved_inputs:
save_var(var, is_output=False)
for var in info.all_saved_outputs:
save_var(var, is_output=True)
# lock the mutex when we release variables and in Node::apply to protect thread safety
# see Note [Thread Safety on Autograd Node]
if len(release_variables) > 0:
thread_lock = 'std::lock_guard<std::mutex> lock(mutex_);'
else:
thread_lock = ''
if uses_retain_variables(info):
will_release_variables = WILL_RELEASE_VARIABLES.substitute()
else:
will_release_variables = ''
body: List[str] = []
if uses_single_grad(info):
body.append('const auto& grad = grads[0];')
else:
# Generate aliases for gradients named for returned values.
body.extend(
f'const auto& {name} = grads[{info.available_named_gradients.index(name)}];'
for name in info.used_named_gradients)
def emit_derivative(
derivative: Derivative,
args_with_derivatives: Sequence[Binding],
) -> Tuple[bool, str]:
formula = derivative.formula
var_names = derivative.var_names
if len(var_names) == 1:
checks_any_grad_defined = False
if 'not_implemented' not in formula:
matching_args = [
arg for arg in args_with_derivatives
if arg.name == var_names[0]]
if len(matching_args) == 1:
# We can add undefined grad support if the input variable is a Tensor
arg = matching_args[0]
if isinstance(arg.argument, Argument) and str(arg.argument.type) in ('Tensor', 'Tensor?'):
formula = 'any_grad_defined ? (' + formula + ') : Tensor()'
checks_any_grad_defined = True
return (checks_any_grad_defined,
DERIVATIVE_SINGLE.substitute(name=var_names[0], derivative=formula))
else:
if 'grad_input_mask' in formula:
masks = [f'should_compute_output({{ {n}_ix }}),' for n in var_names]
grad_input_mask = GRAD_INPUT_MASK.substitute(masks=masks, n=len(var_names))
else:
grad_input_mask = ''
idx_ranges = ', '.join(f'{n}_ix' for n in var_names)
copy_ranges: List[str] = []
for i, n in enumerate(var_names):
copy_ranges.append(DERIVATIVE_MULTI_COPY_RANGE.substitute(name=n, i=i))
return False, DERIVATIVE_MULTI.substitute(
idx_ranges=idx_ranges, copy_ranges=copy_ranges,
derivative=formula,
grad_input_mask=grad_input_mask)
body.extend(unpack)
need_any_grad_defined_var = False
for derivative in info.derivatives:
checks_any_grad_defined, derivative_text = emit_derivative(derivative, info.args_with_derivatives)
body.append(derivative_text)
need_any_grad_defined_var |= checks_any_grad_defined
# Since single-output derivative formulas need to check if grads are
# defined, only perform the check once, before all the formulas
if need_any_grad_defined_var:
body.insert(-len(info.derivatives),
'bool any_grad_defined = any_variable_defined(grads);')
if info.name in UNTRACEABLE_FUNCTIONS:
superclass = 'Node'
else:
superclass = 'TraceableFunction'
all_getsetdef_structs = ",\n".join(py_getsetdef_structs) + "," if len(py_getsetdef_structs) != 0 else ""
all_getter_definitions = "\n".join(getter_definitions)
return template.substitute(
op=info.op,
compute_index_ranges=compute_index_ranges,
saved_variables=saved_variables,
release_variables=release_variables,
saved_list_sizes=saved_list_sizes,
asserts=asserts,
thread_lock=thread_lock,
will_release_variables=will_release_variables,
body=body,
superclass=superclass,
all_getter_definitions=all_getter_definitions,
all_getsetdef_structs=all_getsetdef_structs
)
| 38.621864 | 123 | 0.673611 |
6db2a1bbbeb568c4cc0a6c3ef0ab17039247966a | 5,303 | py | Python | utils/format_data.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | ["Apache-2.0"] | 1,540 | 2017-11-14T13:26:33.000Z | 2021-11-09T14:05:08.000Z | utils/format_data.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | ["Apache-2.0"] | 599 | 2017-11-14T13:19:12.000Z | 2021-11-09T01:58:26.000Z | utils/format_data.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | ["Apache-2.0"] | 449 | 2017-11-14T12:48:46.000Z | 2021-11-06T09:34:33.000Z |
#!/usr/bin/env python3
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""format manifest with more metadata."""
import argparse
import functools
import json
import jsonlines
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.frontend.utility import load_cmvn
from paddlespeech.s2t.io.utility import feat_type
from paddlespeech.s2t.utils.utility import add_arguments
from paddlespeech.s2t.utils.utility import print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('cmvn_path', str,
'examples/librispeech/data/mean_std.json',
"Filepath of cmvn.")
add_arg('unit_type', str, "char", "Unit type, e.g. char, word, spm")
add_arg('vocab_path', str,
'examples/librispeech/data/vocab.txt',
"Filepath of the vocabulary.")
add_arg('manifest_paths', str,
None,
"Filepaths of manifests for building vocabulary. "
"You can provide multiple manifest files.",
nargs='+',
required=True)
# bpe
add_arg('spm_model_prefix', str, None,
"spm model prefix, spm_model_%(bpe_mode)_%(count_threshold), only need when `unit_type` is spm")
add_arg('output_path', str, None, "filepath of formatted manifest.", required=True)
# yapf: disable
args = parser.parse_args()
def main():
print_arguments(args, globals())
fout = open(args.output_path, 'w', encoding='utf-8')
# get feat dim
filetype = args.cmvn_path.split(".")[-1]
mean, istd = load_cmvn(args.cmvn_path, filetype=filetype)
feat_dim = mean.shape[0] #(D)
print(f"Feature dim: {feat_dim}")
text_feature = TextFeaturizer(args.unit_type, args.vocab_path, args.spm_model_prefix)
vocab_size = text_feature.vocab_size
print(f"Vocab size: {vocab_size}")
    # jsonlines records look like this
# {
# "input": [{"name": "input1", "shape": (100, 83), "feat": "xxx.ark:123"}],
# "output": [{"name":"target1", "shape": (40, 5002), "text": "a b c de"}],
# "utt2spk": "111-2222",
# "utt": "111-2222-333"
# }
count = 0
for manifest_path in args.manifest_paths:
with jsonlines.open(str(manifest_path), 'r') as reader:
manifest_jsons = list(reader)
for line_json in manifest_jsons:
output_json = {
"input": [],
"output": [],
'utt': line_json['utt'],
'utt2spk': line_json.get('utt2spk', 'global'),
}
# output
line = line_json['text']
if isinstance(line, str):
# only one target
tokens = text_feature.tokenize(line)
tokenids = text_feature.featurize(line)
output_json['output'].append({
'name': 'target1',
'shape': (len(tokenids), vocab_size),
'text': line,
'token': ' '.join(tokens),
'tokenid': ' '.join(map(str, tokenids)),
})
else:
# isinstance(line, list), multi target in one vocab
for i, item in enumerate(line, 1):
tokens = text_feature.tokenize(item)
tokenids = text_feature.featurize(item)
output_json['output'].append({
'name': f'target{i}',
'shape': (len(tokenids), vocab_size),
'text': item,
'token': ' '.join(tokens),
'tokenid': ' '.join(map(str, tokenids)),
})
# input
line = line_json['feat']
if isinstance(line, str):
# only one input
feat_shape = line_json['feat_shape']
assert isinstance(feat_shape, (list, tuple)), type(feat_shape)
filetype = feat_type(line)
if filetype == 'sound':
feat_shape.append(feat_dim)
else: # kaldi
raise NotImplementedError('no support kaldi feat now!')
output_json['input'].append({
"name": "input1",
"shape": feat_shape,
"feat": line,
"filetype": filetype,
})
else:
# isinstance(line, list), multi input
raise NotImplementedError("not support multi input now!")
fout.write(json.dumps(output_json) + '\n')
count += 1
print(f"{args.manifest_paths} Examples number: {count}")
fout.close()
if __name__ == '__main__':
main()
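# Example invocation (added illustration; paths are placeholders patterned on the
# argument names and defaults above):
#   python utils/format_data.py --cmvn_path data/mean_std.json --unit_type char \
#       --vocab_path data/vocab.txt --manifest_paths data/manifest.train.raw \
#       --output_path data/manifest.train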
| 37.083916 | 101 | 0.577975 |