Dataset schema (column, dtype, observed min/max):

    hexsha                                     stringlengths   40 – 40
    size                                       int64           3 – 1.03M
    ext                                        stringclasses   10 values
    lang                                       stringclasses   1 value
    max_stars_repo_path                        stringlengths   3 – 972
    max_stars_repo_name                        stringlengths   6 – 130
    max_stars_repo_head_hexsha                 stringlengths   40 – 78
    max_stars_repo_licenses                    listlengths     1 – 10
    max_stars_count                            int64           1 – 191k
    max_stars_repo_stars_event_min_datetime    stringlengths   24 – 24
    max_stars_repo_stars_event_max_datetime    stringlengths   24 – 24
    max_issues_repo_path                       stringlengths   3 – 972
    max_issues_repo_name                       stringlengths   6 – 130
    max_issues_repo_head_hexsha                stringlengths   40 – 78
    max_issues_repo_licenses                   listlengths     1 – 10
    max_issues_count                           int64           1 – 116k
    max_issues_repo_issues_event_min_datetime  stringlengths   24 – 24
    max_issues_repo_issues_event_max_datetime  stringlengths   24 – 24
    max_forks_repo_path                        stringlengths   3 – 972
    max_forks_repo_name                        stringlengths   6 – 130
    max_forks_repo_head_hexsha                 stringlengths   40 – 78
    max_forks_repo_licenses                    listlengths     1 – 10
    max_forks_count                            int64           1 – 105k
    max_forks_repo_forks_event_min_datetime    stringlengths   24 – 24
    max_forks_repo_forks_event_max_datetime    stringlengths   24 – 24
    content                                    stringlengths   3 – 1.03M
    avg_line_length                            float64         1.13 – 941k
    max_line_length                            int64           2 – 941k
    alphanum_fraction                          float64         0 – 1
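This column layout matches the per-file records of large source-code corpora; the dump itself does not name its source, so the dataset path below is a placeholder assumption. A minimal sketch of how rows of this shape could be streamed with the Hugging Face datasets library:

```python
# Sketch: streaming records with the schema above (dataset name is an assumption).
from datasets import load_dataset

ds = load_dataset("bigcode/the-stack-dedup", data_dir="data/python",
                  split="train", streaming=True)
for row in ds.take(3):
    # each row carries the repo metadata plus the raw file text in `content`
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```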
Record 1: jomlearn2/clock, timer.py

    hexsha: ebae5eb2311cc50c4233fa23e995ee509d6c6d69
    size: 1,197 bytes, ext: py, lang: Python
    repo_path: timer.py, repo_name: jomlearn2/clock
    head_hexsha: 6ce3a35841508adf5310598e69ff9f70d95e5a3f, licenses: ["CC0-1.0"]
    (identical across the max_stars_*, max_issues_*, and max_forks_* field groups)
    max_stars_count / max_issues_count / max_forks_count: null, all event datetimes: null

content:
```python
# import system and name from os
from os import system, name
import time
from datetime import datetime, timedelta


# defining setcount function / setting the countdown
def setcount():
    global hrs
    global mins
    global secs
    global totalsecs
    print('Set the countdown timer:')
    hrs = int(input('hours: '))
    mins = int(input('minutes: '))
    secs = int(input('seconds: '))
    totalsecs = 3600 * hrs + 60 * mins + secs


# defining countdown function / running the countdown
def countdown():
    run = input('Start? (y/n) > ')
    # Only run if the user types in "y"
    if run == "y":
        ltotalsecs = totalsecs
        while ltotalsecs != 0:
            sec = timedelta(seconds=int(ltotalsecs))
            d = datetime(1, 1, 1) + sec
            print("%d hours %d minutes %d seconds left" % (d.hour, d.minute, d.second))
            # delay for a second
            time.sleep(1)
            # decrement the local seconds total
            ltotalsecs -= 1
            # clearing the previous statement
            clear()
        if ltotalsecs == 0:
            print('Time is UP!')


# defining clear function
def clear():
    # for windows
    if name == 'nt':
        _ = system('cls')
    # for mac and linux (here, os.name is 'posix')
    else:
        _ = system('clear')


setcount()
countdown()
```
    avg_line_length: 24.9375, max_line_length: 78, alphanum_fraction: 0.645781
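The three trailing statistics are derived from `content` alone. A small illustrative sketch of how they can be recomputed; the exact definitions used by the dataset builder are an assumption, though they are consistent with this record (1197 bytes / 48 lines = 24.9375):

```python
# Illustrative recomputation of the per-record statistics (assumed definitions).
def file_stats(content: str):
    lines = content.split("\n")
    avg_line_length = len(content) / len(lines)         # e.g. 1197 / 48 = 24.9375
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(c.isalnum() for c in content) / len(content)
    return avg_line_length, max_line_length, alphanum_fraction
```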
Record 2: DocketScience/WordVectorsForLaw, train.py

    hexsha: a89e5b0548fcd38f27b888bc42f9bde35077b713
    size: 3,765 bytes, ext: py, lang: Python
    repo_path: train.py, repo_name: DocketScience/WordVectorsForLaw
    head_hexsha: 057b0e87b9810aea3b7f071b5858ebbdb2cb30f8, licenses: ["Apache-2.0"]
    (identical across the max_stars_*, max_issues_*, and max_forks_* field groups)
    max_stars_count: 8 (events 2020-07-01T19:11:36.000Z to 2021-07-30T12:08:03.000Z)
    max_issues_count: 2 (events 2021-07-22T12:12:08.000Z to 2021-12-16T02:53:59.000Z)
    max_forks_count: 7 (events 2018-03-17T04:36:03.000Z to 2020-04-24T14:42:42.000Z)

content:
```python
from __future__ import print_function

import re
import os
import wget

from glove import Glove
from glove import Corpus
from cleanup import extract_text
from regexes import REGEXES, REGEX_TOKENS
from nltk.tokenize import word_tokenize

'''
train.py
--------
This module trains a GloVe model on a given legal corpus
to build legal domain-specific word vectors.

The program first preprocesses the legal opinions using
a series of regexes and then calls GloVe functions to
output word vectors. The final trained model is saved
into the current directory as "LeGlove.model".

The following open-source GitHub repository was used and adapted:
https://github.com/maciejkula/glove-python

The original GloVe project can be found here:
https://github.com/stanfordnlp/GloVe
'''

# Constants
CONTEXT_WINDOW = 10     # length of the (symmetric) context window used for cooccurrence
LEARNING_RATE = 0.05    # learning rate used for model training
NUM_COMPONENTS = 100    # number of components/dimension of output word vectors


## LeGlove #####################################################################################

def tokenize_text(plain_text):
    '''
    This function accepts a string representation
    of a legal document as input. It returns a
    tokenized form of the input after pre-processing.
    '''

    # Clean plain text by replacing all regex matches
    # with corresponding tokens
    cleaned_text = plain_text
    for idx, regex in enumerate(REGEXES):
        cleaned_text = re.sub(regex, REGEX_TOKENS[idx], cleaned_text, flags=re.IGNORECASE)

    # Use NLTK tokenizer to return tokenized form of cleaned text
    tokens = word_tokenize(cleaned_text.lower())
    return tokens

def read_corpus(data_dir):
    '''
    This function returns a generator of lists
    of pre-processed tokens over all files in
    the given data directory.
    '''

    num_files_read = 0
    for juris_dir in os.listdir(data_dir):

        # Avoid hidden files in directory
        if (juris_dir.startswith('.')):
            continue

        juris_dir_path = os.path.join(data_dir, juris_dir)
        if (not os.path.isdir(juris_dir_path)):
            continue

        print('Reading %s...' % juris_dir)
        for json_file in os.listdir(juris_dir_path):
            if (not json_file.endswith('.json')):
                continue
            num_files_read += 1
            if (num_files_read % 1e3 == 0):
                print("%d json files read..." % num_files_read)
            json_file_path = os.path.join(juris_dir_path, json_file)
            plain_text = extract_text(json_file_path)
            if (plain_text != ''):
                tokens = tokenize_text(plain_text)
                yield tokens

def train_and_save_model(data_dir, model_name='LeGlove', num_epochs=10, parallel_threads=1):
    '''
    This function processes all the data into
    a training corpus and fits a GloVe model to
    this corpus.

    Parameters:
        data_dir (string):      master directory containing all jurisdiction-level directories
        model_name (string):    name of model to be used for output
        num_epochs (int):       number of epochs for which to train model
        parallel_threads (int): number of parallel threads to use for training

    The trained model is saved as "[model_name].model"
    into the current directory.
    '''

    corpus_model = Corpus()
    corpus_model.fit(read_corpus(data_dir), window=CONTEXT_WINDOW)

    glove = Glove(no_components=NUM_COMPONENTS, learning_rate=LEARNING_RATE)
    glove.fit(corpus_model.matrix, epochs=num_epochs, no_threads=parallel_threads, verbose=True)
    glove.add_dictionary(corpus_model.dictionary)

    glove.save(model_name + '.model')
```
    avg_line_length: 35.186916, max_line_length: 98, alphanum_fraction: 0.677556
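Given the glove-python package referenced in the docstring, training and then querying the saved model might look like the following sketch; the data directory and query word are hypothetical:

```python
# Hypothetical driver for train.py (directory and query word are placeholders).
from glove import Glove

from train import train_and_save_model

train_and_save_model('opinions/', model_name='LeGlove', num_epochs=10, parallel_threads=4)
model = Glove.load('LeGlove.model')
print(model.most_similar('plaintiff', number=5))  # nearest neighbors among the legal word vectors
```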
Record 3: hrmhatef/ccxt, python/ccxt/async_support/ftx.py

    hexsha: ef41ef81c9b055b4e31b3f97f55b1884f6be3a29
    size: 85,752 bytes, ext: py, lang: Python
    repo_path: python/ccxt/async_support/ftx.py, repo_name: hrmhatef/ccxt
    head_hexsha: 3fb0675670f0d9456a25de979e940553a7c8df0e, licenses: ["MIT"]
    (identical across the max_stars_*, max_issues_*, and max_forks_* field groups)
    max_stars_count: null, max_issues_count: null
    max_forks_count: 1 (events 2021-09-16T08:33:31.000Z to 2021-09-16T08:33:31.000Z)

content:
```python
# -*- coding: utf-8 -*-

# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code

from ccxt.async_support.base.exchange import Exchange

# -----------------------------------------------------------------------------

try:
    basestring  # Python 2
except NameError:
    basestring = str  # Python 3

import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import DuplicateOrderId
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise


class ftx(Exchange):

    def describe(self):
        return self.deep_extend(super(ftx, self).describe(), {
            'id': 'ftx',
            'name': 'FTX',
            'countries': ['HK'],
            'rateLimit': 100,
            'certified': True,
            'pro': True,
            'hostname': 'ftx.com',  # or ftx.us
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/67149189-df896480-f2b0-11e9-8816-41593e17f9ec.jpg',
                'www': 'https://ftx.com',
                'api': {
                    'public': 'https://{hostname}',
                    'private': 'https://{hostname}',
                },
                'doc': 'https://github.com/ftexchange/ftx',
                'fees': 'https://ftexchange.zendesk.com/hc/en-us/articles/360024479432-Fees',
                'referral': {
                    'url': 'https://ftx.com/#a=ccxt',
                    'discount': 0.05,
                },
            },
            'has': {
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createOrder': True,
                'editOrder': True,
                'fetchBalance': True,
                'fetchClosedOrders': None,
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchDeposits': True,
                'fetchFundingFees': None,
                'fetchFundingRate': None,
                'fetchFundingHistory': True,
                'fetchFundingRateHistory': True,
                'fetchFundingRates': None,
                'fetchIndexOHLCV': True,
                'fetchMarkets': True,
                'fetchMarkOHLCV': False,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': True,
                'fetchPositions': True,
                'fetchPremiumIndexOHLCV': False,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTime': False,
                'fetchTrades': True,
                'fetchTradingFees': True,
                'fetchWithdrawals': True,
                'setLeverage': True,
                'setMarginMode': False,  # FTX only supports cross margin
                'withdraw': True,
            },
            'timeframes': {
                '15s': '15',
                '1m': '60',
                '5m': '300',
                '15m': '900',
                '1h': '3600',
                '4h': '14400',
                '1d': '86400',
                '3d': '259200',
                '1w': '604800',
                '2w': '1209600',
                '1M': '2592000',
            },
            'api': {
                'public': {
                    'get': [
                        'coins',
                        # markets
                        'markets',
                        'markets/{market_name}',
                        'markets/{market_name}/orderbook',  # ?depth={depth}
                        'markets/{market_name}/trades',  # ?limit={limit}&start_time={start_time}&end_time={end_time}
                        'markets/{market_name}/candles',  # ?resolution={resolution}&limit={limit}&start_time={start_time}&end_time={end_time}
                        # futures
                        'futures',
                        'futures/{future_name}',
                        'futures/{future_name}/stats',
                        'funding_rates',
                        'indexes/{index_name}/weights',
                        'expired_futures',
                        'indexes/{market_name}/candles',  # ?resolution={resolution}&limit={limit}&start_time={start_time}&end_time={end_time}
                        # wallet
                        'wallet/coins',
                        # leverage tokens
                        'lt/tokens',
                        'lt/{token_name}',
                        # etfs
                        'etfs/rebalance_info',
                        # options
                        'options/requests',
                        'options/trades',
                        'options/historical_volumes/BTC',
                        'stats/24h_options_volume',
                        'options/open_interest/BTC',
                        'options/historical_open_interest/BTC',
                        # spot margin
                        'spot_margin/history',
                        'spot_margin/borrow_summary',
                        # nfts
                        'nft/nfts',
                        'nft/{nft_id}',
                        'nft/{nft_id}/trades',
                        'nft/all_trades',
                        'nft/{nft_id}/account_info',
                        'nft/collections',
                        # ftx pay
                        'ftxpay/apps/{user_specific_id}/details',
                    ],
                    'post': [
                        'ftxpay/apps/{user_specific_id}/orders',
                    ],
                },
                'private': {
                    'get': [
                        # subaccounts
                        'subaccounts',
                        'subaccounts/{nickname}/balances',
                        # account
                        'account',
                        'positions',
                        # wallet
                        'wallet/balances',
                        'wallet/all_balances',
                        'wallet/deposit_address/{coin}',  # ?method={method}
                        'wallet/deposits',
                        'wallet/withdrawals',
                        'wallet/airdrops',
                        'wallet/withdrawal_fee',
                        'wallet/saved_addresses',
                        # orders
                        'orders',  # ?market={market}
                        'orders/history',  # ?market={market}
                        'orders/{order_id}',
                        'orders/by_client_id/{client_order_id}',
                        # conditional orders
                        'conditional_orders',  # ?market={market}
                        'conditional_orders/{conditional_order_id}/triggers',
                        'conditional_orders/history',  # ?market={market}
                        'fills',  # ?market={market}
                        'funding_payments',
                        # leverage tokens
                        'lt/balances',
                        'lt/creations',
                        'lt/redemptions',
                        # options
                        'options/my_requests',
                        'options/requests/{request_id}/quotes',
                        'options/my_quotes',
                        'options/account_info',
                        'options/positions',
                        'options/fills',
                        # staking
                        'staking/stakes',
                        'staking/unstake_requests',
                        'staking/balances',
                        'staking/staking_rewards',
                        # otc
                        'otc/quotes/{quoteId}',
                        # spot margin
                        'spot_margin/borrow_rates',
                        'spot_margin/lending_rates',
                        'spot_margin/market_info',  # ?market={market}
                        'spot_margin/borrow_history',
                        'spot_margin/lending_history',
                        'spot_margin/offers',
                        'spot_margin/lending_info',
                        # nfts
                        'nft/balances',
                        'nft/bids',
                        'nft/deposits',
                        'nft/withdrawals',
                        'nft/fills',
                        'nft/gallery/{gallery_id}',
                        'nft/gallery_settings',
                        # latency statistics
                        'stats/latency_stats',
                    ],
                    'post': [
                        # subaccounts
                        'subaccounts',
                        'subaccounts/update_name',
                        'subaccounts/transfer',
                        # account
                        'account/leverage',
                        # wallet
                        'wallet/withdrawals',
                        'wallet/saved_addresses',
                        # orders
                        'orders',
                        'conditional_orders',
                        'orders/{order_id}/modify',
                        'orders/by_client_id/{client_order_id}/modify',
                        'conditional_orders/{order_id}/modify',
                        # leverage tokens
                        'lt/{token_name}/create',
                        'lt/{token_name}/redeem',
                        # options
                        'options/requests',
                        'options/requests/{request_id}/quotes',
                        'options/quotes/{quote_id}/accept',
                        # staking
                        'staking/unstake_requests',
                        'srm_stakes/stakes',
                        # otc
                        'otc/quotes/{quote_id}/accept',
                        'otc/quotes',
                        # spot margin
                        'spot_margin/offers',
                        # nfts
                        'nft/offer',
                        'nft/buy',
                        'nft/auction',
                        'nft/edit_auction',
                        'nft/cancel_auction',
                        'nft/bids',
                        'nft/redeem',
                        'nft/gallery_settings',
                        # ftx pay
                        'ftxpay/apps/{user_specific_id}/orders',
                    ],
                    'delete': [
                        # subaccounts
                        'subaccounts',
                        # wallet
                        'wallet/saved_addresses/{saved_address_id}',
                        # orders
                        'orders/{order_id}',
                        'orders/by_client_id/{client_order_id}',
                        'orders',
                        'conditional_orders/{order_id}',
                        # options
                        'options/requests/{request_id}',
                        'options/quotes/{quote_id}',
                        # staking
                        'staking/unstake_requests/{request_id}',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': True,
                    'percentage': True,
                    'maker': self.parse_number('0.0002'),
                    'taker': self.parse_number('0.0007'),
                    'tiers': {
                        'taker': [
                            [self.parse_number('0'), self.parse_number('0.0007')],
                            [self.parse_number('2000000'), self.parse_number('0.0006')],
                            [self.parse_number('5000000'), self.parse_number('0.00055')],
                            [self.parse_number('10000000'), self.parse_number('0.0005')],
                            [self.parse_number('25000000'), self.parse_number('0.00045')],
                            [self.parse_number('50000000'), self.parse_number('0.0004')],
                        ],
                        'maker': [
                            [self.parse_number('0'), self.parse_number('0.0002')],
                            [self.parse_number('2000000'), self.parse_number('0.00015')],
                            [self.parse_number('5000000'), self.parse_number('0.0001')],
                            [self.parse_number('10000000'), self.parse_number('0.00005')],
                            [self.parse_number('25000000'), self.parse_number('0')],
                            [self.parse_number('50000000'), self.parse_number('0')],
                        ],
                    },
                },
                'funding': {
                    'withdraw': {},
                },
            },
            'exceptions': {
                'exact': {
                    'Please slow down': RateLimitExceeded,  # {"error":"Please slow down","success":false}
                    'Size too small for provide': InvalidOrder,  # {"error":"Size too small for provide","success":false}
                    'Not logged in': AuthenticationError,  # {"error":"Not logged in","success":false}
                    'Not enough balances': InsufficientFunds,  # {"error":"Not enough balances","success":false}
                    'InvalidPrice': InvalidOrder,  # {"error":"Invalid price","success":false}
                    'Size too small': InvalidOrder,  # {"error":"Size too small","success":false}
                    'Size too large': InvalidOrder,  # {"error":"Size too large","success":false}
                    'Missing parameter price': InvalidOrder,  # {"error":"Missing parameter price","success":false}
                    'Order not found': OrderNotFound,  # {"error":"Order not found","success":false}
                    'Order already closed': InvalidOrder,  # {"error":"Order already closed","success":false}
                    'Trigger price too high': InvalidOrder,  # {"error":"Trigger price too high","success":false}
                    'Trigger price too low': InvalidOrder,  # {"error":"Trigger price too low","success":false}
                    'Order already queued for cancellation': CancelPending,  # {"error":"Order already queued for cancellation","success":false}
                    'Duplicate client order ID': DuplicateOrderId,  # {"error":"Duplicate client order ID","success":false}
                    'Spot orders cannot be reduce-only': InvalidOrder,  # {"error":"Spot orders cannot be reduce-only","success":false}
                    'Invalid reduce-only order': InvalidOrder,  # {"error":"Invalid reduce-only order","success":false}
                    'Account does not have enough balances': InsufficientFunds,  # {"success":false,"error":"Account does not have enough balances"}
                    'Not authorized for subaccount-specific access': PermissionDenied,  # {"success":false,"error":"Not authorized for subaccount-specific access"}
                },
                'broad': {
                    'Account does not have enough margin for order': InsufficientFunds,
                    'Invalid parameter': BadRequest,  # {"error":"Invalid parameter start_time","success":false}
                    'The requested URL was not found on the server': BadRequest,
                    'No such coin': BadRequest,
                    'No such subaccount': BadRequest,
                    'No such future': BadSymbol,
                    'No such market': BadSymbol,
                    'Do not send more than': RateLimitExceeded,
                    'An unexpected error occurred': ExchangeNotAvailable,  # {"error":"An unexpected error occurred, please try again later(58BC21C795).","success":false}
                    'Please retry request': ExchangeNotAvailable,  # {"error":"Please retry request","success":false}
                    'Please try again': ExchangeNotAvailable,  # {"error":"Please try again","success":false}
                    'Try again': ExchangeNotAvailable,  # {"error":"Try again","success":false}
                    'Only have permissions for subaccount': PermissionDenied,  # {"success":false,"error":"Only have permissions for subaccount *sub_name*"}
                },
            },
            'precisionMode': TICK_SIZE,
            'options': {
                # support for canceling conditional orders
                # https://github.com/ccxt/ccxt/issues/6669
                'cancelOrder': {
                    'method': 'privateDeleteOrdersOrderId',  # privateDeleteConditionalOrdersOrderId
                },
                'fetchOpenOrders': {
                    'method': 'privateGetOrders',  # privateGetConditionalOrders
                },
                'fetchOrders': {
                    'method':
                        'privateGetOrdersHistory',  # privateGetConditionalOrdersHistory
                },
                'sign': {
                    'ftx.com': 'FTX',
                    'ftx.us': 'FTXUS',
                },
                'networks': {
                    'SOL': 'sol',
                    'SPL': 'sol',
                    'TRX': 'trx',
                    'TRC20': 'trx',
                    'ETH': 'erc20',
                    'ERC20': 'erc20',
                    'OMNI': 'omni',
                    'BEP2': 'bep2',
                    'BNB': 'bep2',
                },
            },
        })

    async def fetch_currencies(self, params={}):
        response = await self.publicGetCoins(params)
        currencies = self.safe_value(response, 'result', [])
        #
        #     {
        #         "success":true,
        #         "result": [
        #             {"id":"BTC","name":"Bitcoin"},
        #             {"id":"ETH","name":"Ethereum"},
        #             {"id":"ETHMOON","name":"10X Long Ethereum Token","underlying":"ETH"},
        #             {"id":"EOSBULL","name":"3X Long EOS Token","underlying":"EOS"},
        #         ],
        #     }
        #
        result = {}
        for i in range(0, len(currencies)):
            currency = currencies[i]
            id = self.safe_string(currency, 'id')
            code = self.safe_currency_code(id)
            name = self.safe_string(currency, 'name')
            result[code] = {
                'id': id,
                'code': code,
                'info': currency,
                'type': None,
                'name': name,
                'active': None,
                'fee': None,
                'precision': None,
                'limits': {
                    'withdraw': {'min': None, 'max': None},
                    'amount': {'min': None, 'max': None},
                },
            }
        return result

    async def fetch_markets(self, params={}):
        response = await self.publicGetMarkets(params)
        #
        #     {
        #         'success': True,
        #         "result": [
        #             {
        #                 "ask":170.37,
        #                 "baseCurrency":null,
        #                 "bid":170.31,
        #                 "change1h":-0.019001554672655036,
        #                 "change24h":-0.024841165359738997,
        #                 "changeBod":-0.03816406029469881,
        #                 "enabled":true,
        #                 "last":170.37,
        #                 "name":"ETH-PERP",
        #                 "price":170.37,
        #                 "priceIncrement":0.01,
        #                 "quoteCurrency":null,
        #                 "quoteVolume24h":7742164.59889,
        #                 "sizeIncrement":0.001,
        #                 "type":"future",
        #                 "underlying":"ETH",
        #                 "volumeUsd24h":7742164.59889
        #             },
        #             {
        #                 "ask":170.44,
        #                 "baseCurrency":"ETH",
        #                 "bid":170.41,
        #                 "change1h":-0.018485459257126403,
        #                 "change24h":-0.023825887743413515,
        #                 "changeBod":-0.037605872388481086,
        #                 "enabled":true,
        #                 "last":172.72,
        #                 "name":"ETH/USD",
        #                 "price":170.44,
        #                 "priceIncrement":0.01,
        #                 "quoteCurrency":"USD",
        #                 "quoteVolume24h":382802.0252,
        #                 "sizeIncrement":0.001,
        #                 "type":"spot",
        #                 "underlying":null,
        #                 "volumeUsd24h":382802.0252
        #             },
        #         ],
        #     }
        #
        result = []
        markets = self.safe_value(response, 'result', [])
        for i in range(0, len(markets)):
            market = markets[i]
            id = self.safe_string(market, 'name')
            baseId = self.safe_string_2(market, 'baseCurrency', 'underlying')
            quoteId = self.safe_string(market, 'quoteCurrency', 'USD')
            type = self.safe_string(market, 'type')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            # check if a market is a spot or future market
            symbol = self.safe_string(market, 'name') if (type == 'future') else (base + '/' + quote)
            active = self.safe_value(market, 'enabled')
            sizeIncrement = self.safe_number(market, 'sizeIncrement')
            priceIncrement = self.safe_number(market, 'priceIncrement')
            precision = {
                'amount': sizeIncrement,
                'price': priceIncrement,
            }
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'type': type,
                'future': (type == 'future'),
                'spot': (type == 'spot'),
                'active': active,
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': sizeIncrement,
                        'max': None,
                    },
                    'price': {
                        'min': priceIncrement,
                        'max': None,
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                },
                'info': market,
            })
        return result

    def parse_ticker(self, ticker, market=None):
        #
        #     {
        #         "ask":171.29,
        #         "baseCurrency":null,  # base currency for spot markets
        #         "bid":171.24,
        #         "change1h":-0.0012244897959183673,
        #         "change24h":-0.031603346901854366,
        #         "changeBod":-0.03297013492914808,
        #         "enabled":true,
        #         "last":171.44,
# "name":"ETH-PERP", # "price":171.29, # "priceIncrement":0.01, # "quoteCurrency":null, # quote currency for spot markets # "quoteVolume24h":8570651.12113, # "sizeIncrement":0.001, # "type":"future", # "underlying":"ETH", # null for spot markets # "volumeUsd24h":8570651.12113, # } # symbol = None marketId = self.safe_string(ticker, 'name') if marketId in self.markets_by_id: market = self.markets_by_id[marketId] else: type = self.safe_string(ticker, 'type') if type == 'future': symbol = marketId else: base = self.safe_currency_code(self.safe_string(ticker, 'baseCurrency')) quote = self.safe_currency_code(self.safe_string(ticker, 'quoteCurrency')) if (base is not None) and (quote is not None): symbol = base + '/' + quote if (symbol is None) and (market is not None): symbol = market['symbol'] last = self.safe_number(ticker, 'last') timestamp = self.safe_timestamp(ticker, 'time', self.milliseconds()) percentage = self.safe_number(ticker, 'change24h') if percentage is not None: percentage *= 100 return self.safe_ticker({ 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'high': self.safe_number(ticker, 'high'), 'low': self.safe_number(ticker, 'low'), 'bid': self.safe_number(ticker, 'bid'), 'bidVolume': self.safe_number(ticker, 'bidSize'), 'ask': self.safe_number(ticker, 'ask'), 'askVolume': self.safe_number(ticker, 'askSize'), 'vwap': None, 'open': None, 'close': last, 'last': last, 'previousClose': None, 'change': None, 'percentage': percentage, 'average': None, 'baseVolume': None, 'quoteVolume': self.safe_number(ticker, 'quoteVolume24h'), 'info': ticker, }, market) async def fetch_ticker(self, symbol, params={}): await self.load_markets() market = self.market(symbol) request = { 'market_name': market['id'], } response = await self.publicGetMarketsMarketName(self.extend(request, params)) # # { # "success":true, # "result":{ # "ask":171.29, # "baseCurrency":null, # base currency for spot markets # "bid":171.24, # "change1h":-0.0012244897959183673, # "change24h":-0.031603346901854366, # "changeBod":-0.03297013492914808, # "enabled":true, # "last":171.44, # "name":"ETH-PERP", # "price":171.29, # "priceIncrement":0.01, # "quoteCurrency":null, # quote currency for spot markets # "quoteVolume24h":8570651.12113, # "sizeIncrement":0.001, # "type":"future", # "underlying":"ETH", # null for spot markets # "volumeUsd24h":8570651.12113, # } # } # result = self.safe_value(response, 'result', {}) return self.parse_ticker(result, market) async def fetch_tickers(self, symbols=None, params={}): await self.load_markets() response = await self.publicGetMarkets(params) # # { # 'success': True, # "result": [ # { # "ask":170.44, # "baseCurrency":"ETH", # "bid":170.41, # "change1h":-0.018485459257126403, # "change24h":-0.023825887743413515, # "changeBod":-0.037605872388481086, # "enabled":true, # "last":172.72, # "name":"ETH/USD", # "price":170.44, # "priceIncrement":0.01, # "quoteCurrency":"USD", # "quoteVolume24h":382802.0252, # "sizeIncrement":0.001, # "type":"spot", # "underlying":null, # "volumeUsd24h":382802.0252 # }, # ], # } # tickers = self.safe_value(response, 'result', []) return self.parse_tickers(tickers, symbols) async def fetch_order_book(self, symbol, limit=None, params={}): await self.load_markets() market = self.market(symbol) request = { 'market_name': market['id'], } if limit is not None: request['depth'] = limit # max 100, default 20 response = await self.publicGetMarketsMarketNameOrderbook(self.extend(request, params)) # # { # "success":true, # "result":{ # "asks":[ 
        #                 [171.95,279.865],
        #                 [171.98,102.42],
        #                 [171.99,124.11],
        #             ],
        #             "bids":[
        #                 [171.93,69.749],
        #                 [171.9,288.325],
        #                 [171.88,87.47],
        #             ],
        #         }
        #     }
        #
        result = self.safe_value(response, 'result', {})
        return self.parse_order_book(result, symbol)

    def parse_ohlcv(self, ohlcv, market=None):
        #
        #     {
        #         "close":177.23,
        #         "high":177.45,
        #         "low":177.2,
        #         "open":177.43,
        #         "startTime":"2019-10-17T13:27:00+00:00",
        #         "time":1571318820000.0,
        #         "volume":0.0
        #     }
        #
        return [
            self.safe_integer(ohlcv, 'time'),
            self.safe_number(ohlcv, 'open'),
            self.safe_number(ohlcv, 'high'),
            self.safe_number(ohlcv, 'low'),
            self.safe_number(ohlcv, 'close'),
            self.safe_number(ohlcv, 'volume'),
        ]

    def get_market_id(self, symbol, key, params={}):
        parts = self.get_market_params(symbol, key, params)
        return self.safe_string(parts, 1, symbol)

    def get_market_params(self, symbol, key, params={}):
        market = None
        marketId = None
        if symbol in self.markets:
            market = self.market(symbol)
            marketId = market['id']
        else:
            marketId = self.safe_string(params, key, symbol)
        return [market, marketId]

    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        await self.load_markets()
        market, marketId = self.get_market_params(symbol, 'market_name', params)
        request = {
            'resolution': self.timeframes[timeframe],
            'market_name': marketId,
        }
        price = self.safe_string(params, 'price')
        params = self.omit(params, 'price')
        # max 1501 candles, including the current candle when since is not specified
        limit = 1501 if (limit is None) else limit
        if since is None:
            request['end_time'] = self.seconds()
            request['limit'] = limit
            request['start_time'] = request['end_time'] - limit * self.parse_timeframe(timeframe)
        else:
            request['start_time'] = int(since / 1000)
            request['limit'] = limit
            request['end_time'] = self.sum(request['start_time'], limit * self.parse_timeframe(timeframe))
        method = 'publicGetMarketsMarketNameCandles'
        if price == 'index':
            if symbol in self.markets:
                request['market_name'] = market['baseId']
            method = 'publicGetIndexesMarketNameCandles'
        response = await getattr(self, method)(self.extend(request, params))
        #
        #     {
        #         "success": True,
        #         "result":[
        #             {
        #                 "close":177.23,
        #                 "high":177.45,
        #                 "low":177.2,
        #                 "open":177.43,
        #                 "startTime":"2019-10-17T13:27:00+00:00",
        #                 "time":1571318820000.0,
        #                 "volume":0.0
        #             },
        #             {
        #                 "close":177.26,
        #                 "high":177.33,
        #                 "low":177.23,
        #                 "open":177.23,
        #                 "startTime":"2019-10-17T13:28:00+00:00",
        #                 "time":1571318880000.0,
        #                 "volume":0.0
        #             },
        #         ],
        #     }
        #
        result = self.safe_value(response, 'result', [])
        return self.parse_ohlcvs(result, market, timeframe, since, limit)

    async def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        request = {
            'price': 'index',
        }
        return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))

    def parse_trade(self, trade, market=None):
        #
        # fetchTrades(public)
        #
        #     {
        #         "id":1715826,
        #         "liquidation":false,
        #         "price":171.62,
        #         "side":"buy",
        #         "size":2.095,
        #         "time":"2019-10-18T12:59:54.288166+00:00"
        #     }
        #
        # fetchMyTrades(private)
        #
        #     {
        #         "fee": 20.1374935,
        #         "feeRate": 0.0005,
        #         "feeCurrency": "USD",
        #         "future": "EOS-0329",
        #         "id": 11215,
        #         "liquidity": "taker",
        #         "market": "EOS-0329",
        #         "baseCurrency": null,
        #         "quoteCurrency": null,
        #         "orderId": 8436981,
        #         "price": 4.201,
        #         "side": "buy",
        #         "size": 9587,
        #         "time": "2019-03-27T19:15:10.204619+00:00",
        #         "type": "order"
        #     }
        #
        #     {
        #         "baseCurrency": "BTC",
        #         "fee": 0,
        #         "feeCurrency": "USD",
        #         "feeRate": 0,
        #         "future": null,
        #         "id": 664079556,
        #         "liquidity": "taker",
        #         "market": null,
"orderId": null, # "price": 34830.61359, # "quoteCurrency": "USD", # "side": "sell", # "size": 0.0005996, # "time": "2021-01-15T16:05:29.246135+00:00", # "tradeId": null, # "type": "otc" # } # # with -ve fee # { # "id": 1171258927, # "fee": -0.0000713875, # "side": "sell", # "size": 1, # "time": "2021-03-11T13:34:35.523627+00:00", # "type": "order", # "price": 14.2775, # "future": null, # "market": "SOL/USD", # "feeRate": -0.000005, # "orderId": 33182929044, # "tradeId": 582936801, # "liquidity": "maker", # "feeCurrency": "USD", # "baseCurrency": "SOL", # "quoteCurrency": "USD" # } # # # from OTC order # { # "id": 1172129651, # "fee": 0, # "side": "sell", # "size": 1.47568846, # "time": "2021-03-11T15:04:46.893383+00:00", # "type": "otc", # "price": 14.60932598, # "future": null, # "market": null, # "feeRate": 0, # "orderId": null, # "tradeId": null, # "liquidity": "taker", # "feeCurrency": "USD", # "baseCurrency": "BCHA", # "quoteCurrency": "USD" # } id = self.safe_string(trade, 'id') takerOrMaker = self.safe_string(trade, 'liquidity') marketId = self.safe_string(trade, 'market') symbol = None if marketId in self.markets_by_id: market = self.markets_by_id[marketId] symbol = market['symbol'] else: base = self.safe_currency_code(self.safe_string(trade, 'baseCurrency')) quote = self.safe_currency_code(self.safe_string(trade, 'quoteCurrency')) if (base is not None) and (quote is not None): symbol = base + '/' + quote else: symbol = marketId timestamp = self.parse8601(self.safe_string(trade, 'time')) priceString = self.safe_string(trade, 'price') amountString = self.safe_string(trade, 'size') price = self.parse_number(priceString) amount = self.parse_number(amountString) cost = self.parse_number(Precise.string_mul(priceString, amountString)) if (symbol is None) and (market is not None): symbol = market['symbol'] side = self.safe_string(trade, 'side') fee = None feeCost = self.safe_number(trade, 'fee') if feeCost is not None: feeCurrencyId = self.safe_string(trade, 'feeCurrency') feeCurrencyCode = self.safe_currency_code(feeCurrencyId) fee = { 'cost': feeCost, 'currency': feeCurrencyCode, 'rate': self.safe_number(trade, 'feeRate'), } orderId = self.safe_string(trade, 'orderId') return { 'info': trade, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'symbol': symbol, 'id': id, 'order': orderId, 'type': None, 'takerOrMaker': takerOrMaker, 'side': side, 'price': price, 'amount': amount, 'cost': cost, 'fee': fee, } async def fetch_trades(self, symbol, since=None, limit=None, params={}): await self.load_markets() market, marketId = self.get_market_params(symbol, 'market_name', params) request = { 'market_name': marketId, } if since is not None: request['start_time'] = int(since / 1000) # start_time doesn't work without end_time request['end_time'] = self.seconds() if limit is not None: request['limit'] = limit response = await self.publicGetMarketsMarketNameTrades(self.extend(request, params)) # # { # "success":true, # "result":[ # { # "id":1715826, # "liquidation":false, # "price":171.62, # "side":"buy", # "size":2.095, # "time":"2019-10-18T12:59:54.288166+00:00" # }, # { # "id":1715763, # "liquidation":false, # "price":171.89, # "side":"sell", # "size":1.477, # "time":"2019-10-18T12:58:38.443734+00:00" # }, # ], # } # result = self.safe_value(response, 'result', []) return self.parse_trades(result, market, since, limit) async def fetch_trading_fees(self, params={}): await self.load_markets() response = await self.privateGetAccount(params) # # { # "success": True, # "result": { # 
"backstopProvider": True, # "collateral": 3568181.02691129, # "freeCollateral": 1786071.456884368, # "initialMarginRequirement": 0.12222384240257728, # "liquidating": False, # "maintenanceMarginRequirement": 0.07177992558058484, # "makerFee": 0.0002, # "marginFraction": 0.5588433331419503, # "openMarginFraction": 0.2447194090423075, # "takerFee": 0.0005, # "totalAccountValue": 3568180.98341129, # "totalPositionSize": 6384939.6992, # "username": "user@domain.com", # "positions": [ # { # "cost": -31.7906, # "entryPrice": 138.22, # "future": "ETH-PERP", # "initialMarginRequirement": 0.1, # "longOrderSize": 1744.55, # "maintenanceMarginRequirement": 0.04, # "netSize": -0.23, # "openSize": 1744.32, # "realizedPnl": 3.39441714, # "shortOrderSize": 1732.09, # "side": "sell", # "size": 0.23, # "unrealizedPnl": 0, # }, # ], # }, # } # result = self.safe_value(response, 'result', {}) return { 'info': response, 'maker': self.safe_number(result, 'makerFee'), 'taker': self.safe_number(result, 'takerFee'), } async def fetch_funding_rate_history(self, symbol, limit=None, since=None, params={}): # # Gets a history of funding rates with their timestamps # (param) symbol: Future currency pair(e.g. "BTC-PERP") # (param) limit: Not used by ftx # (param) since: Unix timestamp in miliseconds for the time of the earliest requested funding rate # return: [{symbol, fundingRate, timestamp}] # await self.load_markets() market = self.market(symbol) request = { 'future': market['id'], } if since is not None: request['start_time'] = since / 1000 method = 'publicGetFundingRates' response = await getattr(self, method)(self.extend(request, params)) # # { # "success": True, # "result": [ # { # "future": "BTC-PERP", # "rate": 0.0025, # "time": "2019-06-02T08:00:00+00:00" # } # ] # } # result = self.safe_value(response, 'result') rates = [] for i in range(0, len(result)): rates.append({ 'symbol': self.safe_string(result[i], 'future'), 'fundingRate': self.safe_number(result[i], 'rate'), 'timestamp': self.parse8601(self.safe_string(result[i], 'time')), }) return self.sort_by(rates, 'timestamp') async def fetch_balance(self, params={}): await self.load_markets() response = await self.privateGetWalletBalances(params) # # { # "success": True, # "result": [ # { # "coin": "USDTBEAR", # "free": 2320.2, # "total": 2340.2 # }, # ], # } # result = { 'info': response, } balances = self.safe_value(response, 'result', []) for i in range(0, len(balances)): balance = balances[i] code = self.safe_currency_code(self.safe_string(balance, 'coin')) account = self.account() account['free'] = self.safe_string_2(balance, 'availableWithoutBorrow', 'free') account['total'] = self.safe_string(balance, 'total') result[code] = account return self.parse_balance(result) def parse_order_status(self, status): statuses = { 'new': 'open', 'open': 'open', 'closed': 'closed', # filled or canceled 'triggered': 'closed', } return self.safe_string(statuses, status, status) def parse_order(self, order, market=None): # # limit orders - fetchOrder, fetchOrders, fetchOpenOrders, createOrder, editOrder # # { # "createdAt": "2019-03-05T09:56:55.728933+00:00", # "filledSize": 0, # "future": "XRP-PERP", # "id": 9596912, # "market": "XRP-PERP", # "price": 0.306525, # "remainingSize": 31431, # "side": "sell", # "size": 31431, # "status": "open", # "type": "limit", # "reduceOnly": False, # "ioc": False, # "postOnly": False, # "clientId": null, # } # # market orders - fetchOrder, fetchOrders, fetchOpenOrders, createOrder # # { # "avgFillPrice": 2666.0, # "clientId": None, # 
"createdAt": "2020-02-12T00: 53: 49.009726+00: 00", # "filledSize": 0.0007, # "future": None, # "id": 3109208514, # "ioc": True, # "market": "BNBBULL/USD", # "postOnly": False, # "price": None, # "reduceOnly": False, # "remainingSize": 0.0, # "side": "buy", # "size": 0.0007, # "status": "closed", # "type": "market" # } # # createOrder(conditional, "stop", "trailingStop", or "takeProfit") # # { # "createdAt": "2019-03-05T09:56:55.728933+00:00", # "future": "XRP-PERP", # "id": 9596912, # "market": "XRP-PERP", # "triggerPrice": 0.306525, # "orderId": null, # "side": "sell", # "size": 31431, # "status": "open", # "type": "stop", # "orderPrice": null, # "error": null, # "triggeredAt": null, # "reduceOnly": False # } # # editOrder(conditional, stop, trailing stop, take profit) # # { # "createdAt": "2019-03-05T09:56:55.728933+00:00", # "future": "XRP-PERP", # "id": 9596912, # "market": "XRP-PERP", # "triggerPrice": 0.306225, # "orderId": null, # "side": "sell", # "size": 31431, # "status": "open", # "type": "stop", # "orderPrice": null, # "error": null, # "triggeredAt": null, # "reduceOnly": False, # "orderType": "market", # "filledSize": 0, # "avgFillPrice": null, # "retryUntilFilled": False # } # # canceled order with a closed status # # { # "avgFillPrice":null, # "clientId":null, # "createdAt":"2020-09-01T13:45:57.119695+00:00", # "filledSize":0.0, # "future":null, # "id":8553541288, # "ioc":false, # "liquidation":false, # "market":"XRP/USDT", # "postOnly":false, # "price":0.5, # "reduceOnly":false, # "remainingSize":0.0, # "side":"sell", # "size":46.0, # "status":"closed", # "type":"limit" # } # id = self.safe_string(order, 'id') timestamp = self.parse8601(self.safe_string(order, 'createdAt')) status = self.parse_order_status(self.safe_string(order, 'status')) amount = self.safe_number(order, 'size') filled = self.safe_number(order, 'filledSize') remaining = self.safe_number(order, 'remainingSize') if (remaining == 0.0) and (amount is not None) and (filled is not None): remaining = max(amount - filled, 0) if remaining > 0: status = 'canceled' symbol = None marketId = self.safe_string(order, 'market') if marketId is not None: if marketId in self.markets_by_id: market = self.markets_by_id[marketId] symbol = market['symbol'] else: # support for delisted market ids # https://github.com/ccxt/ccxt/issues/7113 symbol = marketId if (symbol is None) and (market is not None): symbol = market['symbol'] side = self.safe_string(order, 'side') type = self.safe_string(order, 'type') average = self.safe_number(order, 'avgFillPrice') price = self.safe_number_2(order, 'price', 'triggerPrice', average) cost = None if filled is not None and price is not None: cost = filled * price lastTradeTimestamp = self.parse8601(self.safe_string(order, 'triggeredAt')) clientOrderId = self.safe_string(order, 'clientId') stopPrice = self.safe_number(order, 'triggerPrice') postOnly = self.safe_value(order, 'postOnly') return { 'info': order, 'id': id, 'clientOrderId': clientOrderId, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'lastTradeTimestamp': lastTradeTimestamp, 'symbol': symbol, 'type': type, 'timeInForce': None, 'postOnly': postOnly, 'side': side, 'price': price, 'stopPrice': stopPrice, 'amount': amount, 'cost': cost, 'average': average, 'filled': filled, 'remaining': remaining, 'status': status, 'fee': None, 'trades': None, } async def create_order(self, symbol, type, side, amount, price=None, params={}): await self.load_markets() market = self.market(symbol) request = { 'market': market['id'], 'side': 
                side,  # "buy" or "sell"
            # 'price': 0.306525,  # send null for market orders
            'type': type,  # "limit", "market", "stop", "trailingStop", or "takeProfit"
            'size': float(self.amount_to_precision(symbol, amount)),
            # 'reduceOnly': False,  # optional, default is False
            # 'ioc': False,  # optional, default is False, limit or market orders only
            # 'postOnly': False,  # optional, default is False, limit or market orders only
            # 'clientId': 'abcdef0123456789',  # string, optional, client order id, limit or market orders only
        }
        clientOrderId = self.safe_string_2(params, 'clientId', 'clientOrderId')
        if clientOrderId is not None:
            request['clientId'] = clientOrderId
            params = self.omit(params, ['clientId', 'clientOrderId'])
        method = None
        if type == 'limit':
            method = 'privatePostOrders'
            request['price'] = float(self.price_to_precision(symbol, price))
        elif type == 'market':
            method = 'privatePostOrders'
            request['price'] = None
        elif (type == 'stop') or (type == 'takeProfit'):
            method = 'privatePostConditionalOrders'
            stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerPrice')
            if stopPrice is None:
                raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter or a triggerPrice parameter for ' + type + ' orders')
            else:
                params = self.omit(params, ['stopPrice', 'triggerPrice'])
                request['triggerPrice'] = float(self.price_to_precision(symbol, stopPrice))
            if price is not None:
                request['orderPrice'] = float(self.price_to_precision(symbol, price))  # optional, order type is limit if orderPrice is specified, otherwise market
        elif type == 'trailingStop':
            method = 'privatePostConditionalOrders'
            request['trailValue'] = float(self.price_to_precision(symbol, price))  # negative for "sell", positive for "buy"
        else:
            raise InvalidOrder(self.id + ' createOrder() does not support order type ' + type + ', only limit, market, stop, trailingStop, or takeProfit orders are supported')
        response = await getattr(self, method)(self.extend(request, params))
        #
        # orders
        #
        #     {
        #         "success": True,
        #         "result": [
        #             {
        #                 "createdAt": "2019-03-05T09:56:55.728933+00:00",
        #                 "filledSize": 0,
        #                 "future": "XRP-PERP",
        #                 "id": 9596912,
        #                 "market": "XRP-PERP",
        #                 "price": 0.306525,
        #                 "remainingSize": 31431,
        #                 "side": "sell",
        #                 "size": 31431,
        #                 "status": "open",
        #                 "type": "limit",
        #                 "reduceOnly": False,
        #                 "ioc": False,
        #                 "postOnly": False,
        #                 "clientId": null,
        #             }
        #         ]
        #     }
        #
        # conditional orders
        #
        #     {
        #         "success": True,
        #         "result": [
        #             {
        #                 "createdAt": "2019-03-05T09:56:55.728933+00:00",
        #                 "future": "XRP-PERP",
        #                 "id": 9596912,
        #                 "market": "XRP-PERP",
        #                 "triggerPrice": 0.306525,
        #                 "orderId": null,
        #                 "side": "sell",
        #                 "size": 31431,
        #                 "status": "open",
        #                 "type": "stop",
        #                 "orderPrice": null,
        #                 "error": null,
        #                 "triggeredAt": null,
        #                 "reduceOnly": False
        #             }
        #         ]
        #     }
        #
        result = self.safe_value(response, 'result', [])
        return self.parse_order(result, market)

    async def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
        await self.load_markets()
        market = self.market(symbol)
        request = {}
        method = None
        clientOrderId = self.safe_string_2(params, 'client_order_id', 'clientOrderId')
        triggerPrice = self.safe_number(params, 'triggerPrice')
        orderPrice = self.safe_number(params, 'orderPrice')
        trailValue = self.safe_number(params, 'trailValue')
        params = self.omit(params, ['client_order_id', 'clientOrderId', 'triggerPrice', 'orderPrice', 'trailValue'])
        triggerPriceIsDefined = (triggerPrice is not None)
        orderPriceIsDefined = (orderPrice is not None)
        trailValueIsDefined = (trailValue is not None)
        if triggerPriceIsDefined or orderPriceIsDefined or trailValueIsDefined:
            method = 'privatePostConditionalOrdersOrderIdModify'
            request['order_id'] = id
            if triggerPriceIsDefined:
                request['triggerPrice'] = float(self.price_to_precision(symbol, triggerPrice))
            if orderPriceIsDefined:
                # only for stop limit or take profit limit orders
                request['orderPrice'] = float(self.price_to_precision(symbol, orderPrice))
            if trailValueIsDefined:
                # negative for sell orders, positive for buy orders
                request['trailValue'] = float(self.price_to_precision(symbol, trailValue))
        else:
            if clientOrderId is None:
                method = 'privatePostOrdersOrderIdModify'
                request['order_id'] = id
            else:
                method = 'privatePostOrdersByClientIdClientOrderIdModify'
                request['client_order_id'] = clientOrderId
                # request['clientId'] = clientOrderId
            if price is not None:
                request['price'] = float(self.price_to_precision(symbol, price))
        if amount is not None:
            request['size'] = float(self.amount_to_precision(symbol, amount))
        response = await getattr(self, method)(self.extend(request, params))
        #
        # regular order
        #
        #     {
        #         "success": True,
        #         "result": {
        #             "createdAt": "2019-03-05T11:56:55.728933+00:00",
        #             "filledSize": 0,
        #             "future": "XRP-PERP",
        #             "id": 9596932,
        #             "market": "XRP-PERP",
        #             "price": 0.326525,
        #             "remainingSize": 31431,
        #             "side": "sell",
        #             "size": 31431,
        #             "status": "open",
        #             "type": "limit",
        #             "reduceOnly": False,
        #             "ioc": False,
        #             "postOnly": False,
        #             "clientId": null,
        #         }
        #     }
        #
        # conditional trigger order
        #
        #     {
        #         "success": True,
        #         "result": {
        #             "createdAt": "2019-03-05T09:56:55.728933+00:00",
        #             "future": "XRP-PERP",
        #             "id": 9596912,
        #             "market": "XRP-PERP",
        #             "triggerPrice": 0.306225,
        #             "orderId": null,
        #             "side": "sell",
        #             "size": 31431,
        #             "status": "open",
        #             "type": "stop",
        #             "orderPrice": null,
        #             "error": null,
        #             "triggeredAt": null,
        #             "reduceOnly": False,
        #             "orderType": "market",
        #             "filledSize": 0,
        #             "avgFillPrice": null,
        #             "retryUntilFilled": False
        #         }
        #     }
        #
        result = self.safe_value(response, 'result', {})
        return self.parse_order(result, market)

    async def cancel_order(self, id, symbol=None, params={}):
        await self.load_markets()
        request = {}
        # support for canceling conditional orders
        # https://github.com/ccxt/ccxt/issues/6669
        options = self.safe_value(self.options, 'cancelOrder', {})
        defaultMethod = self.safe_string(options, 'method', 'privateDeleteOrdersOrderId')
        method = self.safe_string(params, 'method', defaultMethod)
        type = self.safe_value(params, 'type')
        clientOrderId = self.safe_value_2(params, 'client_order_id', 'clientOrderId')
        if clientOrderId is None:
            request['order_id'] = int(id)
            if (type == 'stop') or (type == 'trailingStop') or (type == 'takeProfit'):
                method = 'privateDeleteConditionalOrdersOrderId'
        else:
            request['client_order_id'] = clientOrderId
            method = 'privateDeleteOrdersByClientIdClientOrderId'
        query = self.omit(params, ['method', 'type', 'client_order_id', 'clientOrderId'])
        response = await getattr(self, method)(self.extend(request, query))
        #
        #     {
        #         "success": True,
        #         "result": "Order queued for cancelation"
        #     }
        #
        result = self.safe_value(response, 'result', {})
        return result

    async def cancel_all_orders(self, symbol=None, params={}):
        await self.load_markets()
        request = {
            # 'market': market['id'],  # optional
            # 'conditionalOrdersOnly': False,  # cancel conditional orders only
            # 'limitOrdersOnly': False,  # cancel existing limit orders(non-conditional orders) only
        }
        marketId = self.get_market_id(symbol, 'market', params)
        if marketId is not None:
            request['market'] = marketId
        response = await self.privateDeleteOrders(self.extend(request, params))
        result = self.safe_value(response, 'result', {})
        #
        #     {
        #         "success": True,
        #         "result": "Orders queued for cancelation"
        #     }
        #
        return result

    async def fetch_order(self, id, symbol=None, params={}):
        await self.load_markets()
        request = {}
        clientOrderId = self.safe_value_2(params, 'client_order_id', 'clientOrderId')
        method = 'privateGetOrdersOrderId'
        if clientOrderId is None:
            request['order_id'] = id
        else:
            request['client_order_id'] = clientOrderId
            params = self.omit(params, ['client_order_id', 'clientOrderId'])
            method = 'privateGetOrdersByClientIdClientOrderId'
        response = await getattr(self, method)(self.extend(request, params))
        #
        #     {
        #         "success": True,
        #         "result": {
        #             "createdAt": "2019-03-05T09:56:55.728933+00:00",
        #             "filledSize": 10,
        #             "future": "XRP-PERP",
        #             "id": 9596912,
        #             "market": "XRP-PERP",
        #             "price": 0.306525,
        #             "avgFillPrice": 0.306526,
        #             "remainingSize": 31421,
        #             "side": "sell",
        #             "size": 31431,
        #             "status": "open",
        #             "type": "limit",
        #             "reduceOnly": False,
        #             "ioc": False,
        #             "postOnly": False,
        #             "clientId": null
        #         }
        #     }
        #
        result = self.safe_value(response, 'result', {})
        return self.parse_order(result)

    async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        await self.load_markets()
        request = {}
        market, marketId = self.get_market_params(symbol, 'market', params)
        if marketId is not None:
            request['market'] = marketId
        # support for canceling conditional orders
        # https://github.com/ccxt/ccxt/issues/6669
        options = self.safe_value(self.options, 'fetchOpenOrders', {})
        defaultMethod = self.safe_string(options, 'method', 'privateGetOrders')
        method = self.safe_string(params, 'method', defaultMethod)
        type = self.safe_value(params, 'type')
        if (type == 'stop') or (type == 'trailingStop') or (type == 'takeProfit'):
            method = 'privateGetConditionalOrders'
        query = self.omit(params, ['method', 'type'])
        response = await getattr(self, method)(self.extend(request, query))
        #
        #     {
        #         "success": True,
        #         "result": [
        #             {
        #                 "createdAt": "2019-03-05T09:56:55.728933+00:00",
        #                 "filledSize": 10,
        #                 "future": "XRP-PERP",
        #                 "id": 9596912,
        #                 "market": "XRP-PERP",
        #                 "price": 0.306525,
        #                 "avgFillPrice": 0.306526,
        #                 "remainingSize": 31421,
        #                 "side": "sell",
        #                 "size": 31431,
        #                 "status": "open",
        #                 "type": "limit",
        #                 "reduceOnly": False,
        #                 "ioc": False,
        #                 "postOnly": False,
        #                 "clientId": null
        #             }
        #         ]
        #     }
        #
        result = self.safe_value(response, 'result', [])
        return self.parse_orders(result, market, since, limit)

    async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        await self.load_markets()
        request = {}
        market, marketId = self.get_market_params(symbol, 'market', params)
        if marketId is not None:
            request['market'] = marketId
        if limit is not None:
            request['limit'] = limit  # default 100, max 100
        if since is not None:
            request['start_time'] = int(since / 1000)
        # support for canceling conditional orders
        # https://github.com/ccxt/ccxt/issues/6669
        options = self.safe_value(self.options, 'fetchOrders', {})
        defaultMethod = self.safe_string(options, 'method', 'privateGetOrdersHistory')
        method = self.safe_string(params, 'method', defaultMethod)
        type = self.safe_value(params, 'type')
        if (type == 'stop') or (type == 'trailingStop') or (type == 'takeProfit'):
            method = 'privateGetConditionalOrdersHistory'
        query = self.omit(params, ['method', 'type'])
        response = await getattr(self, method)(self.extend(request, query))
        #
        #     {
        #         "success": True,
        #         "result": [
        #             {
        #                 "createdAt": "2019-03-05T09:56:55.728933+00:00",
        #                 "filledSize": 10,
        #                 "future": "XRP-PERP",
        #                 "id": 9596912,
        #                 "market": "XRP-PERP",
        #                 "price":
        #                     0.306525,
        #                 "avgFillPrice": 0.306526,
        #                 "remainingSize": 31421,
        #                 "side": "sell",
        #                 "size": 31431,
        #                 "status": "open",
        #                 "type": "limit",
        #                 "reduceOnly": False,
        #                 "ioc": False,
        #                 "postOnly": False,
        #                 "clientId": null
        #             }
        #         ]
        #     }
        #
        result = self.safe_value(response, 'result', [])
        return self.parse_orders(result, market, since, limit)

    async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        await self.load_markets()
        market, marketId = self.get_market_params(symbol, 'market', params)
        request = {}
        if marketId is not None:
            request['market'] = marketId
        if since is not None:
            request['start_time'] = int(since / 1000)
            request['end_time'] = self.seconds()
        response = await self.privateGetFills(self.extend(request, params))
        #
        #     {
        #         "success": True,
        #         "result": [
        #             {
        #                 "fee": 20.1374935,
        #                 "feeRate": 0.0005,
        #                 "future": "EOS-0329",
        #                 "id": 11215,
        #                 "liquidity": "taker",
        #                 "market": "EOS-0329",
        #                 "baseCurrency": null,
        #                 "quoteCurrency": null,
        #                 "orderId": 8436981,
        #                 "price": 4.201,
        #                 "side": "buy",
        #                 "size": 9587,
        #                 "time": "2019-03-27T19:15:10.204619+00:00",
        #                 "type": "order"
        #             }
        #         ]
        #     }
        #
        trades = self.safe_value(response, 'result', [])
        return self.parse_trades(trades, market, since, limit)

    async def withdraw(self, code, amount, address, tag=None, params={}):
        tag, params = self.handle_withdraw_tag_and_params(tag, params)
        await self.load_markets()
        self.check_address(address)
        currency = self.currency(code)
        request = {
            'coin': currency['id'],
            'size': amount,
            'address': address,
            # 'password': 'string',  # optional withdrawal password if it is required for your account
            # 'code': '192837',  # optional 2fa code if it is required for your account
        }
        if self.password is not None:
            request['password'] = self.password
        if tag is not None:
            request['tag'] = tag
        networks = self.safe_value(self.options, 'networks', {})
        network = self.safe_string_upper(params, 'network')  # this line allows the user to specify either ERC20 or ETH
        network = self.safe_string_lower(networks, network, network)  # handle ERC20>ETH alias
        if network is not None:
            request['method'] = network
            params = self.omit(params, 'network')
        response = await self.privatePostWalletWithdrawals(self.extend(request, params))
        #
        #     {
        #         "success": True,
        #         "result": {
        #             "coin": "USDTBEAR",
        #             "address": "0x83a127952d266A6eA306c40Ac62A4a70668FE3BE",
        #             "tag": "null",
        #             "fee": 0,
        #             "id": 1,
        #             "size": "20.2",
        #             "status": "requested",
        #             "time": "2019-03-05T09:56:55.728933+00:00",
        #             "txid": "null"
        #         }
        #     }
        #
        result = self.safe_value(response, 'result', {})
        return self.parse_transaction(result, currency)

    async def fetch_positions(self, symbols=None, params={}):
        await self.load_markets()
        request = {
            # 'showAvgPrice': False,
        }
        response = await self.privateGetPositions(self.extend(request, params))
        #
        #     {
        #         "success": True,
        #         "result": [
        #             {
        #                 "cost": -31.7906,
        #                 "entryPrice": 138.22,
        #                 "estimatedLiquidationPrice": 152.1,
        #                 "future": "ETH-PERP",
        #                 "initialMarginRequirement": 0.1,
        #                 "longOrderSize": 1744.55,
        #                 "maintenanceMarginRequirement": 0.04,
        #                 "netSize": -0.23,
        #                 "openSize": 1744.32,
        #                 "realizedPnl": 3.39441714,
        #                 "shortOrderSize": 1732.09,
        #                 "side": "sell",
        #                 "size": 0.23,
        #                 "unrealizedPnl": 0,
        #                 "collateralUsed": 3.17906
        #             }
        #         ]
        #     }
        #
        # todo unify parsePosition/parsePositions
        return self.safe_value(response, 'result', [])

    async def fetch_account_positions(self, symbols=None, params={}):
        await self.load_markets()
        response = await self.privateGetAccount(params)
        #
        #     {
        #         "result":{
        #             "backstopProvider":false,
        #             "chargeInterestOnNegativeUsd":false,
"collateral":2830.2567913677476, # "freeCollateral":2829.670741867416, # "initialMarginRequirement":0.05, # "leverage":20.0, # "liquidating":false, # "maintenanceMarginRequirement":0.03, # "makerFee":0.0, # "marginFraction":null, # "openMarginFraction":null, # "positionLimit":null, # "positionLimitUsed":null, # "positions":[ # { # "collateralUsed":0.0, # "cost":0.0, # "entryPrice":null, # "estimatedLiquidationPrice":null, # "future":"XRP-PERP", # "initialMarginRequirement":0.05, # "longOrderSize":0.0, # "maintenanceMarginRequirement":0.03, # "netSize":0.0, # "openSize":0.0, # "realizedPnl":0.016, # "shortOrderSize":0.0, # "side":"buy", # "size":0.0, # "unrealizedPnl":0.0, # } # ], # "spotLendingEnabled":false, # "spotMarginEnabled":false, # "takerFee":0.0007, # "totalAccountValue":2830.2567913677476, # "totalPositionSize":0.0, # "useFttCollateral":true, # "username":"igor.kroitor@gmail.com" # }, # "success":true # } # result = self.safe_value(response, 'result', {}) # todo unify parsePosition/parsePositions return self.safe_value(result, 'positions', []) async def fetch_deposit_address(self, code, params={}): await self.load_markets() currency = self.currency(code) request = { 'coin': currency['id'], } networks = self.safe_value(self.options, 'networks', {}) network = self.safe_string_upper(params, 'network') # self line allows the user to specify either ERC20 or ETH network = self.safe_string_lower(networks, network, network) # handle ERC20>ETH alias if network is not None: request['method'] = network params = self.omit(params, 'network') response = await self.privateGetWalletDepositAddressCoin(self.extend(request, params)) # # { # "success": True, # "result": { # "address": "0x83a127952d266A6eA306c40Ac62A4a70668FE3BE", # "tag": "null" # } # } # result = self.safe_value(response, 'result', {}) address = self.safe_string(result, 'address') tag = self.safe_string(result, 'tag') self.check_address(address) return { 'currency': code, 'address': address, 'tag': tag, 'info': response, } def parse_transaction_status(self, status): statuses = { # what are other statuses here? 
            'confirmed': 'ok',  # deposits
            'complete': 'ok',  # withdrawals
        }
        return self.safe_string(statuses, status, status)

    def parse_transaction(self, transaction, currency=None):
        #
        # fetchDeposits
        #
        # airdrop
        #
        #     {
        #         "id": 9147072,
        #         "coin": "SRM_LOCKED",
        #         "size": 3.12,
        #         "time": "2021-04-27T23:59:03.565983+00:00",
        #         "notes": "SRM Airdrop for FTT holdings",
        #         "status": "complete"
        #     }
        #
        # regular deposits
        #
        #     {
        #         "coin": "TUSD",
        #         "confirmations": 64,
        #         "confirmedTime": "2019-03-05T09:56:55.728933+00:00",
        #         "fee": 0,
        #         "id": 1,
        #         "sentTime": "2019-03-05T09:56:55.735929+00:00",
        #         "size": "99.0",
        #         "status": "confirmed",
        #         "time": "2019-03-05T09:56:55.728933+00:00",
        #         "txid": "0x8078356ae4b06a036d64747546c274af19581f1c78c510b60505798a7ffcaf1"
        #     }
        #
        # fetchWithdrawals
        #
        #     {
        #         "coin": "TUSD",
        #         "address": "0x83a127952d266A6eA306c40Ac62A4a70668FE3BE",
        #         "tag": "null",
        #         "fee": 0,
        #         "id": 1,
        #         "size": "99.0",
        #         "status": "complete",
        #         "time": "2019-03-05T09:56:55.728933+00:00",
        #         "txid": "0x8078356ae4b06a036d64747546c274af19581f1c78c510b60505798a7ffcaf1"
        #     }
        #
        #     {
        #         "coin": 'BTC',
        #         "id": 1969806,
        #         "notes": 'Transfer to Dd6gi7m2Eg4zzBbPAxuwfEaHs6tYvyUX5hbPpsTcNPXo',
        #         "size": 0.003,
        #         "status": 'complete',
        #         "time": '2021-02-03T20:28:54.918146+00:00'
        #     }
        #
        code = self.safe_currency_code(self.safe_string(transaction, 'coin'))
        id = self.safe_string(transaction, 'id')
        amount = self.safe_number(transaction, 'size')
        status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
        timestamp = self.parse8601(self.safe_string(transaction, 'time'))
        txid = self.safe_string(transaction, 'txid')
        tag = None
        address = self.safe_value(transaction, 'address')
        if not isinstance(address, basestring):
            tag = self.safe_string(address, 'tag')
            address = self.safe_string(address, 'address')
        if address is None:
            # parse address from internal transfer
            notes = self.safe_string(transaction, 'notes')
            if (notes is not None) and (notes.find('Transfer to') >= 0):
                address = notes[12:]
        fee = self.safe_number(transaction, 'fee')
        return {
            'info': transaction,
            'id': id,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'addressFrom': None,
            'address': address,
            'addressTo': address,
            'tagFrom': None,
            'tag': tag,
            'tagTo': tag,
            'type': None,
            'amount': amount,
            'currency': code,
            'status': status,
            'updated': None,
            'fee': {
                'currency': code,
                'cost': fee,
                'rate': None,
            },
        }

    async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
        await self.load_markets()
        response = await self.privateGetWalletDeposits(params)
        #
        #     {
        #         "success": True,
        #         "result": {
        #             "coin": "TUSD",
        #             "confirmations": 64,
        #             "confirmedTime": "2019-03-05T09:56:55.728933+00:00",
        #             "fee": 0,
        #             "id": 1,
        #             "sentTime": "2019-03-05T09:56:55.735929+00:00",
        #             "size": "99.0",
        #             "status": "confirmed",
        #             "time": "2019-03-05T09:56:55.728933+00:00",
        #             "txid": "0x8078356ae4b06a036d64747546c274af19581f1c78c510b60505798a7ffcaf1"
        #         }
        #     }
        #
        result = self.safe_value(response, 'result', [])
        currency = None
        if code is not None:
            currency = self.currency(code)
        return self.parse_transactions(result, currency, since, limit, {'type': 'deposit'})

    async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
        await self.load_markets()
        response = await self.privateGetWalletWithdrawals(params)
        #
        #     {
        #         "success": True,
        #         "result": {
        #             "coin": "TUSD",
        #             "address": "0x83a127952d266A6eA306c40Ac62A4a70668FE3BE",
        #             "tag": "null",
        #             "fee": 0,
        #             "id": 1,
        #             "size": "99.0",
        #             "status": "complete",
        #             "time": "2019-03-05T09:56:55.728933+00:00",
"txid": "0x8078356ae4b06a036d64747546c274af19581f1c78c510b60505798a7ffcaf1" # } # } # result = self.safe_value(response, 'result', []) currency = None if code is not None: currency = self.currency(code) return self.parse_transactions(result, currency, since, limit, {'type': 'withdrawal'}) def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): request = '/api/' + self.implode_params(path, params) query = self.omit(params, self.extract_params(path)) baseUrl = self.implode_hostname(self.urls['api'][api]) url = baseUrl + request if method != 'POST': if query: suffix = '?' + self.urlencode(query) url += suffix request += suffix if api == 'private': self.check_required_credentials() timestamp = str(self.milliseconds()) auth = timestamp + method + request headers = {} if (method == 'POST') or (method == 'DELETE'): body = self.json(query) auth += body headers['Content-Type'] = 'application/json' signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256) options = self.safe_value(self.options, 'sign', {}) headerPrefix = self.safe_string(options, self.hostname, 'FTX') keyField = headerPrefix + '-KEY' tsField = headerPrefix + '-TS' signField = headerPrefix + '-SIGN' headers[keyField] = self.apiKey headers[tsField] = timestamp headers[signField] = signature return {'url': url, 'method': method, 'body': body, 'headers': headers} def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody): if response is None: return # fallback to the default error handler # # {"error":"Invalid parameter start_time","success":false} # {"error":"Not enough balances","success":false} # success = self.safe_value(response, 'success') if not success: feedback = self.id + ' ' + body error = self.safe_string(response, 'error') self.throw_exactly_matched_exception(self.exceptions['exact'], error, feedback) self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback) raise ExchangeError(feedback) # unknown message async def set_leverage(self, leverage, symbol=None, params={}): # WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS # AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS if (leverage < 1) or (leverage > 20): raise BadRequest(self.id + ' leverage should be between 1 and 20') request = { 'leverage': leverage, } return await self.privatePostAccountLeverage(self.extend(request, params)) def parse_income(self, income, market=None): # # { # "future": "ETH-PERP", # "id": 33830, # "payment": 0.0441342, # "time": "2019-05-15T18:00:00+00:00", # "rate": 0.0001 # } # marketId = self.safe_string(income, 'future') symbol = self.safe_symbol(marketId, market) amount = self.safe_number(income, 'payment') code = self.safe_currency_code('USD') id = self.safe_string(income, 'id') timestamp = self.safe_integer(income, 'time') rate = self.safe_number(income, 'rate') return { 'info': income, 'symbol': symbol, 'code': code, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'id': id, 'amount': amount, 'rate': rate, } def parse_incomes(self, incomes, market=None, since=None, limit=None): result = [] for i in range(0, len(incomes)): entry = incomes[i] parsed = self.parse_income(entry, market) result.append(parsed) return self.filter_by_since_limit(result, since, limit, 'timestamp') async def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}): await self.load_markets() method = 'private_get_funding_payments' request = {} market = None if symbol is not 
None: market = self.market(symbol) request['future'] = market['id'] if since is not None: request['startTime'] = since response = await getattr(self, method)(self.extend(request, params)) result = self.safe_value(response, 'result', []) return self.parse_incomes(result, market, since, limit)
41.871094
175
0.451535
c0013c39298be5d72718125a3c6401a131ae58dc
1,182
py
Python
fake_switches/cisco/command_processor/config_vlan.py
freedge/fake-switches
939bf12f2d0f673eb2caf19aa7e99aebbaf68ff3
[ "Apache-2.0" ]
42
2016-06-29T00:29:55.000Z
2022-02-09T17:55:42.000Z
fake_switches/cisco/command_processor/config_vlan.py
freedge/fake-switches
939bf12f2d0f673eb2caf19aa7e99aebbaf68ff3
[ "Apache-2.0" ]
89
2015-12-15T15:42:49.000Z
2021-05-27T16:48:41.000Z
fake_switches/cisco/command_processor/config_vlan.py
freedge/fake-switches
939bf12f2d0f673eb2caf19aa7e99aebbaf68ff3
[ "Apache-2.0" ]
40
2016-02-04T15:27:19.000Z
2022-02-22T13:58:38.000Z
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from fake_switches.command_processing.base_command_processor import BaseCommandProcessor


class ConfigVlanCommandProcessor(BaseCommandProcessor):
    def init(self, switch_configuration, terminal_controller, logger, piping_processor, *args):
        super(ConfigVlanCommandProcessor, self).init(switch_configuration, terminal_controller, logger, piping_processor)
        self.vlan = args[0]

    def get_prompt(self):
        return self.switch_configuration.name + "(config-vlan)#"

    def do_name(self, *args):
        self.vlan.name = (args[0][:32])

    def do_exit(self):
        self.is_done = True
38.129032
121
0.750423
19d1e0c994afe074bf0e3de9ec5a689047a812be
2,651
py
Python
misc/versioning.py
cadl/libmc
10ba59527292d2d415eb963d0080c6b34a1c5f1d
[ "BSD-3-Clause" ]
502
2015-04-02T07:20:45.000Z
2021-11-08T09:39:11.000Z
misc/versioning.py
cadl/libmc
10ba59527292d2d415eb963d0080c6b34a1c5f1d
[ "BSD-3-Clause" ]
80
2015-04-03T15:38:07.000Z
2021-02-10T06:12:00.000Z
misc/versioning.py
cadl/libmc
10ba59527292d2d415eb963d0080c6b34a1c5f1d
[ "BSD-3-Clause" ]
89
2015-04-03T09:31:19.000Z
2021-07-27T04:23:54.000Z
#!/usr/bin/env python2
# from: https://gist.githubusercontent.com/pkrusche/7369262/raw/5bf2dc8afb88d3fdde7be6d16ee4290db6735f37/versioning.py

""" Git Versioning Script

Will transform stdin to expand some keywords with git version/author/date information.

Specify --clean to remove this information before commit.

Setup:

1. Copy versioning.py into your git repository

2. Run:

   git config filter.versioning.smudge 'python versioning.py'
   git config filter.versioning.clean  'python versioning.py --clean'
   echo 'version.py filter=versioning' >> .gitattributes
   git add versioning.py

3. add a version.py file with this contents:

   __version__ = ""
   __author__ = ""
   __email__ = ""
   __date__ = ""

   or

   _Version = ""
   _Author = ""
   _Email = ""
   _Date = ""

"""

import sys
import subprocess
import re


def main():
    clean = False
    if len(sys.argv) > 1:
        if sys.argv[1] == '--clean':
            clean = True

    # initialise empty here. Otherwise: forkbomb through the git calls.
    subst_list = {
        "version": "",
        "date": "",
        # "author": "",
        # "email": ""
    }

    for line in sys.stdin:
        if not clean:
            subst_list = {
                # '--dirty' could be added to the following, too,
                # but is not supported everywhere
                "version": subprocess.check_output([
                    'git', 'describe', '--always', '--tags'
                ]),
                "date": subprocess.check_output([
                    'git', 'log', '--pretty=format:"%ad"', '-1'
                ]),
                # "author": subprocess.check_output([
                #     'git', 'log', '--pretty=format:"%an"', '-1'
                # ]),
                # "email": subprocess.check_output([
                #     'git', 'log', '--pretty=format:"%ae"', '-1'
                # ])
            }
            for k, v in subst_list.iteritems():
                v = re.sub(r'[\n\r\t"\']', "", v)
                rexp = "__%s__\s*=[\s'\"]+" % k
                line = re.sub(rexp, "__%s__ = \"%s\"\n" % (k, v), line)
                rexp = "_%s\s*=[\s'\"]+" % k.capitalize()
                line = re.sub(
                    rexp, "_%s = \"%s\"\n" % (k.capitalize(), v), line
                )
            sys.stdout.write(line)
        else:
            for k in subst_list:
                rexp = "__%s__\s*=.*" % k
                line = re.sub(rexp, "__%s__ = \"\"" % k, line)
                rexp = "_%s\s*=.*" % k.capitalize()
                line = re.sub(rexp, "_%s = \"\"" % k.capitalize(), line)
            sys.stdout.write(line)


if __name__ == "__main__":
    main()
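# Illustration (hypothetical output, not part of the original gist): with the
# filter configured as in the docstring above, checking out a tracked
# version.py containing
#
#     __version__ = ""
#     __date__ = ""
#
# smudges it to something like
#
#     __version__ = "v1.2.3-4-g1a2b3c4"
#     __date__ = "Mon Apr 29 17:11:54 2019 +0200"
#
# and running with --clean restores the empty strings before commit.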
28.202128
118
0.497548
b48b19593c08fad299bea25cb0e2da8f245eeddc
4,268
py
Python
pysnmp/ASCEND-MIBOSPFNBMA-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
11
2021-02-02T16:27:16.000Z
2021-08-31T06:22:49.000Z
pysnmp/ASCEND-MIBOSPFNBMA-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
75
2021-02-24T17:30:31.000Z
2021-12-08T00:01:18.000Z
pysnmp/ASCEND-MIBOSPFNBMA-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
#
# PySNMP MIB module ASCEND-MIBOSPFNBMA-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBOSPFNBMA-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:11:54 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, iso, ObjectIdentity, Gauge32, Unsigned32, Bits, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Integer32, TimeTicks, NotificationType, ModuleIdentity, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "iso", "ObjectIdentity", "Gauge32", "Unsigned32", "Bits", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Integer32", "TimeTicks", "NotificationType", "ModuleIdentity", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")


class DisplayString(OctetString):
    pass


mibospfNbmaNeighborProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 98))
mibospfNbmaNeighborProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 98, 1), )
if mibBuilder.loadTexts: mibospfNbmaNeighborProfileTable.setStatus('mandatory')
mibospfNbmaNeighborProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 98, 1, 1), ).setIndexNames((0, "ASCEND-MIBOSPFNBMA-MIB", "ospfNbmaNeighborProfile-Name"))
if mibBuilder.loadTexts: mibospfNbmaNeighborProfileEntry.setStatus('mandatory')
ospfNbmaNeighborProfile_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 98, 1, 1, 1), DisplayString()).setLabel("ospfNbmaNeighborProfile-Name").setMaxAccess("readonly")
if mibBuilder.loadTexts: ospfNbmaNeighborProfile_Name.setStatus('mandatory')
ospfNbmaNeighborProfile_HostName = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 98, 1, 1, 2), DisplayString()).setLabel("ospfNbmaNeighborProfile-HostName").setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfNbmaNeighborProfile_HostName.setStatus('mandatory')
ospfNbmaNeighborProfile_IpAddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 98, 1, 1, 3), IpAddress()).setLabel("ospfNbmaNeighborProfile-IpAddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfNbmaNeighborProfile_IpAddress.setStatus('mandatory')
ospfNbmaNeighborProfile_DrCapable = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 98, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("ospfNbmaNeighborProfile-DrCapable").setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfNbmaNeighborProfile_DrCapable.setStatus('mandatory')
ospfNbmaNeighborProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 98, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("ospfNbmaNeighborProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: ospfNbmaNeighborProfile_Action_o.setStatus('mandatory')
mibBuilder.exportSymbols("ASCEND-MIBOSPFNBMA-MIB", ospfNbmaNeighborProfile_HostName=ospfNbmaNeighborProfile_HostName, ospfNbmaNeighborProfile_IpAddress=ospfNbmaNeighborProfile_IpAddress, mibospfNbmaNeighborProfileEntry=mibospfNbmaNeighborProfileEntry, ospfNbmaNeighborProfile_DrCapable=ospfNbmaNeighborProfile_DrCapable, mibospfNbmaNeighborProfile=mibospfNbmaNeighborProfile, DisplayString=DisplayString, ospfNbmaNeighborProfile_Action_o=ospfNbmaNeighborProfile_Action_o, ospfNbmaNeighborProfile_Name=ospfNbmaNeighborProfile_Name, mibospfNbmaNeighborProfileTable=mibospfNbmaNeighborProfileTable)
125.529412
595
0.802249
66cc2d54eb748b38ab723c69855d31071b2ae4e1
496
py
Python
var/spack/repos/builtin.mock/packages/requires-virtual/package.py
player1537-forks/spack
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
11
2015-10-04T02:17:46.000Z
2018-02-07T18:23:00.000Z
var/spack/repos/builtin.mock/packages/requires-virtual/package.py
player1537-forks/spack
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
22
2017-08-01T22:45:10.000Z
2022-03-10T07:46:31.000Z
var/spack/repos/builtin.mock/packages/requires-virtual/package.py
player1537-forks/spack
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
4
2016-06-10T17:57:39.000Z
2018-09-11T04:59:38.000Z
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


class RequiresVirtual(Package):
    """Package that requires a virtual dependency and is
    registered as an external.
    """

    homepage = "http://www.example.com"
    url = "http://www.example.com/a-1.0.tar.gz"

    version('2.0', 'abcdef0123456789abcdef0123456789')

    depends_on('stuff')
29.176471
73
0.71371
817d59a3e8a46d84351ca428aff19074798a5676
475
py
Python
setup.py
IvanYashchuk/jax-fenics
80fbeb2b2e786b0e03760fca741f84223f1a34d1
[ "MIT" ]
35
2020-02-25T17:07:32.000Z
2022-03-21T18:35:38.000Z
setup.py
IvanYashchuk/jax-fenics
80fbeb2b2e786b0e03760fca741f84223f1a34d1
[ "MIT" ]
2
2020-11-23T03:02:22.000Z
2021-04-06T07:50:37.000Z
setup.py
IvanYashchuk/jax-fenics
80fbeb2b2e786b0e03760fca741f84223f1a34d1
[ "MIT" ]
6
2020-03-04T19:13:58.000Z
2022-01-19T03:34:02.000Z
from distutils.core import setup

import sys

if sys.version_info[0] < 3:
    raise Exception(
        "The jaxfenics only supports Python3. Did you run $python setup.py <option>.? Try running $python3 setup.py <option>."
    )

setup(
    name="jaxfenics",
    description="JAX-FEniCS interface",
    url="https://github.com/IvanYashchuk/jax-fenics",
    author="Ivan Yashchuk",
    license="MIT",
    packages=["jaxfenics"],
    install_requires=["jax", "fdm", "scipy"],
)
25
126
0.665263
f997f0b4276999cdf8e4444e91059c94aaecc75f
533
py
Python
everest/ptolemaic/datalike/secondary/__init__.py
rsbyrne/everest
1ec06301cdeb7c2b7d85daf6075d996c5529247e
[ "MIT" ]
2
2020-12-17T02:27:28.000Z
2020-12-17T23:50:13.000Z
everest/ptolemaic/datalike/secondary/__init__.py
rsbyrne/everest
1ec06301cdeb7c2b7d85daf6075d996c5529247e
[ "MIT" ]
1
2020-12-07T10:14:45.000Z
2020-12-07T10:14:45.000Z
everest/ptolemaic/datalike/secondary/__init__.py
rsbyrne/everest
1ec06301cdeb7c2b7d85daf6075d996c5529247e
[ "MIT" ]
1
2020-10-22T11:16:50.000Z
2020-10-22T11:16:50.000Z
###############################################################################
''''''
###############################################################################


from .. import _classtools, _everestutilities
from .. import _Ptolemaic, _ur
from .. import Datalike as _Datalike

from ._secondary import Secondary

from .armatures import *
from .functional import *


###############################################################################
###############################################################################
28.052632
79
0.296435
2a9d1c4e13afc964136bfc23af99c2379af2d88d
4,900
py
Python
tinyrpc/server/__init__.py
tadmccorkle/tinyrpc
f24c869e45919b31b784c26c6dbd56474e46f9fb
[ "MIT" ]
117
2015-01-13T07:03:04.000Z
2022-03-24T01:40:50.000Z
tinyrpc/server/__init__.py
tadmccorkle/tinyrpc
f24c869e45919b31b784c26c6dbd56474e46f9fb
[ "MIT" ]
83
2015-02-21T12:41:16.000Z
2022-01-31T10:15:22.000Z
tinyrpc/server/__init__.py
tadmccorkle/tinyrpc
f24c869e45919b31b784c26c6dbd56474e46f9fb
[ "MIT" ]
57
2015-01-03T17:51:01.000Z
2022-02-06T12:31:45.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Server definition.

Defines and implements a single-threaded, single-process, synchronous server.
"""

# FIXME: needs (more) unittests
# FIXME: needs checks for out-of-order, concurrency, etc as attributes
from typing import Any, Callable

import tinyrpc.exc
from tinyrpc import RPCProtocol
from tinyrpc.dispatch import RPCDispatcher
from tinyrpc.transports import ServerTransport


class RPCServer(object):
    """High level RPC server.

    The server is completely generic only assuming some form of RPC
    communication is intended. Protocol, data transport and method dispatching
    are injected into the server object.

    :param transport: The data transport mechanism to use.
    :param protocol: The RPC protocol to use.
    :param dispatcher: The dispatching mechanism to use.
    :type transport: :py:class:`~tinyrpc.transports.ServerTransport`
    :type protocol: :py:class:`~tinyrpc.protocols.RPCProtocol`
    :type dispatcher: :py:class:`~tinyrpc.dispatch.RPCDispatcher`
    """

    trace = None
    """Trace incoming and outgoing messages.

    When this attribute is set to a callable this callable will be called
    directly after a message has been received and immediately after a reply
    is sent. The callable should accept three positional parameters:

    :param str direction: Either '-->' for incoming or '<--' for outgoing data.
    :param any context: The context returned by
        :py:meth:`~tinyrpc.transports.ServerTransport.receive_message`.
    :param bytes message: The message itself.

    Example:

    .. code-block:: python

        def my_trace(direction, context, message):
            logger.debug('%s%s', direction, message)

        server = RPCServer(transport, protocol, dispatcher)
        server.trace = my_trace
        server.serve_forever()

    will log all incoming and outgoing traffic of the RPC service.

    Note that the ``message`` will be the data stream that is transported,
    not the interpreted meaning of that data. It is therefore possible that
    the binary stream is unreadable without further translation.
    """

    def __init__(
            self, transport: ServerTransport, protocol: RPCProtocol,
            dispatcher: RPCDispatcher
    ):
        self.transport = transport
        self.protocol = protocol
        self.dispatcher = dispatcher
        self.trace = None

    def serve_forever(self) -> None:
        """Handle requests forever.

        Starts the server loop; continuously calling
        :py:meth:`receive_one_message` to process the next incoming request.
        """
        while True:
            self.receive_one_message()

    def receive_one_message(self) -> None:
        """Handle a single request.

        Polls the transport for a new message.

        After a new message has arrived :py:meth:`_spawn` is called with a
        handler function and arguments to handle the request.

        The handler function will try to decode the message using the supplied
        protocol, if that fails, an error response will be sent. After decoding
        the message, the dispatcher will be asked to handle the resulting
        request and the return value (either an error or a result) will be sent
        back to the client using the transport.
        """
        context, message = self.transport.receive_message()
        if callable(self.trace):
            self.trace('-->', context, message)

        # assuming protocol is thread-safe and dispatcher is thread-safe, as
        # long as its immutable

        def handle_message(context: Any, message: bytes) -> None:
            """Parse, process and reply a single request."""
            try:
                request = self.protocol.parse_request(message)
            except tinyrpc.exc.RPCError as e:
                response = e.error_respond()
            else:
                response = self.dispatcher.dispatch(
                    request, getattr(self.protocol, '_caller', None)
                )

            # send reply
            if response is not None:
                result = response.serialize()
                if callable(self.trace):
                    self.trace('<--', context, result)
                self.transport.send_reply(context, result)

        self._spawn(handle_message, context, message)

    def _spawn(self, func: Callable, *args, **kwargs):
        """Spawn a handler function.

        This function is overridden in subclasses to provide concurrency.

        In the base implementation, it simply calls the supplied function
        ``func`` with ``*args`` and ``**kwargs``. This results in a
        single-threaded, single-process, synchronous server.

        :param func: A callable to call.
        :param args: Arguments to ``func``.
        :param kwargs: Keyword arguments to ``func``.
        """
        func(*args, **kwargs)
36.842105
111
0.657347
1922b80f7f3568d0d5847b54ef5dc196995219ed
2,149
py
Python
tests/test_future.py
tarcisioe/ampdup
047055a9d748ed5b8c9d87e06f50881c2a3fa2c0
[ "MIT" ]
null
null
null
tests/test_future.py
tarcisioe/ampdup
047055a9d748ed5b8c9d87e06f50881c2a3fa2c0
[ "MIT" ]
2
2021-10-02T02:28:42.000Z
2021-10-10T22:28:33.000Z
tests/test_future.py
tarcisioe/ampdup
047055a9d748ed5b8c9d87e06f50881c2a3fa2c0
[ "MIT" ]
null
null
null
"""Tests for the ampdup.future module.""" import pytest from ampdup.future import Future, FutureAlreadySetError @pytest.mark.anyio async def test_get_set_future() -> None: """Checks that a set future returns the set value.""" f: Future[int] = Future() f.set(1) assert await f.get() == 1 @pytest.mark.anyio async def test_get_failed_future() -> None: """Checks that a failed future raises.""" f: Future[int] = Future() class DummyError(Exception): """Error class for testing.""" f.fail(DummyError()) with pytest.raises(DummyError): await f.get() @pytest.mark.anyio async def test_set_future_twice() -> None: """Check that setting a future twice fails.""" f: Future[int] = Future() f.set(1) with pytest.raises(FutureAlreadySetError): f.set(1) @pytest.mark.anyio async def test_fail_set_future() -> None: """Check that failing a set future fails.""" f: Future[int] = Future() f.set(1) with pytest.raises(FutureAlreadySetError): f.fail(ValueError('Oh no!')) @pytest.mark.anyio async def test_set_failed_future() -> None: """Check that setting a failed future fails.""" f: Future[int] = Future() f.fail(ValueError('Oh no!')) with pytest.raises(FutureAlreadySetError): f.set(1) @pytest.mark.anyio async def test_fail_future_twice() -> None: """Check that failing a future twice fails.""" f: Future[int] = Future() f.fail(ValueError('Oh no!')) with pytest.raises(FutureAlreadySetError): f.fail(ValueError('Oh no!')) @pytest.mark.anyio async def test_future_set_after_get() -> None: """Check that setting a future after a call to get works, with multiple getters.""" from anyio import Event, create_task_group will_get = Event() async def get_future(f: Future[int]) -> None: will_get.set() assert await f.get() == 3 f: Future[int] = Future() async with create_task_group() as tg: tg.start_soon(get_future, f) tg.start_soon(get_future, f) tg.start_soon(get_future, f) await will_get.wait() f.set(3)
23.107527
87
0.647278
6601c76a94318899e0399e0998385184f661aa45
91
py
Python
tests/roots/test-directive-code/conf.py
sephalon/sphinx
c892fe98f7a311dc51340f8d98e63d9790b7f820
[ "BSD-2-Clause" ]
1
2021-11-06T17:09:04.000Z
2021-11-06T17:09:04.000Z
tests/roots/test-directive-code/conf.py
sephalon/sphinx
c892fe98f7a311dc51340f8d98e63d9790b7f820
[ "BSD-2-Clause" ]
1
2017-07-15T22:46:50.000Z
2017-07-15T22:46:50.000Z
tests/roots/test-directive-code/conf.py
sephalon/sphinx
c892fe98f7a311dc51340f8d98e63d9790b7f820
[ "BSD-2-Clause" ]
1
2018-11-29T06:45:05.000Z
2018-11-29T06:45:05.000Z
# -*- coding: utf-8 -*-

master_doc = 'index'
exclude_patterns = ['_build']

numfig = True
13
29
0.626374
8ce3c2efe1948a109dcc2c7ff0df11f6d514e2e7
230
py
Python
depimpact/tests/__init__.py
NazBen/dep-impact
284e72bccfb6309110df5191dfae3c0a93ce813b
[ "MIT" ]
null
null
null
depimpact/tests/__init__.py
NazBen/dep-impact
284e72bccfb6309110df5191dfae3c0a93ce813b
[ "MIT" ]
null
null
null
depimpact/tests/__init__.py
NazBen/dep-impact
284e72bccfb6309110df5191dfae3c0a93ce813b
[ "MIT" ]
null
null
null
""" """ from .test_functions import func_overflow, margins_overflow, var_names_overflow, func_sum, multi_output_func_sum __all__ = ["func_overflow", "margins_overflow", "var_names_overflow", "func_sum", "multi_output_func_sum"]
38.333333
112
0.795652
07cff21d431df085a8e6938c078f5f0a4582f319
10,203
py
Python
apprise/plugins/NotifyParsePlatform.py
linkmauve/apprise
76700bfa1ddcb2812d9ed14dfa7736125bcd346e
[ "MIT" ]
4,764
2018-02-02T18:17:06.000Z
2022-03-31T20:41:13.000Z
apprise/plugins/NotifyParsePlatform.py
linkmauve/apprise
76700bfa1ddcb2812d9ed14dfa7736125bcd346e
[ "MIT" ]
504
2017-11-26T15:56:14.000Z
2022-03-31T22:38:49.000Z
apprise/plugins/NotifyParsePlatform.py
linkmauve/apprise
76700bfa1ddcb2812d9ed14dfa7736125bcd346e
[ "MIT" ]
217
2018-05-22T14:29:20.000Z
2022-03-28T06:24:46.000Z
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

# Official API reference: https://developer.gitter.im/docs/user-resource

import re
import six
import requests
from json import dumps

from .NotifyBase import NotifyBase
from ..common import NotifyType
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _

# Used to break path apart into list of targets
TARGET_LIST_DELIM = re.compile(r'[ \t\r\n,\\/]+')


# Priorities
class ParsePlatformDevice(object):
    # All Devices
    ALL = 'all'

    # Apple IOS (APNS)
    IOS = 'ios'

    # Android/Firebase (FCM)
    ANDROID = 'android'


PARSE_PLATFORM_DEVICES = (
    ParsePlatformDevice.ALL,
    ParsePlatformDevice.IOS,
    ParsePlatformDevice.ANDROID,
)


class NotifyParsePlatform(NotifyBase):
    """
    A wrapper for Parse Platform Notifications
    """

    # The default descriptive name associated with the Notification
    service_name = 'Parse Platform'

    # The services URL
    service_url = ' https://parseplatform.org/'

    # insecure notifications (using http)
    protocol = 'parsep'

    # Secure notifications (using https)
    secure_protocol = 'parseps'

    # A URL that takes you to the setup/help of the specific protocol
    setup_url = 'https://github.com/caronc/apprise/wiki/Notify_parseplatform'

    # Define object templates
    templates = (
        '{schema}://{app_id}:{master_key}@{host}',
        '{schema}://{app_id}:{master_key}@{host}:{port}',
    )

    # Define our template tokens
    template_tokens = dict(NotifyBase.template_tokens, **{
        'host': {
            'name': _('Hostname'),
            'type': 'string',
            'required': True,
        },
        'port': {
            'name': _('Port'),
            'type': 'int',
            'min': 1,
            'max': 65535,
        },
        'app_id': {
            'name': _('App ID'),
            'type': 'string',
            'private': True,
            'required': True,
        },
        'master_key': {
            'name': _('Master Key'),
            'type': 'string',
            'private': True,
            'required': True,
        },
    })

    # Define our template arguments
    template_args = dict(NotifyBase.template_args, **{
        'device': {
            'name': _('Device'),
            'type': 'choice:string',
            'values': PARSE_PLATFORM_DEVICES,
            'default': ParsePlatformDevice.ALL,
        },
        'app_id': {
            'alias_of': 'app_id',
        },
        'master_key': {
            'alias_of': 'master_key',
        },
    })

    def __init__(self, app_id, master_key, device=None, **kwargs):
        """
        Initialize Parse Platform Object
        """
        super(NotifyParsePlatform, self).__init__(**kwargs)

        self.fullpath = kwargs.get('fullpath')
        if not isinstance(self.fullpath, six.string_types):
            self.fullpath = '/'

        # Application ID
        self.application_id = validate_regex(app_id)
        if not self.application_id:
            msg = 'An invalid Parse Platform Application ID ' \
                  '({}) was specified.'.format(app_id)
            self.logger.warning(msg)
            raise TypeError(msg)

        # Master Key
        self.master_key = validate_regex(master_key)
        if not self.master_key:
            msg = 'An invalid Parse Platform Master Key ' \
                  '({}) was specified.'.format(master_key)
            self.logger.warning(msg)
            raise TypeError(msg)

        # Initialize Devices Array
        self.devices = []

        if device:
            self.device = device.lower()
            if device not in PARSE_PLATFORM_DEVICES:
                msg = 'An invalid Parse Platform device ' \
                      '({}) was specified.'.format(device)
                self.logger.warning(msg)
                raise TypeError(msg)
        else:
            self.device = self.template_args['device']['default']

        if self.device == ParsePlatformDevice.ALL:
            self.devices = [d for d in PARSE_PLATFORM_DEVICES
                            if d != ParsePlatformDevice.ALL]
        else:
            # Store our device
            self.devices.append(device)

        return

    def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
        """
        Perform Parse Platform Notification
        """

        # Prepare our headers:
        headers = {
            'User-Agent': self.app_id,
            'Content-Type': 'application/json',
            'X-Parse-Application-Id': self.application_id,
            'X-Parse-Master-Key': self.master_key,
        }

        # prepare our payload
        payload = {
            'where': {
                'deviceType': {
                    '$in': self.devices,
                }
            },
            'data': {
                'title': title,
                'alert': body,
            }
        }

        # Set our schema
        schema = 'https' if self.secure else 'http'

        # Our Notification URL
        url = '%s://%s' % (schema, self.host)
        if isinstance(self.port, int):
            url += ':%d' % self.port

        url += self.fullpath.rstrip('/') + '/parse/push/'

        self.logger.debug('Parse Platform POST URL: %s (cert_verify=%r)' % (
            url, self.verify_certificate,
        ))
        self.logger.debug('Parse Platform Payload: %s' % str(payload))

        # Always call throttle before any remote server i/o is made
        self.throttle()

        try:
            r = requests.post(
                url,
                data=dumps(payload),
                headers=headers,
                verify=self.verify_certificate,
            )

            if r.status_code != requests.codes.ok:
                # We had a problem
                status_str = NotifyParsePlatform.\
                    http_response_code_lookup(r.status_code)

                self.logger.warning(
                    'Failed to send Parse Platform notification: '
                    '{}{}error={}.'.format(
                        status_str,
                        ', ' if status_str else '',
                        r.status_code))

                self.logger.debug('Response Details:\r\n{}'.format(r.content))

                # Return; we're done
                return False

            else:
                self.logger.info('Sent Parse Platform notification.')

        except requests.RequestException as e:
            self.logger.warning(
                'A Connection error occured sending Parse Platform '
                'notification to %s.' % self.host)
            self.logger.debug('Socket Exception: %s' % str(e))

            # Return; we're done
            return False

        return True

    def url(self, privacy=False, *args, **kwargs):
        """
        Returns the URL built dynamically based on specified arguments.
        """

        # Define any arguments set
        params = {
            'device': self.device,
        }

        # Extend our parameters
        params.update(self.url_parameters(privacy=privacy, *args, **kwargs))

        default_port = 443 if self.secure else 80

        return \
            '{schema}://{app_id}:{master_key}@' \
            '{hostname}{port}{fullpath}/?{params}'.format(
                schema=self.secure_protocol if self.secure else self.protocol,
                app_id=self.pprint(self.application_id, privacy, safe=''),
                master_key=self.pprint(self.master_key, privacy, safe=''),
                hostname=NotifyParsePlatform.quote(self.host, safe=''),
                port='' if self.port is None or self.port == default_port
                     else ':{}'.format(self.port),
                fullpath=NotifyParsePlatform.quote(self.fullpath, safe='/'),
                params=NotifyParsePlatform.urlencode(params))

    @staticmethod
    def parse_url(url):
        """
        Parses the URL and returns enough arguments that can allow
        us to substantiate this object.
        """
        results = NotifyBase.parse_url(url)

        if not results:
            # We're done early as we couldn't load the results
            return results

        # App ID is retrieved from the user
        results['app_id'] = NotifyParsePlatform.unquote(results['user'])

        # Master Key is retrieved from the password
        results['master_key'] = \
            NotifyParsePlatform.unquote(results['password'])

        # Device support override
        if 'device' in results['qsd'] and len(results['qsd']['device']):
            results['device'] = results['qsd']['device']

        # Allow app_id attribute over-ride
        if 'app_id' in results['qsd'] and len(results['qsd']['app_id']):
            results['app_id'] = results['qsd']['app_id']

        # Allow master_key attribute over-ride
        if 'master_key' in results['qsd'] \
                and len(results['qsd']['master_key']):
            results['master_key'] = results['qsd']['master_key']

        return results
31.785047
79
0.56895
e24e6bb928164f1904be03431be250e60f2eb0bf
433
py
Python
core/admin.py
Edinburgh-College-of-Art/learning-energy-systems-v2
4ac57d867f2897da8cd8a7c6c6c697d04c447f01
[ "MIT" ]
null
null
null
core/admin.py
Edinburgh-College-of-Art/learning-energy-systems-v2
4ac57d867f2897da8cd8a7c6c6c697d04c447f01
[ "MIT" ]
18
2018-05-10T14:34:53.000Z
2021-06-10T20:27:09.000Z
core/admin.py
Edinburgh-College-of-Art/learning-energy-systems-v2
4ac57d867f2897da8cd8a7c6c6c697d04c447f01
[ "MIT" ]
null
null
null
from django.contrib import admin

# Register your models here.
from .models import Yeargroup, Question, Student, Subject, Prediction, Occurrence


class PredictionAdmin(admin.ModelAdmin):
    list_display = ['id', 'user', 'occurrence']


admin.site.register(Yeargroup)
admin.site.register(Question)
admin.site.register(Student)
admin.site.register(Subject)
admin.site.register(Occurrence)
admin.site.register(Prediction, PredictionAdmin)
30.928571
81
0.803695
e3c8720a6a77a6ca80ad4888a1c4d51537624c63
2,973
py
Python
books/blackhat_python/ch03/sniffer_with_icmp.py
argodev/learn
d815beb9c1f8fa3dd8cd917640ebcca5822205c3
[ "MIT" ]
null
null
null
books/blackhat_python/ch03/sniffer_with_icmp.py
argodev/learn
d815beb9c1f8fa3dd8cd917640ebcca5822205c3
[ "MIT" ]
15
2020-01-28T22:25:10.000Z
2022-03-11T23:21:02.000Z
books/blackhat_python/ch03/sniffer_with_icmp.py
argodev/learn
d815beb9c1f8fa3dd8cd917640ebcca5822205c3
[ "MIT" ]
null
null
null
import socket
import os
import struct
from ctypes import *

# host to listen on
host = '192.168.3.157'


# our IP header
class IP(Structure):
    # NOTE: The book had the last two fields listed as c_long and that broke things
    _fields_ = [
        ("ihl", c_ubyte, 4),
        ("version", c_ubyte, 4),
        ("tos", c_ubyte),
        ("len", c_ushort),
        ("id", c_ushort),
        ("offset", c_ushort),
        ("ttl", c_ubyte),
        ("protocol_num", c_ubyte),
        ("sum", c_ushort),
        ("src", c_uint),
        ("dst", c_uint)
    ]

    def __new__(self, socket_buffer=None):
        return self.from_buffer_copy(socket_buffer)

    def __init__(self, socket_buffer=None):
        # map protocol constants to their names
        self.protocol_map = {1: "ICMP", 6: "TCP", 17: "UDP"}

        # human readable IP addresses
        self.src_address = socket.inet_ntoa(struct.pack("<L", self.src))
        self.dst_address = socket.inet_ntoa(struct.pack("<L", self.dst))

        # human readable protocol
        try:
            self.protocol = self.protocol_map[self.protocol_num]
        except:
            self.protocol = str(self.protocol_num)


class ICMP(Structure):
    _fields_ = [
        ("type", c_ubyte),
        ("code", c_ubyte),
        ("checksum", c_ushort),
        ("unused", c_ushort),
        ("next_hop_mtu", c_ushort)
    ]

    def __new__(self, socket_buffer):
        return self.from_buffer_copy(socket_buffer)

    def __init__(self, socket_buffer):
        pass


# this should look familiar from the previous example
if os.name == 'nt':
    socket_protocol = socket.IPPROTO_IP
else:
    socket_protocol = socket.IPPROTO_ICMP

sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
sniffer.bind((host, 0))
sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)

if os.name == 'nt':
    sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)

try:
    while True:
        # read in a packet
        raw_buffer = sniffer.recvfrom(65565)[0]

        # create an IP header from the first 20 bytes of the buffer
        ip_header = IP(raw_buffer[0:20])

        # print out the protocol that was detected and the hosts
        print("Protocol: %s %s -> %s" % (ip_header.protocol, ip_header.src_address, ip_header.dst_address))

        # if it's ICMP, we want it
        if ip_header.protocol == 'ICMP':
            # calculate where our ICMP packet starts
            offset = ip_header.ihl * 4
            buf = raw_buffer[offset:offset + sizeof(ICMP)]

            # create our ICMP structure
            icmp_header = ICMP(buf)

            print("ICMP -> Type: %d Code: %d" % (icmp_header.type, icmp_header.code))

# handle CTRL-C
except KeyboardInterrupt:
    # if we're using Windows, turn off promiscuous mode
    if os.name == 'nt':
        sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
26.544643
107
0.597713
5458f0afb2d59dfda624fdf3831f72a376d29fa3
1,060
py
Python
tests/__init__.py
jdewells/jschon
14c5415c77b2f1e531bedd8aeeb8051fde7efb3e
[ "MIT" ]
null
null
null
tests/__init__.py
jdewells/jschon
14c5415c77b2f1e531bedd8aeeb8051fde7efb3e
[ "MIT" ]
null
null
null
tests/__init__.py
jdewells/jschon
14c5415c77b2f1e531bedd8aeeb8051fde7efb3e
[ "MIT" ]
null
null
null
from jschon import URI

metaschema_uri_2019_09 = URI("https://json-schema.org/draft/2019-09/schema")
metaschema_uri_2020_12 = URI("https://json-schema.org/draft/2020-12/schema")

example_schema = {
    "$id": "dynamicRef8_main.json",
    "$defs": {
        "inner": {
            "$id": "dynamicRef8_inner.json",
            "$dynamicAnchor": "foo",
            "title": "inner",
            "additionalProperties": {
                "$dynamicRef": "#foo"
            }
        }
    },
    "if": {
        "propertyNames": {
            "pattern": "^[a-m]"
        }
    },
    "then": {
        "title": "any type of node",
        "$id": "dynamicRef8_anyLeafNode.json",
        "$dynamicAnchor": "foo",
        "$ref": "dynamicRef8_main.json#/$defs/inner"
    },
    "else": {
        "title": "integer node",
        "$id": "dynamicRef8_integerNode.json",
        "$dynamicAnchor": "foo",
        "type": ["object", "integer"],
        "$ref": "dynamicRef8_main.json#/$defs/inner"
    }
}

example_valid = {"alpha": 1.1}
example_invalid = {"november": 1.1}
27.179487
76
0.516981
612e02cc62511d933bc184e99c5b9ca674ac22be
764
py
Python
sandcraft/music_mixer.py
jonathansessa/Sandcraft
22b3d0357a3b13b7a539ee52551f1e5b0c09a56e
[ "MIT" ]
3
2021-06-11T20:43:35.000Z
2021-06-16T20:01:36.000Z
sandcraft/music_mixer.py
jonathansessa/Sandcraft
22b3d0357a3b13b7a539ee52551f1e5b0c09a56e
[ "MIT" ]
null
null
null
sandcraft/music_mixer.py
jonathansessa/Sandcraft
22b3d0357a3b13b7a539ee52551f1e5b0c09a56e
[ "MIT" ]
null
null
null
import os

from pygame import mixer


class MusicMixer:
    def __init__(self):
        mixer.init()
        self.__foundParticle = mixer.Sound(os.path.join(os.path.dirname(__file__), 'soundFiles', 'newParticle.wav'))
        self.__foundParticle.set_volume(.05)
        self.__backgroundMusic = mixer.Sound(os.path.join(os.path.dirname(__file__), 'soundFiles', 'background.wav'))
        self.__backgroundMusic.set_volume(.05)
        self.backgroundPlay()

    def ding_Discovery(self):
        self.__foundParticle.play()

    def backgroundPlay(self):
        self.__backgroundMusic.play(-1)

    def change_volume_background(self, vol):
        self.__backgroundMusic.set_volume(vol)

    def endMixer(self):
        mixer.fadeout(1000)
        del self
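# Minimal usage sketch (added for illustration; assumes the two referenced
# .wav files exist under soundFiles/ next to this module):
#
#     sounds = MusicMixer()                  # mixer.init() + background loop
#     sounds.ding_Discovery()                # one-shot discovery chime
#     sounds.change_volume_background(0.2)   # volume in the 0.0-1.0 range
#     sounds.endMixer()                      # fade all channels out over 1s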
30.56
117
0.675393
5a86f5dba0f6a2ec05a11d97288d854cf854a1fb
2,199
py
Python
allure-pytest/test/steps/simple_steps_test.py
vdsbenoit/allure-python
7b56b031c42369dd73844105382e9ceb9a88d6cd
[ "Apache-2.0" ]
1
2021-02-19T21:00:11.000Z
2021-02-19T21:00:11.000Z
allure-pytest/test/steps/simple_steps_test.py
vdsbenoit/allure-python
7b56b031c42369dd73844105382e9ceb9a88d6cd
[ "Apache-2.0" ]
null
null
null
allure-pytest/test/steps/simple_steps_test.py
vdsbenoit/allure-python
7b56b031c42369dd73844105382e9ceb9a88d6cd
[ "Apache-2.0" ]
1
2020-08-05T05:40:44.000Z
2020-08-05T05:40:44.000Z
""" >>> allure_report = getfixture('allure_report') >>> assert_that(allure_report, ... all_of( ... has_property('test_cases', has_length(8)), ... has_property('test_groups', has_length(0)) ... )) # doctest: +SKIP SEQUENCE OF STEPS >>> for test_case_name in ['test_sequence_of_inside_steps', ... 'test_sequence_of_outside_steps']: ... ... for first_step_fail, second_step_fail in true_false: ... test_case = '{name}[{first}-{second}]'.format(name=test_case_name, ... first=first_step_fail, ... second=second_step_fail) ... ... if not first_step_fail: ... assert_that(allure_report, ... has_test_case(test_case, ... has_step('First step', with_status('passed')), ... has_step('Second step', with_status('failed' if second_step_fail else 'passed')) ... )) ... ... if first_step_fail: ... assert_that(allure_report, ... has_test_case(test_case, ... has_step('First step', with_status('failed')), ... is_not(has_step('Second step')) ... )) """ import pytest import allure from itertools import product true_false = [variants for variants in product([True, False], [True, False])] @pytest.mark.parametrize("first_step_fail, second_step_fail", true_false) def test_sequence_of_inside_steps(first_step_fail, second_step_fail): with allure.step('First step'): assert not first_step_fail with allure.step('Second step'): assert not second_step_fail @allure.step("First step") def first_step(step_fail): assert not step_fail @allure.step("Second step") def second_step(step_fail): assert not step_fail @pytest.mark.parametrize("first_step_fail, second_step_fail", true_false) def test_sequence_of_outside_steps(first_step_fail, second_step_fail): first_step(first_step_fail) second_step(second_step_fail)
32.338235
112
0.582992
0ede3340d0e0320453124f959a9bd9a0b55a0b44
11,173
py
Python
ccyclegan/ccyclegan_t4.py
gtesei/Keras-GAN
8f57901fb637d8d179e780a191683da347af30b2
[ "MIT" ]
9
2019-08-15T10:36:09.000Z
2022-03-13T09:27:47.000Z
ccyclegan/ccyclegan_t4.py
gtesei/Keras-GAN
8f57901fb637d8d179e780a191683da347af30b2
[ "MIT" ]
null
null
null
ccyclegan/ccyclegan_t4.py
gtesei/Keras-GAN
8f57901fb637d8d179e780a191683da347af30b2
[ "MIT" ]
5
2019-11-21T12:04:33.000Z
2021-12-08T16:40:32.000Z
from __future__ import print_function, division
import scipy

from keras.datasets import mnist
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
from keras.layers import Reshape
import datetime
import matplotlib.pyplot as plt
import sys
from data_loader import DataLoader
import numpy as np
import os
import random


class CycleGAN():
    def __init__(self, domain_A=6, domain_B=3, img_rows=48, img_cols=48, channels=1, num_classes=7, latent_dim=100):
        # Input shape
        self.domain_A = domain_A
        self.domain_B = domain_B
        self.img_rows = img_rows
        self.img_cols = img_cols
        self.channels = channels
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.num_classes = num_classes
        self.latent_dim = latent_dim

        ## dict
        self.lab_dict = {0: "Angry", 1: "Disgust", 2: "Fear", 3: "Happy", 4: "Sad", 5: "Surprise", 6: "Neutral"}

        # Configure data loader
        self.dataset_name = 'fer2013'
        self.data_loader = DataLoader(dataset_name=self.dataset_name, img_res=self.img_shape)

        # Calculate output shape of D (PatchGAN)
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Number of filters in the first layer of G and D
        self.gf = 32
        self.df = 64

        # Loss weights
        self.lambda_cycle = 10.0                    # Cycle-consistency loss
        self.lambda_id = 0.1 * self.lambda_cycle    # Identity loss

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminators
        self.d_A = self.build_discriminator()
        self.d_B = self.build_discriminator()
        self.d_A.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
        self.d_B.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])

        # -------------------------
        # Construct Computational
        #   Graph of Generators
        # -------------------------

        # Build the generators
        self.g_AB = self.build_generator()
        self.g_BA = self.build_generator()

        # Input images from both domains
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # Translate images to the other domain
        fake_B = self.g_AB(img_A)
        fake_A = self.g_BA(img_B)
        # Translate images back to original domain
        reconstr_A = self.g_BA(fake_B)
        reconstr_B = self.g_AB(fake_A)
        # Identity mapping of images
        img_A_id = self.g_BA(img_A)
        img_B_id = self.g_AB(img_B)

        # For the combined model we will only train the generators
        self.d_A.trainable = False
        self.d_B.trainable = False

        # Discriminators determines validity of translated images
        valid_A = self.d_A(fake_A)
        valid_B = self.d_B(fake_B)

        # Combined model trains generators to fool discriminators
        self.combined = Model(inputs=[img_A, img_B],
                              outputs=[valid_A, valid_B,
                                       reconstr_A, reconstr_B,
                                       img_A_id, img_B_id])
        self.combined.compile(loss=['mse', 'mse',
                                    'mae', 'mae',
                                    'mae', 'mae'],
                              loss_weights=[1, 1,
                                            self.lambda_cycle, self.lambda_cycle,
                                            self.lambda_id, self.lambda_id],
                              optimizer=optimizer)

    def build_generator(self):
        """U-Net Generator"""

        def conv2d(layer_input, filters, f_size=4):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            d = InstanceNormalization()(d)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = InstanceNormalization()(u)
            u = Concatenate()([u, skip_input])
            return u

        # Image input
        d0 = Input(shape=self.img_shape)

        # Downsampling
        d1 = conv2d(d0, self.gf)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)

        # Upsampling
        u1 = deconv2d(d4, d3, self.gf*4)
        u2 = deconv2d(u1, d2, self.gf*2)
        u3 = deconv2d(u2, d1, self.gf)

        u4 = UpSampling2D(size=2)(u3)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)

        return Model(d0, output_img)

    def build_discriminator(self):

        def d_layer(layer_input, filters, f_size=4, normalization=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalization:
                d = InstanceNormalization()(d)
            return d

        img = Input(shape=self.img_shape)

        d1 = d_layer(img, self.df, normalization=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

        return Model(img, validity)

    def train(self, epochs, batch_size=1, sample_interval=50):

        start_time = datetime.datetime.now()

        # Adversarial loss ground truths
        valid = np.ones((batch_size,) + self.disc_patch)
        fake = np.zeros((batch_size,) + self.disc_patch)

        for epoch in range(epochs):
            for batch_i, (labels_A, imgs_A, labels_B, imgs_B) in enumerate(self.data_loader.load_batch_AB(domain=[self.domain_A, self.domain_B], batch_size=batch_size)):

                # ----------------------
                #  Train Discriminators
                # ----------------------

                # Translate images to opposite domain
                fake_B = self.g_AB.predict(imgs_A)
                fake_A = self.g_BA.predict(imgs_B)

                # Train the discriminators (original images = real / translated = Fake)
                dA_loss_real = self.d_A.train_on_batch(imgs_A, valid)
                dA_loss_fake = self.d_A.train_on_batch(fake_A, fake)
                dA_loss = 0.5 * np.add(dA_loss_real, dA_loss_fake)

                dB_loss_real = self.d_B.train_on_batch(imgs_B, valid)
                dB_loss_fake = self.d_B.train_on_batch(fake_B, fake)
                dB_loss = 0.5 * np.add(dB_loss_real, dB_loss_fake)

                # Total disciminator loss
                d_loss = 0.5 * np.add(dA_loss, dB_loss)

                # ------------------
                #  Train Generators
                # ------------------

                # Train the generators
                g_loss = self.combined.train_on_batch([imgs_A, imgs_B],
                                                      [valid, valid,
                                                       imgs_A, imgs_B,
                                                       imgs_A, imgs_B])

                elapsed_time = datetime.datetime.now() - start_time

                # Plot the progress
                print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f, adv: %05f, recon: %05f, id: %05f] time: %s " %
                      (epoch, epochs,
                       batch_i, self.data_loader.n_batches,
                       d_loss[0], 100*d_loss[1],
                       g_loss[0],
                       np.mean(g_loss[1:3]),
                       np.mean(g_loss[3:5]),
                       np.mean(g_loss[5:6]),
                       elapsed_time))

                # If at save interval => save generated image samples
                if batch_i % sample_interval == 0:
                    self.sample_images(epoch, batch_i)

    def sample_images(self, epoch, batch_i):
        os.makedirs('images/%s' % self.dataset_name, exist_ok=True)
        r, c = 2, 3

        labels_A, imgs_A = self.data_loader.load_data(domain=self.domain_A, batch_size=1, is_testing=True)
        labels_B, imgs_B = self.data_loader.load_data(domain=self.domain_B, batch_size=1, is_testing=True)

        assert labels_A == self.domain_A
        assert labels_B == self.domain_B

        # Demo (for GIF)
        #imgs_A = self.data_loader.load_img('datasets/apple2orange/testA/n07740461_1541.jpg')
        #imgs_B = self.data_loader.load_img('datasets/apple2orange/testB/n07749192_4241.jpg')

        # Translate images to the other domain
        fake_B = self.g_AB.predict(imgs_A)
        fake_A = self.g_BA.predict(imgs_B)
        # Translate back to original domain
        reconstr_A = self.g_BA.predict(fake_B)
        reconstr_B = self.g_AB.predict(fake_A)

        gen_imgs = np.concatenate([imgs_A, fake_B, reconstr_A, imgs_B, fake_A, reconstr_B])

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        titles = ['Orig['+str(self.domain_A)+'/'+str(self.domain_B)+']',
                  'Trans['+str(self.domain_B)+'/'+str(self.domain_A)+']',
                  'Recon['+str(self.domain_A)+'/'+str(self.domain_B)+']']
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                #print("gen_imgs[cnt]:", gen_imgs[cnt].shape)
                #print("resize:", np.resize(gen_imgs[cnt], self.img_shape).shape)
                axs[i, j].imshow(gen_imgs[cnt].squeeze(), cmap='gray')
                axs[i, j].set_title(titles[j])
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("images/%s/%d_%d.png" % (self.dataset_name, epoch, batch_i))
        plt.close()


if __name__ == '__main__':
    gan = CycleGAN()
    gan.train(epochs=200, batch_size=1, sample_interval=3000)
184
0.545422
26462bdd5119fd37457abdc77e76c2d90a50cc9b
925
py
Python
test_on_image.py
psethwick/Fast-Neural-Style-Transfer
414b0a2ab08e01f9238671589a7794efad68a44d
[ "MIT" ]
null
null
null
test_on_image.py
psethwick/Fast-Neural-Style-Transfer
414b0a2ab08e01f9238671589a7794efad68a44d
[ "MIT" ]
null
null
null
test_on_image.py
psethwick/Fast-Neural-Style-Transfer
414b0a2ab08e01f9238671589a7794efad68a44d
[ "MIT" ]
null
null
null
from models import TransformerNet
from utils import *
import torch
from torch.autograd import Variable
import os
import tqdm
from torchvision.utils import save_image
from PIL import Image


def style_image(image_path, model):
    os.makedirs("images/outputs", exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = style_transform()

    # Define model and load model checkpoint
    transformer = TransformerNet().to(device)
    transformer.load_state_dict(torch.load(model))
    transformer.eval()

    # Prepare input
    image_tensor = Variable(transform(Image.open(image_path))).to(device)
    image_tensor = image_tensor.unsqueeze(0)

    # Stylize image
    with torch.no_grad():
        stylized_image = denormalize(transformer(image_tensor)).cpu()

    # Save image
    fn = image_path.split("/")[-1]
    save_image(stylized_image, "images/outputs/stylized-" + fn)
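# Usage sketch (hypothetical paths, added for illustration; assumes a
# TransformerNet checkpoint produced by the accompanying training script):
#
#     style_image("images/content/amber.jpg", "checkpoints/mosaic_10000.pth")
#     # -> writes images/outputs/stylized-amber.jpg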
27.205882
73
0.728649
16fe8530cc9bef146858286ca2046c886f8848a1
7,324
py
Python
sdk/python/pulumi_azure_native/avs/v20200717preview/global_reach_connection.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/avs/v20200717preview/global_reach_connection.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/avs/v20200717preview/global_reach_connection.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables

__all__ = ['GlobalReachConnection']


class GlobalReachConnection(pulumi.CustomResource):
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 authorization_key: Optional[pulumi.Input[str]] = None,
                 global_reach_connection_name: Optional[pulumi.Input[str]] = None,
                 peer_express_route_circuit: Optional[pulumi.Input[str]] = None,
                 private_cloud_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        A global reach connection resource

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] authorization_key: Authorization key from the peer express route used for the global reach connection
        :param pulumi.Input[str] global_reach_connection_name: Name of the global reach connection in the private cloud
        :param pulumi.Input[str] peer_express_route_circuit: Identifier of the ExpressRoute Circuit to peer with in the global reach connection
        :param pulumi.Input[str] private_cloud_name: The name of the private cloud.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        """
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['authorization_key'] = authorization_key
            __props__['global_reach_connection_name'] = global_reach_connection_name
            __props__['peer_express_route_circuit'] = peer_express_route_circuit
            if private_cloud_name is None and not opts.urn:
                raise TypeError("Missing required property 'private_cloud_name'")
            __props__['private_cloud_name'] = private_cloud_name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['address_prefix'] = None
            __props__['circuit_connection_status'] = None
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['type'] = None
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:avs/v20200717preview:GlobalReachConnection"), pulumi.Alias(type_="azure-native:avs:GlobalReachConnection"), pulumi.Alias(type_="azure-nextgen:avs:GlobalReachConnection"), pulumi.Alias(type_="azure-native:avs/v20210101preview:GlobalReachConnection"), pulumi.Alias(type_="azure-nextgen:avs/v20210101preview:GlobalReachConnection")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(GlobalReachConnection, __self__).__init__(
            'azure-native:avs/v20200717preview:GlobalReachConnection',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'GlobalReachConnection':
        """
        Get an existing GlobalReachConnection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["address_prefix"] = None
        __props__["authorization_key"] = None
        __props__["circuit_connection_status"] = None
        __props__["name"] = None
        __props__["peer_express_route_circuit"] = None
        __props__["provisioning_state"] = None
        __props__["type"] = None
        return GlobalReachConnection(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> pulumi.Output[str]:
        """
        The network used for global reach carved out from the original network block provided for the private cloud
        """
        return pulumi.get(self, "address_prefix")

    @property
    @pulumi.getter(name="authorizationKey")
    def authorization_key(self) -> pulumi.Output[Optional[str]]:
        """
        Authorization key from the peer express route used for the global reach connection
        """
        return pulumi.get(self, "authorization_key")

    @property
    @pulumi.getter(name="circuitConnectionStatus")
    def circuit_connection_status(self) -> pulumi.Output[str]:
        """
        The connection status of the global reach connection
        """
        return pulumi.get(self, "circuit_connection_status")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="peerExpressRouteCircuit")
    def peer_express_route_circuit(self) -> pulumi.Output[Optional[str]]:
        """
        Identifier of the ExpressRoute Circuit to peer with in the global reach connection
        """
        return pulumi.get(self, "peer_express_route_circuit")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The state of the ExpressRoute Circuit Authorization provisioning
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
44.932515
417
0.669306
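A hypothetical usage sketch for the generated GlobalReachConnection resource above, meant to run inside a Pulumi program (via pulumi up). The import path is inferred from the resource's type token, and every identifier below (resource group, private cloud, circuit ID, authorization key) is a placeholder, not a value from the source.

import pulumi
from pulumi_azure_native.avs.v20200717preview import GlobalReachConnection  # path assumed from the type token

connection = GlobalReachConnection(
    "exampleConnection",
    resource_group_name="example-rg",                     # placeholder resource group
    private_cloud_name="example-private-cloud",           # placeholder private cloud
    global_reach_connection_name="example-connection",
    authorization_key="00000000-0000-0000-0000-000000000000",  # placeholder key
    peer_express_route_circuit="/subscriptions/.../expressRouteCircuits/example",  # placeholder circuit ID
)

pulumi.export("provisioning_state", connection.provisioning_state)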
0aeec95318966b1faebddfa93dcae7b668693156
2,376
py
Python
challenges/2020/python/utils.py
basoares/advent-of-code
3b8216f0e73b12fd879aecea56783b8db7a4bc16
[ "MIT" ]
null
null
null
challenges/2020/python/utils.py
basoares/advent-of-code
3b8216f0e73b12fd879aecea56783b8db7a4bc16
[ "MIT" ]
null
null
null
challenges/2020/python/utils.py
basoares/advent-of-code
3b8216f0e73b12fd879aecea56783b8db7a4bc16
[ "MIT" ]
null
null
null
''' Imports and utility functions '''

#region Imports
import os, sys
import re
from collections import defaultdict, Counter, deque, namedtuple
from itertools import chain, combinations, product, permutations, cycle
from copy import deepcopy
from math import gcd, sqrt, atan2, degrees
from networkx import Graph, DiGraph, all_pairs_shortest_path, transitive_closure, shortest_path_length
#endregion

#region FileInput
#def day_input(day, parser=str.strip, fpath='../input/d{}.txt'):
#    with open(fpath.format(day)) as f:
#        return [parser(l) for l in f]

def day_input(day, parser=str.strip, delimiter='\n', fpath='../input/d{}.txt'):
    with open(fpath.format(day)) as f:
        return [parser(l) for l in f.read().split(delimiter)]
#endregion

#region Utilities
def fst(x):
    '''First element of a pair'''
    return x[0]

def snd(x):
    '''Second element of a pair'''
    return x[1]

def neighbors4(point):
    '''The four neighboring squares'''
    x, y = point
    return [(x-1, y), (x, y-1), (x+1, y), (x, y+1)]

def neighbors8(point):
    '''The eight neighboring squares'''
    x, y = point
    return [(x-1, y), (x-1, y-1), (x, y-1), (x+1, y-1),
            (x+1, y), (x+1, y+1), (x, y+1), (x-1, y+1)]

def distance(a, b):
    "Straightline distance between two points."
    return (((fst(a)-fst(b)) ** 2) + ((snd(a)-snd(b)) ** 2)) ** 0.5

def manhattan_distance(a, b):
    return abs(fst(a) - fst(b)) + abs(snd(a) - snd(b))

def lcm(x, y):
    return x // gcd(x, y) * y
#endregion

#region Lists
def integers(line):
    return [int(i) for i in re.findall(r'-?\d+', line)]

def flatten(l):
    return [e for x in l for e in x]

class DynamicList(list):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __setitem__(self, index, item):
        self.expand(index)
        return super().__setitem__(index, item)

    def __getitem__(self, index):
        self.expand(index)
        return super().__getitem__(index)

    def expand(self, index):
        if isinstance(index, int):
            index += 1
        elif isinstance(index, slice):
            if isinstance(index.start, int) or isinstance(index.stop, int):
                index = max(index.start, index.stop)
        if isinstance(index, int) and index >= len(self):
            self.extend([0] * (index - len(self)))
#endregion
25.548387
102
0.611953
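A quick demonstration of the grid and list helpers above, assuming the module's definitions are in scope (e.g. from utils import *). The sample strings and coordinates are arbitrary.

print(integers("pos=<3, -7>, r=12"))        # -> [3, -7, 12]
print(neighbors4((0, 0)))                   # -> [(-1, 0), (0, -1), (1, 0), (0, 1)]
print(manhattan_distance((0, 0), (3, -7)))  # -> 10

d = DynamicList()
d[5] = 42    # writing past the end auto-extends the list with zeros
print(d)     # -> [0, 0, 0, 0, 0, 42]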
e23e0ab7fe32d520eea505487c2c4d180f7f7927
1,750
py
Python
src/probnum/statespace/generate_samples.py
NinaEffenberger/probnum
595f55f9f235fd0396d02b9a6f828aba2383dceb
[ "MIT" ]
4
2020-12-07T11:56:48.000Z
2021-04-16T14:50:40.000Z
src/probnum/statespace/generate_samples.py
NinaEffenberger/probnum
595f55f9f235fd0396d02b9a6f828aba2383dceb
[ "MIT" ]
42
2021-04-12T08:11:41.000Z
2022-03-28T00:21:55.000Z
src/probnum/statespace/generate_samples.py
NinaEffenberger/probnum
595f55f9f235fd0396d02b9a6f828aba2383dceb
[ "MIT" ]
null
null
null
"""Convenience function(s) for state space models.""" import numpy as np import scipy.stats def generate_samples(dynmod, measmod, initrv, times, random_state=None): """Samples true states and observations at pre-determined timesteps "times" for a state space model. Parameters ---------- dynmod : statespace.Transition Transition model describing the prior dynamics. measmod : statespace.Transition Transition model describing the measurement model. initrv : probnum.RandomVariable object Random variable according to initial distribution times : np.ndarray, shape (n,) Timesteps on which the states are to be sampled. random_state : Random state that is used to generate the samples from the latent state. The measurement samples are not affected by this. Returns ------- states : np.ndarray; shape (len(times), dynmod.dimension) True states according to dynamic model. obs : np.ndarray; shape (len(times), measmod.dimension) Observations according to measurement model. """ obs = np.zeros((len(times), measmod.output_dim)) base_measure_realizations_latent_state = scipy.stats.norm.rvs( size=(times.shape + (measmod.input_dim,)), random_state=random_state ) latent_states = np.array( dynmod.jointly_transform_base_measure_realization_list_forward( base_measure_realizations=base_measure_realizations_latent_state, t=times, initrv=initrv, ) ) for idx, (state, t) in enumerate(zip(latent_states, times)): measured_rv, _ = measmod.forward_realization(state, t=t) obs[idx] = measured_rv.sample() return latent_states, obs
35.714286
85
0.690286
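A self-contained smoke test for generate_samples above, assuming the function is importable. StubDynamics, StubMeasurement and StubRV are hypothetical stand-ins implementing only the attributes and methods the function actually touches; they are not real probnum transition models.

import numpy as np

class StubRV:
    def __init__(self, mean):
        self.mean = mean
    def sample(self):
        # noisy copy of the mean, standing in for a real RandomVariable
        return self.mean + 0.01 * np.random.randn(*np.shape(self.mean))

class StubDynamics:
    def jointly_transform_base_measure_realization_list_forward(
            self, base_measure_realizations, t, initrv):
        # pretend the latent state is a random walk driven by the base measure
        return list(np.cumsum(base_measure_realizations, axis=0))

class StubMeasurement:
    input_dim = 1    # drives the shape of the base measure draws
    output_dim = 1   # drives the shape of the observation array
    def forward_realization(self, realization, t):
        return StubRV(realization), {}

times = np.linspace(0.0, 1.0, 5)
states, obs = generate_samples(StubDynamics(), StubMeasurement(),
                               initrv=None, times=times, random_state=1)
print(states.shape, obs.shape)   # -> (5, 1) (5, 1)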
76de81819675ee3517b3614931ecdfafb0eb6e81
13,462
py
Python
datumaro/plugins/camvid_format.py
chuneuny-emily/datumaro
25d459e0d130f250f071f9de52171d52a454d381
[ "MIT" ]
null
null
null
datumaro/plugins/camvid_format.py
chuneuny-emily/datumaro
25d459e0d130f250f071f9de52171d52a454d381
[ "MIT" ]
null
null
null
datumaro/plugins/camvid_format.py
chuneuny-emily/datumaro
25d459e0d130f250f071f9de52171d52a454d381
[ "MIT" ]
9
2021-05-17T07:00:03.000Z
2021-06-26T02:15:10.000Z
# Copyright (C) 2020 Intel Corporation # # SPDX-License-Identifier: MIT import logging as log import os import os.path as osp from collections import OrderedDict from enum import Enum import numpy as np from datumaro.components.converter import Converter from datumaro.components.extractor import (AnnotationType, CompiledMask, DatasetItem, Importer, LabelCategories, Mask, MaskCategories, SourceExtractor) from datumaro.util import find, str_to_bool from datumaro.util.annotation_util import make_label_id_mapping from datumaro.util.image import save_image from datumaro.util.mask_tools import generate_colormap, lazy_mask, paint_mask CamvidLabelMap = OrderedDict([ ('Void', (0, 0, 0)), ('Animal', (64, 128, 64)), ('Archway', (192, 0, 128)), ('Bicyclist', (0, 128, 192)), ('Bridge', (0, 128, 64)), ('Building', (128, 0, 0)), ('Car', (64, 0, 128)), ('CartLuggagePram', (64, 0, 192)), ('Child', (192, 128, 64)), ('Column_Pole', (192, 192, 128)), ('Fence', (64, 64, 128)), ('LaneMkgsDriv', (128, 0, 192)), ('LaneMkgsNonDriv', (192, 0, 64)), ('Misc_Text', (128, 128, 64)), ('MotorcycycleScooter', (192, 0, 192)), ('OtherMoving', (128, 64, 64)), ('ParkingBlock', (64, 192, 128)), ('Pedestrian', (64, 64, 0)), ('Road', (128, 64, 128)), ('RoadShoulder', (128, 128, 192)), ('Sidewalk', (0, 0, 192)), ('SignSymbol', (192, 128, 128)), ('Sky', (128, 128, 128)), ('SUVPickupTruck', (64, 128, 192)), ('TrafficCone', (0, 0, 64)), ('TrafficLight', (0, 64, 64)), ('Train', (192, 64, 128)), ('Tree', (128, 128, 0)), ('Truck_Bus', (192, 128, 192)), ('Tunnel', (64, 0, 64)), ('VegetationMisc', (192, 192, 0)), ('Wall', (64, 192, 0)) ]) class CamvidPath: LABELMAP_FILE = 'label_colors.txt' SEGM_DIR = "annot" IMAGE_EXT = '.jpg' MASK_EXT = '.png' def parse_label_map(path): if not path: return None label_map = OrderedDict() with open(path, 'r') as f: for line in f: # skip empty and commented lines line = line.strip() if not line or line and line[0] == '#': continue # color, name label_desc = line.strip().split() if 2 < len(label_desc): name = label_desc[3] color = tuple([int(c) for c in label_desc[:-1]]) else: name = label_desc[0] color = None if name in label_map: raise ValueError("Label '%s' is already defined" % name) label_map[name] = color return label_map def write_label_map(path, label_map): with open(path, 'w') as f: for label_name, label_desc in label_map.items(): if label_desc: color_rgb = ' '.join(str(c) for c in label_desc) else: color_rgb = '' f.write('%s %s\n' % (color_rgb, label_name)) def make_camvid_categories(label_map=None): if label_map is None: label_map = CamvidLabelMap # There must always be a label with color (0, 0, 0) at index 0 bg_label = find(label_map.items(), lambda x: x[1] == (0, 0, 0)) if bg_label is not None: bg_label = bg_label[0] else: bg_label = 'background' if bg_label not in label_map: has_colors = any(v is not None for v in label_map.values()) color = (0, 0, 0) if has_colors else None label_map[bg_label] = color label_map.move_to_end(bg_label, last=False) categories = {} label_categories = LabelCategories() for label, desc in label_map.items(): label_categories.add(label) categories[AnnotationType.label] = label_categories has_colors = any(v is not None for v in label_map.values()) if not has_colors: # generate new colors colormap = generate_colormap(len(label_map)) else: # only copy defined colors label_id = lambda label: label_categories.find(label)[0] colormap = { label_id(name): (desc[0], desc[1], desc[2]) for name, desc in label_map.items() } mask_categories = MaskCategories(colormap) 
mask_categories.inverse_colormap # pylint: disable=pointless-statement categories[AnnotationType.mask] = mask_categories return categories class CamvidExtractor(SourceExtractor): def __init__(self, path, subset=None): assert osp.isfile(path), path self._path = path self._dataset_dir = osp.dirname(path) if not subset: subset = osp.splitext(osp.basename(path))[0] super().__init__(subset=subset) self._categories = self._load_categories(self._dataset_dir) self._items = list(self._load_items(path).values()) def _load_categories(self, path): label_map = None label_map_path = osp.join(path, CamvidPath.LABELMAP_FILE) if osp.isfile(label_map_path): label_map = parse_label_map(label_map_path) else: label_map = CamvidLabelMap self._labels = [label for label in label_map] return make_camvid_categories(label_map) def _load_items(self, path): items = {} labels = self._categories[AnnotationType.label]._indices labels = { labels[label_name]: label_name for label_name in labels } with open(path, encoding='utf-8') as f: for line in f: line = line.strip() objects = line.split('\"') if 1 < len(objects): if len(objects) == 5: objects[0] = objects[1] objects[1] = objects[3] else: raise Exception("Line %s: unexpected number " "of quotes in filename" % line) else: objects = line.split() image = objects[0] item_id = osp.splitext(osp.join(*image.split('/')[2:]))[0] image_path = osp.join(self._dataset_dir, image.lstrip('/')) item_annotations = [] if 1 < len(objects): gt = objects[1] gt_path = osp.join(self._dataset_dir, gt.lstrip('/')) mask = lazy_mask(gt_path, self._categories[AnnotationType.mask].inverse_colormap) mask = mask() # loading mask through cache classes = np.unique(mask) for label_id in classes: if labels[label_id] in self._labels: image = self._lazy_extract_mask(mask, label_id) item_annotations.append( Mask(image=image, label=label_id)) items[item_id] = DatasetItem(id=item_id, subset=self._subset, image=image_path, annotations=item_annotations) return items @staticmethod def _lazy_extract_mask(mask, c): return lambda: mask == c class CamvidImporter(Importer): @classmethod def find_sources(cls, path): return cls._find_sources_recursive(path, '.txt', 'camvid', file_filter=lambda p: osp.basename(p) != CamvidPath.LABELMAP_FILE) LabelmapType = Enum('LabelmapType', ['camvid', 'source']) class CamvidConverter(Converter): DEFAULT_IMAGE_EXT = CamvidPath.IMAGE_EXT @classmethod def build_cmdline_parser(cls, **kwargs): parser = super().build_cmdline_parser(**kwargs) parser.add_argument('--apply-colormap', type=str_to_bool, default=True, help="Use colormap for class masks (default: %(default)s)") parser.add_argument('--label-map', type=cls._get_labelmap, default=None, help="Labelmap file path or one of %s" % \ ', '.join(t.name for t in LabelmapType)) def __init__(self, extractor, save_dir, apply_colormap=True, label_map=None, **kwargs): super().__init__(extractor, save_dir, **kwargs) self._apply_colormap = apply_colormap if label_map is None: label_map = LabelmapType.source.name self._load_categories(label_map) def apply(self): os.makedirs(self._save_dir, exist_ok=True) for subset_name, subset in self._extractor.subsets().items(): segm_list = {} for item in subset: image_path = self._make_image_filename(item, subdir=subset_name) if self._save_images: self._save_image(item, osp.join(self._save_dir, image_path)) masks = [a for a in item.annotations if a.type == AnnotationType.mask] if masks: compiled_mask = CompiledMask.from_instance_masks(masks, instance_labels=[self._label_id_mapping(m.label) for m in masks]) 
mask_path = osp.join(subset_name + CamvidPath.SEGM_DIR, item.id + CamvidPath.MASK_EXT) self.save_segm(osp.join(self._save_dir, mask_path), compiled_mask.class_mask) segm_list[item.id] = (image_path, mask_path) else: segm_list[item.id] = (image_path, '') self.save_segm_lists(subset_name, segm_list) self.save_label_map() def save_segm(self, path, mask, colormap=None): if self._apply_colormap: if colormap is None: colormap = self._categories[AnnotationType.mask].colormap mask = paint_mask(mask, colormap) save_image(path, mask, create_dir=True) def save_segm_lists(self, subset_name, segm_list): if not segm_list: return ann_file = osp.join(self._save_dir, subset_name + '.txt') with open(ann_file, 'w', encoding='utf-8') as f: for (image_path, mask_path) in segm_list.values(): image_path = '/' + image_path.replace('\\', '/') mask_path = mask_path.replace('\\', '/') if 1 < len(image_path.split()) or 1 < len(mask_path.split()): image_path = '\"' + image_path + '\"' mask_path = '\"' + mask_path + '\"' f.write('%s %s\n' % (image_path, mask_path)) def save_label_map(self): path = osp.join(self._save_dir, CamvidPath.LABELMAP_FILE) labels = self._extractor.categories()[AnnotationType.label]._indices if len(self._label_map) > len(labels): self._label_map.pop('background') write_label_map(path, self._label_map) def _load_categories(self, label_map_source): if label_map_source == LabelmapType.camvid.name: # use the default Camvid colormap label_map = CamvidLabelMap elif label_map_source == LabelmapType.source.name and \ AnnotationType.mask not in self._extractor.categories(): # generate colormap for input labels labels = self._extractor.categories() \ .get(AnnotationType.label, LabelCategories()) label_map = OrderedDict((item.name, None) for item in labels.items) elif label_map_source == LabelmapType.source.name and \ AnnotationType.mask in self._extractor.categories(): # use source colormap labels = self._extractor.categories()[AnnotationType.label] colors = self._extractor.categories()[AnnotationType.mask] label_map = OrderedDict() for idx, item in enumerate(labels.items): color = colors.colormap.get(idx) if color is not None: label_map[item.name] = color elif isinstance(label_map_source, dict): label_map = OrderedDict( sorted(label_map_source.items(), key=lambda e: e[0])) elif isinstance(label_map_source, str) and osp.isfile(label_map_source): label_map = parse_label_map(label_map_source) else: raise Exception("Wrong labelmap specified, " "expected one of %s or a file path" % \ ', '.join(t.name for t in LabelmapType)) self._categories = make_camvid_categories(label_map) self._label_map = label_map self._label_id_mapping = self._make_label_id_map() def _make_label_id_map(self): map_id, id_mapping, src_labels, dst_labels = make_label_id_mapping( self._extractor.categories().get(AnnotationType.label), self._categories[AnnotationType.label]) void_labels = [src_label for src_id, src_label in src_labels.items() if src_label not in dst_labels] if void_labels: log.warning("The following labels are remapped to background: %s" % ', '.join(void_labels)) log.debug("Saving segmentations with the following label mapping: \n%s" % '\n'.join(["#%s '%s' -> #%s '%s'" % ( src_id, src_label, id_mapping[src_id], self._categories[AnnotationType.label] \ .items[id_mapping[src_id]].name ) for src_id, src_label in src_labels.items() ]) ) return map_id
37.394444
81
0.580894
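A small round-trip sketch for the label-map helpers above, assuming the module is imported so that AnnotationType and the helper functions resolve; the two-label map and the output file name are invented for illustration.

from collections import OrderedDict

label_map = OrderedDict([('background', (0, 0, 0)), ('Car', (64, 0, 128))])
write_label_map('label_colors.txt', label_map)            # writes "R G B name" lines
assert parse_label_map('label_colors.txt') == label_map   # parses them back

categories = make_camvid_categories(label_map)
print(categories[AnnotationType.label].items[0].name)     # -> 'background'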
da18571d5b9802bd69788b3072aea6066daacfa0
1,517
py
Python
examples/integration/annotations/label_set.py
timgates42/bokeh
fb8b07b838f4d07d520cfe899779a11bc89f3c77
[ "BSD-3-Clause" ]
1
2015-01-31T14:42:39.000Z
2015-01-31T14:42:39.000Z
examples/integration/annotations/label_set.py
timgates42/bokeh
fb8b07b838f4d07d520cfe899779a11bc89f3c77
[ "BSD-3-Clause" ]
1
2021-05-11T23:19:27.000Z
2021-05-11T23:19:27.000Z
examples/integration/annotations/label_set.py
timgates42/bokeh
fb8b07b838f4d07d520cfe899779a11bc89f3c77
[ "BSD-3-Clause" ]
1
2020-03-06T07:38:50.000Z
2020-03-06T07:38:50.000Z
from bokeh.io import save
from bokeh.models import ColumnDataSource, LabelSet, LinearAxis, Plot, Range1d

source = ColumnDataSource(data=dict(text=['one', 'two', 'three'],
                                    x1=[1, 4, 7],
                                    x2=[60, 240, 420]))

# Have to specify x/y range as labels aren't included in the plot area solver
plot = Plot(plot_width=600, plot_height=600,
            x_range=Range1d(0, 10), y_range=Range1d(0, 10),
            toolbar_location=None)

label_set1 = LabelSet(x='x1', y=2, x_offset=25, y_offset=25, text="text", source=source,
                      text_font_size='38pt', text_color='red', text_alpha=0.9,
                      text_baseline='bottom', text_align='left',
                      background_fill_color='green', background_fill_alpha=0.2,
                      angle=15, angle_units='deg', render_mode='canvas')

label_set2 = LabelSet(x='x2', y=4, x_units='screen', x_offset=25, y_offset=25,
                      text="text", source=source,
                      text_font_size='38pt', text_color='red', text_alpha=0.9,
                      text_baseline='bottom', text_align='left',
                      background_fill_color='green', background_fill_alpha=0.2,
                      angle=15, angle_units='deg', render_mode='css')

plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')
plot.add_layout(label_set1)
plot.add_layout(label_set2)

save(plot)
42.138889
79
0.57416
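The integration script above exercises many styling options at once; as a counterpoint, here is a minimal sketch of the same LabelSet idea, assumed to run against the same Bokeh version, displayed interactively instead of saved. The data points are arbitrary.

from bokeh.io import show
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure

source = ColumnDataSource(data=dict(x=[2, 5, 8], y=[4, 1, 6], text=['a', 'b', 'c']))
p = figure(x_range=(0, 10), y_range=(0, 10))
p.circle(x='x', y='y', source=source)
p.add_layout(LabelSet(x='x', y='y', text='text', x_offset=5, y_offset=5, source=source))
show(p)   # opens the plot in a browser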
a0c3f0dac36f9e7016e9660fe24ac5f4c67ffe6c
3,463
py
Python
setup.py
itomaldonado/PyTravisCI
0b6d4ce84d7b0c596e3174d96753a2c1d8e4b280
[ "MIT" ]
3
2020-03-18T09:15:40.000Z
2021-12-05T11:09:23.000Z
setup.py
itomaldonado/PyTravisCI
0b6d4ce84d7b0c596e3174d96753a2c1d8e4b280
[ "MIT" ]
4
2020-12-12T16:10:13.000Z
2021-04-12T07:07:08.000Z
setup.py
itomaldonado/PyTravisCI
0b6d4ce84d7b0c596e3174d96753a2c1d8e4b280
[ "MIT" ]
1
2021-03-11T15:33:46.000Z
2021-03-11T15:33:46.000Z
""" Just another Python API for Travis CI (API). Author Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom Project link https://github.com/funilrys/PyTravisCI License :: MIT License Copyright (c) 2019 Nissar Chababy Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import re from unittest import TestLoader from setuptools import setup, find_packages PACKAGE_NAME = "PyTravisCI" def test_suite(): """ Discover the tests. """ test_loader = TestLoader() discovered_tests = test_loader.discover("tests", pattern="test_*.py") return discovered_tests def get_requirements(): """ Extract all requirements from requirements.txt. """ with open("requirements.txt") as file: requirements = file.read().splitlines() return requirements def get_version(): """ Extract the version from {PACKAGE_NAME}/__init__.py """ to_match = re.compile(r'__version__\s+=\s+"(.*)"') with open(f"{PACKAGE_NAME}/__about__.py", encoding="utf-8") as file_stream: return to_match.findall(file_stream.read())[0] def get_long_description(): """ Extract the long description from README.rst. """ with open("README.rst", encoding="utf-8") as file_stream: return file_stream.read() if __name__ == "__main__": setup( name=PACKAGE_NAME, version=get_version(), python_requires=">=3.6, <4", install_requires=get_requirements(), description="Just another Python API for Travis CI (API).", long_description=get_long_description(), author="funilrys", author_email="contact@funilrys.com", license="MIT", url="https://github.com/funilrys/PyTravisCI", platforms=["any"], packages=find_packages(exclude=("*.tests", "*.tests.*", "tests.*", "tests")), keywords=["Travis CI", "Travis", "CI", "API"], classifiers=[ "Environment :: Console", "Topic :: Internet", "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "License :: OSI Approved", "License :: OSI Approved :: MIT License", ], test_suite="setup.test_suite", )
30.377193
86
0.65117
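A tiny standalone check of the version-extraction regex used by get_version() above; the sample __about__.py content is made up for illustration.

import re

sample = '__version__ = "1.2.3"\n__author__ = "funilrys"\n'
to_match = re.compile(r'__version__\s+=\s+"(.*)"')
print(to_match.findall(sample)[0])   # -> 1.2.3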
6ceb6c1a3107d099c85149be0cad8989f744d8da
7,242
py
Python
albumy/blueprints/admin.py
nandou1006/albumy
1dc339c3dccc1d4fc6cb94dc1da2858ee829d695
[ "MIT" ]
null
null
null
albumy/blueprints/admin.py
nandou1006/albumy
1dc339c3dccc1d4fc6cb94dc1da2858ee829d695
[ "MIT" ]
null
null
null
albumy/blueprints/admin.py
nandou1006/albumy
1dc339c3dccc1d4fc6cb94dc1da2858ee829d695
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ :author: Grey Li :url: http://greyli.com :copyright: © 2018 Grey Li <withlihui@gmail.com> :license: MIT, see LICENSE for more details. """ from flask import render_template, flash, Blueprint, request, current_app from flask_login import login_required from albumy.decorators import admin_required, permission_required from albumy.extensions import db from albumy.forms.admin import EditProfileAdminForm from albumy.models import Role, User, Tag, Photo, Comment from albumy.utils import redirect_back admin_bp = Blueprint('admin', __name__) @admin_bp.route('/') @login_required @permission_required('MODERATE') def index(): user_count = User.query.count() locked_user_count = User.query.filter_by(locked=True).count() blocked_user_count = User.query.filter_by(active=False).count() photo_count = Photo.query.count() reported_photos_count = Photo.query.filter(Photo.flag > 0).count() tag_count = Tag.query.count() comment_count = Comment.query.count() reported_comments_count = Comment.query.filter(Comment.flag > 0).count() return render_template('admin/index.html', user_count=user_count, photo_count=photo_count, tag_count=tag_count, comment_count=comment_count, locked_user_count=locked_user_count, blocked_user_count=blocked_user_count, reported_comments_count=reported_comments_count, reported_photos_count=reported_photos_count) @admin_bp.route('/profile/<int:user_id>', methods=['GET', 'POST']) @login_required @admin_required def edit_profile_admin(user_id): user = User.query.get_or_404(user_id) form = EditProfileAdminForm(user=user) if form.validate_on_submit(): user.name = form.name.data role = Role.query.get(form.role.data) if role.name == 'Locked': user.lock() user.role = role user.bio = form.bio.data user.website = form.website.data user.confirmed = form.confirmed.data user.active = form.active.data user.location = form.location.data user.username = form.username.data user.email = form.email.data db.session.commit() flash('Profile updated.', 'success') return redirect_back() form.name.data = user.name form.role.data = user.role_id form.bio.data = user.bio form.website.data = user.website form.location.data = user.location form.username.data = user.username form.email.data = user.email form.confirmed.data = user.confirmed form.active.data = user.active return render_template('admin/edit_profile.html', form=form, user=user) @admin_bp.route('/block/user/<int:user_id>', methods=['POST']) @login_required @permission_required('MODERATE') def block_user(user_id): user = User.query.get_or_404(user_id) if user.role.name in ['Administrator', 'Moderator']: flash('Permission denied.', 'warning') else: user.block() flash('Account blocked.', 'info') return redirect_back() @admin_bp.route('/unblock/user/<int:user_id>', methods=['POST']) @login_required @permission_required('MODERATE') def unblock_user(user_id): user = User.query.get_or_404(user_id) user.unblock() flash('Block canceled.', 'info') return redirect_back() @admin_bp.route('/lock/user/<int:user_id>', methods=['POST']) @login_required @permission_required('MODERATE') def lock_user(user_id): user = User.query.get_or_404(user_id) if user.role.name in ['Administrator', 'Moderator']: flash('Permission denied.', 'warning') else: user.lock() flash('Account locked.', 'info') return redirect_back() @admin_bp.route('/unlock/user/<int:user_id>', methods=['POST']) @login_required @permission_required('MODERATE') def unlock_user(user_id): user = User.query.get_or_404(user_id) user.unlock() flash('Lock canceled.', 'info') return 
redirect_back() @admin_bp.route('/delete/tag/<int:tag_id>', methods=['GET', 'POST']) @login_required @permission_required('MODERATE') def delete_tag(tag_id): tag = Tag.query.get_or_404(tag_id) db.session.delete(tag) db.session.commit() flash('Tag deleted.', 'info') return redirect_back() @admin_bp.route('/manage/user') @login_required @permission_required('MODERATE') def manage_user(): filter_rule = request.args.get('filter', 'all') # 'all', 'locked', 'blocked', 'administrator', 'moderator' page = request.args.get('page', 1, type=int) per_page = current_app.config['ALBUMY_MANAGE_USER_PER_PAGE'] administrator = Role.query.filter_by(name='Administrator').first() moderator = Role.query.filter_by(name='Moderator').first() if filter_rule == 'locked': filtered_users = User.query.filter_by(locked=True) elif filter_rule == 'blocked': filtered_users = User.query.filter_by(active=False) elif filter_rule == 'administrator': filtered_users = User.query.filter_by(role=administrator) elif filter_rule == 'moderator': filtered_users = User.query.filter_by(role=moderator) else: filtered_users = User.query pagination = filtered_users.order_by(User.member_since.desc()).paginate(page, per_page) users = pagination.items return render_template('admin/manage_user.html', pagination=pagination, users=users) @admin_bp.route('/manage/photo', defaults={'order': 'by_flag'}) @admin_bp.route('/manage/photo/<order>') @login_required @permission_required('MODERATE') def manage_photo(order): page = request.args.get('page', 1, type=int) per_page = current_app.config['ALBUMY_MANAGE_PHOTO_PER_PAGE'] order_rule = 'flag' if order == 'by_time': pagination = Photo.query.order_by(Photo.timestamp.desc()).paginate(page, per_page) order_rule = 'time' else: pagination = Photo.query.order_by(Photo.flag.desc()).paginate(page, per_page) photos = pagination.items return render_template('admin/manage_photo.html', pagination=pagination, photos=photos, order_rule=order_rule) @admin_bp.route('/manage/tag') @login_required @permission_required('MODERATE') def manage_tag(): page = request.args.get('page', 1, type=int) per_page = current_app.config['ALBUMY_MANAGE_TAG_PER_PAGE'] pagination = Tag.query.order_by(Tag.id.desc()).paginate(page, per_page) tags = pagination.items return render_template('admin/manage_tag.html', pagination=pagination, tags=tags) @admin_bp.route('/manage/comment', defaults={'order': 'by_flag'}) @admin_bp.route('/manage/comment/<order>') @login_required @permission_required('MODERATE') def manage_comment(order): page = request.args.get('page', 1, type=int) per_page = current_app.config['ALBUMY_MANAGE_COMMENT_PER_PAGE'] order_rule = 'flag' if order == 'by_time': pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(page, per_page) order_rule = 'time' else: pagination = Comment.query.order_by(Comment.flag.desc()).paginate(page, per_page) comments = pagination.items return render_template('admin/manage_comment.html', pagination=pagination, comments=comments, order_rule=order_rule)
36.575758
120
0.703535
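A hypothetical sketch of wiring the admin blueprint above into a Flask application factory. The real albumy factory also initializes extensions (database, login manager, and so on); this shows only the registration pattern, and the config value is an example.

from flask import Flask
from albumy.blueprints.admin import admin_bp

def create_app():
    app = Flask(__name__)
    app.config['ALBUMY_MANAGE_USER_PER_PAGE'] = 20   # consumed by manage_user()
    app.register_blueprint(admin_bp, url_prefix='/admin')
    return app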
46545c82a3826a31b5e646f1de593769a00e7ba8
665
py
Python
backend/eatandmeet/api/serializers.py
lysnikolaou/eatandmeet
fd9857f61d6e2e9861a3f4d477089af320181687
[ "MIT" ]
2
2019-07-05T09:00:19.000Z
2019-08-13T12:09:42.000Z
backend/eatandmeet/api/serializers.py
lysnikolaou/eatandmeet
fd9857f61d6e2e9861a3f4d477089af320181687
[ "MIT" ]
55
2019-05-20T16:24:39.000Z
2022-03-11T23:50:04.000Z
backend/eatandmeet/api/serializers.py
lysnikolaou/eatandmeet
fd9857f61d6e2e9861a3f4d477089af320181687
[ "MIT" ]
null
null
null
from rest_framework import serializers, fields

from .models import Event, Comment, TOPIC_CHOICES


class EventSerializer(serializers.ModelSerializer):
    topics = fields.MultipleChoiceField(choices=TOPIC_CHOICES)

    class Meta:
        model = Event
        fields = ('id', 'event_creator', 'title', 'date', 'topics', 'location',
                  'event_members', 'event_admins', 'slots', 'description', 'day')
        read_only_fields = ('event_creator', 'id')


class CommentSerializer(serializers.ModelSerializer):
    class Meta:
        model = Comment
        fields = ('event', 'user', 'date', 'text', 'id')
        read_only_fields = ('event', 'user', 'date', 'id')
31.666667
140
0.664662
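A hypothetical validation sketch for EventSerializer above. The payload keys mirror Meta.fields, but every value is invented and the topic string is only assumed to be a valid TOPIC_CHOICES entry, so treat this as a call-pattern illustration rather than a guaranteed-passing example.

payload = {
    'title': 'Lunch meetup',
    'date': '2021-05-01T12:00:00Z',
    'topics': ['sports'],            # MultipleChoiceField accepts a list or set
    'location': 'Cafeteria',
    'event_members': [],
    'event_admins': [],
    'slots': 4,
    'description': 'Meet over lunch.',
    'day': 'Friday',
}
serializer = EventSerializer(data=payload)
print(serializer.is_valid(), serializer.errors)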
d5b2732e7b57e793e967c300de1f28a27b6267a8
8,132
py
Python
master/doctest.py
Keno/julia-buildbot
9bbd4068797ae88d59d5dd2a7c41bdaed64110a9
[ "MIT" ]
1
2021-06-20T02:46:51.000Z
2021-06-20T02:46:51.000Z
master/doctest.py
Keno/julia-buildbot
9bbd4068797ae88d59d5dd2a7c41bdaed64110a9
[ "MIT" ]
null
null
null
master/doctest.py
Keno/julia-buildbot
9bbd4068797ae88d59d5dd2a7c41bdaed64110a9
[ "MIT" ]
null
null
null
julia_doctest_factory = util.BuildFactory() julia_doctest_factory.useProgress = True julia_doctest_factory.addSteps([ # Fetch first (allowing failure if no existing clone is present) steps.ShellCommand( name="git fetch", command=["git", "fetch", "--tags", "--all", "--force"], flunkOnFailure=False ), # Clone julia steps.Git( name="Julia checkout", repourl=util.Property('repository', default='git://github.com/JuliaLang/julia.git'), mode='full', method='fresh', submodules=True, clobberOnFailure=True, progress=True, retryFetch=True, getDescription={'--tags': True}, ), # Make Julia itself steps.ShellCommand( name="make release", command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -j%(prop:nthreads)s JULIA_PRECOMPILE=0 %(prop:flags)s %(prop:extra_make_flags)s release")], haltOnFailure = True, # Fail out if 60 minutes have gone by with nothing printed to stdout timeout=60*60, # Kill everything if the overall job has taken more than 2 hours maxTime=60*60*2, # Give the process 10 seconds to print out the current backtraces when being killed sigtermTime=10, ), steps.ShellCommand( name="make doctest", command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -C doc JULIA_PRECOMPILE=0 -j%(prop:nthreads)s %(prop:flags)s %(prop:extra_make_flags)s doctest=true")], haltOnFailure = True, # Fail out if 60 minutes have gone by with nothing printed to stdout timeout=60*60, # Kill everything if the overall job has taken more than 2 hours maxTime=60*60*2, # Give the process 10 seconds to print out the current backtraces when being killed sigtermTime=10, ), steps.ShellCommand( name="make deploy", command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -C doc JULIA_PRECOMPILE=0 %(prop:flags)s %(prop:extra_make_flags)s deploy")], haltOnFailure=True, env={ 'DOCUMENTER_KEY': DOCUMENTER_KEY, 'TRAVIS_PULL_REQUEST': 'false', }, doStepIf=is_protected_pr, ), # Get JULIA_VERSION and JULIA_COMMIT from the build system steps.SetPropertyFromCommand( name="Get JULIA_VERSION", command=[util.Interpolate("%(prop:make_cmd)s"), "print-JULIA_VERSION"], extract_fn=lambda rc, stdout, stderr: {"JULIA_VERSION": stdout[stdout.find('=')+1:].strip()} ), steps.SetPropertyFromCommand( name="Get JULIA_COMMIT", command=[util.Interpolate("%(prop:make_cmd)s"), "print-JULIA_COMMIT"], extract_fn=lambda rc, stdout, stderr: {"JULIA_COMMIT": stdout[stdout.find('=')+1:].strip()} ), # We've already got Julia and the docs built; so let's build the source tarballs too steps.ShellCommand( name="clean out srccache", command=["/bin/sh", "-c", "rm -rf deps/srccache"], ), steps.ShellCommand( name="make light-source-dist", command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -j%(prop:nthreads)s JULIA_PRECOMPILE=0 USE_BINARYBUILDER=0 light-source-dist")], haltOnFailure = True, doStepIf=is_protected_pr, ), steps.FileUpload( name="Upload light source tarball", workersrc=util.Interpolate("julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s.tar.gz"), masterdest=util.Interpolate("/tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s.tar.gz"), doStepIf=is_protected_pr, hideStepIf=lambda results, s: results==SKIPPED, ), steps.ShellCommand( name="clean out srccache", command=["/bin/sh", "-c", "rm -rf deps/srccache"], ), steps.ShellCommand( name="make full-source-dist (without BB)", command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -j%(prop:nthreads)s JULIA_PRECOMPILE=0 USE_BINARYBUILDER=0 full-source-dist")], haltOnFailure = True, doStepIf=is_protected_pr, ), steps.FileUpload( name="Upload full source tarball", 
workersrc=util.Interpolate("julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full.tar.gz"), masterdest=util.Interpolate("/tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full.tar.gz"), doStepIf=is_protected_pr, hideStepIf=lambda results, s: results==SKIPPED, ), steps.ShellCommand( name="clean out srccache", command=["/bin/sh", "-c", "rm -rf deps/srccache"], ), steps.ShellCommand( name="make full-source-dist (with BB)", command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -j%(prop:nthreads)s JULIA_PRECOMPILE=0 USE_BINARYBUILDER=1 full-source-dist")], haltOnFailure = True, doStepIf=is_protected_pr, ), steps.FileUpload( name="Upload full source+bb tarball", workersrc=util.Interpolate("julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full.tar.gz"), masterdest=util.Interpolate("/tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full+bb.tar.gz"), doStepIf=is_protected_pr, hideStepIf=lambda results, s: results==SKIPPED, ), # Sign and upload on the master steps.MasterShellCommand( name="gpg sign light source tarball on master", command=["sh", "-c", util.Interpolate("/root/sign_tarball.sh /tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s.tar.gz")], doStepIf=is_protected_pr, hideStepIf=lambda results, s: results==SKIPPED, ), steps.MasterShellCommand( name="gpg sign full source tarball on master", command=["sh", "-c", util.Interpolate("/root/sign_tarball.sh /tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full.tar.gz")], doStepIf=is_protected_pr, hideStepIf=lambda results, s: results==SKIPPED, ), steps.MasterShellCommand( name="gpg sign full+bb source tarball on master", command=["sh", "-c", util.Interpolate("/root/sign_tarball.sh /tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full+bb.tar.gz")], doStepIf=is_protected_pr, hideStepIf=lambda results, s: results==SKIPPED, ), steps.MasterShellCommand( name="Upload source tarballs to AWS", command=render_srcdist_upload_command, haltOnFailure=True, doStepIf=is_protected_pr, hideStepIf=lambda results, s: results==SKIPPED, ), steps.MasterShellCommand( name="Cleanup Master", command=["sh", "-c", util.Interpolate("rm -vf /tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s* ;")], flunkOnFailure=False, haltOnFailure=False, ), ]) c['schedulers'].append(schedulers.AnyBranchScheduler( name="Julia Doctesting and source upload", change_filter=util.ChangeFilter(filter_fn=julia_branch_nonskip_filter), builderNames=["doctest_linux64"], treeStableTimer=1, )) # Add workers for these jobs c['builders'].append(util.BuilderConfig( name="doctest_linux64", workernames=builder_mapping["linux64"], collapseRequests=False, tags=["Packaging"], factory=julia_doctest_factory, )) # Add a scheduler for building release candidates/triggering builds manually c['schedulers'].append(schedulers.ForceScheduler( name="doctest", label="Force doctest", builderNames=["doctest_linux64"], reason=util.FixedParameter(name="reason", default=""), codebases=[ util.CodebaseParameter( "", name="", branch=util.FixedParameter(name="branch", default=""), repository=util.FixedParameter(name="repository", default=""), project=util.FixedParameter(name="project", default="Packaging"), ) ], properties=[ util.StringParameter( name="extra_make_flags", label="Extra Make Flags", size=30, default="", ), ], ))
40.457711
173
0.648549
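A minimal sketch of the buildbot pattern the file above is built from: a BuildFactory holding steps, attached to a BuilderConfig. All names here are illustrative, not part of the real configuration.

from buildbot.plugins import steps, util

factory = util.BuildFactory()
factory.addSteps([
    steps.Git(repourl='git://github.com/JuliaLang/julia.git', mode='full'),
    steps.ShellCommand(
        name='make release',
        command=['/bin/sh', '-c', util.Interpolate('make -j%(prop:nthreads)s release')],
        haltOnFailure=True,
    ),
])

builder = util.BuilderConfig(name='example_builder',
                             workernames=['worker1'],   # placeholder worker
                             factory=factory)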
0c800815b98128f5fcd530f4b6d7e5fdaaa9083b
5,859
py
Python
pytype/errors_test.py
JelleZijlstra/pytype
962a0ebc05bd24dea172381b2bedcc547ba53dd5
[ "Apache-2.0" ]
11
2017-02-12T12:19:50.000Z
2022-03-06T08:56:48.000Z
pytype/errors_test.py
JelleZijlstra/pytype
962a0ebc05bd24dea172381b2bedcc547ba53dd5
[ "Apache-2.0" ]
null
null
null
pytype/errors_test.py
JelleZijlstra/pytype
962a0ebc05bd24dea172381b2bedcc547ba53dd5
[ "Apache-2.0" ]
2
2017-06-27T14:41:57.000Z
2021-12-05T11:27:33.000Z
"""Test errors.py.""" import collections import csv import textwrap from pytype import errors from pytype import utils import unittest _TEST_ERROR = "test-error" _MESSAGE = "an error message" FakeCode = collections.namedtuple("FakeCode", "co_filename co_name") class FakeOpcode(object): def __init__(self, filename, line, methodname): self.code = FakeCode(filename, methodname) self.line = line class ErrorTest(unittest.TestCase): @errors._error_name(_TEST_ERROR) def test_init(self): e = errors.Error(errors.SEVERITY_ERROR, _MESSAGE, filename="foo.py", lineno=123, column=2, linetext="hello", methodname="foo") self.assertEquals(errors.SEVERITY_ERROR, e._severity) self.assertEquals(_MESSAGE, e._message) self.assertEquals(e._name, _TEST_ERROR) self.assertEquals("foo.py", e._filename) self.assertEquals(123, e._lineno) self.assertEquals(2, e._column) self.assertEquals("hello", e._linetext) self.assertEquals("foo", e._methodname) @errors._error_name(_TEST_ERROR) def test_at_opcode(self): # Opcode of None. e = errors.Error.at_opcode(None, errors.SEVERITY_ERROR, _MESSAGE) self.assertEquals(errors.SEVERITY_ERROR, e._severity) self.assertEquals(_MESSAGE, e._message) self.assertEquals(e._name, _TEST_ERROR) self.assertEquals(None, e._filename) self.assertEquals(0, e._lineno) self.assertEquals(None, e._column) self.assertEquals(None, e._linetext) self.assertEquals(None, e._methodname) # Opcode of None. op = FakeOpcode("foo.py", 123, "foo") e = errors.Error.at_opcode(op, errors.SEVERITY_ERROR, _MESSAGE) self.assertEquals(errors.SEVERITY_ERROR, e._severity) self.assertEquals(_MESSAGE, e._message) self.assertEquals(e._name, _TEST_ERROR) self.assertEquals("foo.py", e._filename) self.assertEquals(123, e._lineno) self.assertEquals(None, e._column) self.assertEquals(None, e._linetext) self.assertEquals("foo", e._methodname) def test__error_name(self): # This should be true as long as at least one method is annotated with # _error_name(_TEST_ERROR). self.assertIn(_TEST_ERROR, errors._ERROR_NAMES) def test_no_error_name(self): # It is illegal to create an error outside of an @error_name annotation. 
self.assertRaises(AssertionError, errors.Error, errors.SEVERITY_ERROR, _MESSAGE) @errors._error_name(_TEST_ERROR) def test_str(self): e = errors.Error(errors.SEVERITY_ERROR, _MESSAGE, filename="foo.py", lineno=123, column=2, linetext="hello", methodname="foo") self.assertEquals( 'File "foo.py", line 123, in foo: an error message [test-error]', str(e)) def test_from_csv_row(self): row = ["a.py", "123", "index-error", "This is an error", "with\nsome\ndetails: 1, 2, 3"] error = errors.Error.from_csv_row(row) self.assertEquals(error.filename, row[0]) self.assertEquals(error.lineno, int(row[1])) self.assertEquals(error.name, row[2]) self.assertEquals(error.message, row[3] + "\n" + row[4]) @errors._error_name(_TEST_ERROR) def test_write_to_csv(self): errorlog = errors.ErrorLog() op = FakeOpcode("foo.py", 123, "foo") message, details = "This is an error", "with\nsome\ndetails: \"1\", 2, 3" errorlog.error(op, message, details + "0") errorlog.error(op, message, details + "1") with utils.Tempdir() as d: filename = d.create_file("errors.csv") error = errorlog.print_to_csv_file(filename) with open(filename, "rb") as fi: rows = list(csv.reader(fi, delimiter=",")) self.assertEquals(2, len(rows)) for i, row in enumerate(rows): error = errors.Error.from_csv_row(row) self.assertEquals(error.filename, "foo.py") self.assertEquals(error.lineno, 123) self.assertEquals(error.name, _TEST_ERROR) self.assertEquals(error.message, message + "\n" + details + str(i)) class ErrorLogBaseTest(unittest.TestCase): @errors._error_name(_TEST_ERROR) def test_error(self): errorlog = errors.ErrorLog() op = FakeOpcode("foo.py", 123, "foo") errorlog.error(op, "unknown attribute %s" % "xyz") self.assertEquals(1, len(errorlog)) e = list(errorlog)[0] # iterate the log and save the first error. self.assertEquals(errors.SEVERITY_ERROR, e._severity) self.assertEquals("unknown attribute xyz", e._message) self.assertEquals(e._name, _TEST_ERROR) self.assertEquals("foo.py", e._filename) @errors._error_name(_TEST_ERROR) def test_error_with_details(self): errorlog = errors.ErrorLog() errorlog.error(None, "My message", "one\ntwo") self.assertEquals(textwrap.dedent("""\ My message [test-error] one two """), str(errorlog)) @errors._error_name(_TEST_ERROR) def test_warn(self): errorlog = errors.ErrorLog() op = FakeOpcode("foo.py", 123, "foo") errorlog.warn(op, "unknown attribute %s", "xyz") self.assertEquals(1, len(errorlog)) e = list(errorlog)[0] # iterate the log and save the first error. self.assertEquals(errors.SEVERITY_WARNING, e._severity) self.assertEquals("unknown attribute xyz", e._message) self.assertEquals(e._name, _TEST_ERROR) self.assertEquals("foo.py", e._filename) @errors._error_name(_TEST_ERROR) def test_has_error(self): errorlog = errors.ErrorLog() self.assertFalse(errorlog.has_error()) # A warning is part of the error log, but isn't severe. errorlog.warn(None, "A warning") self.assertEquals(1, len(errorlog)) self.assertFalse(errorlog.has_error()) # An error is severe. errorlog.error(None, "An error") self.assertEquals(2, len(errorlog)) self.assertTrue(errorlog.has_error()) if __name__ == "__main__": unittest.main()
35.72561
78
0.687489
02f2892e7e02122a6a6b515d746a195fd27bd4f9
302
py
Python
betfund_event_broker/tasks/mongo/__init__.py
betfund/betfund-event-broker
524aec73d9cf66cbeeb0fab67e6816b836c1d98e
[ "MIT" ]
1
2020-09-23T02:36:35.000Z
2020-09-23T02:36:35.000Z
betfund_event_broker/tasks/mongo/__init__.py
betfund/betfund-event-broker
524aec73d9cf66cbeeb0fab67e6816b836c1d98e
[ "MIT" ]
5
2020-04-13T23:55:07.000Z
2020-06-04T15:09:12.000Z
betfund_event_broker/tasks/mongo/__init__.py
betfund/betfund-event-broker
524aec73d9cf66cbeeb0fab67e6816b836c1d98e
[ "MIT" ]
null
null
null
"""Betfund Event Broker MongoDB namesapce.""" from .base_task import MongoTask from .find import MongoFindEvents from .upsert_events import MongoEventsUpsert from .upsert_odds import MongoOddsUpsert __all__ = [ "MongoFindEvents", "MongoEventsUpsert", "MongoOddsUpsert", "MongoTask" ]
21.571429
45
0.761589
21b7e280aa5854449ab8af0895d6f286ee0e92e4
1,044
py
Python
Misc/Legacy/Avg_data_getter.py
BadenLab/2Panalysis
8b23b3eaa1700f4fb5729eb7e89bfd55e7b4811a
[ "MIT" ]
null
null
null
Misc/Legacy/Avg_data_getter.py
BadenLab/2Panalysis
8b23b3eaa1700f4fb5729eb7e89bfd55e7b4811a
[ "MIT" ]
null
null
null
Misc/Legacy/Avg_data_getter.py
BadenLab/2Panalysis
8b23b3eaa1700f4fb5729eb7e89bfd55e7b4811a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 2 12:27:40 2021

@author: skrem
"""

import pandas as pd


def Avg_data_getter(file):
    """A function which gets and prepares data, as well as returning some
    additional params like the number of ROIs and their corresponding labels.

    Parameters
    ---------
    File: The directory of a CSV .txt file containing the data, with each ROI
    represented as individual columns in the file.

    Returns
    -------
    Stimulus DataFrame, stimulus array (alt), number of ROIs, and their labels
    """

    avgs_data = pd.read_csv((file), sep="\t", header=None, engine="python")

    # Label data
    ROI_num = avgs_data.shape[1]

    "Can put optional manipulations to data here"
    # Consider making data loading its own function?

    averages_dataframe = avgs_data  # pd.read_csv((file), sep="\t", header=None)  # , names = labels)
    averages_array = pd.DataFrame.to_numpy(averages_dataframe)

    return averages_dataframe, averages_array, ROI_num
29
98
0.662835
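A hypothetical call to the loader above; 'responses.txt' stands in for any tab-separated export with one column per ROI.

df, arr, n_rois = Avg_data_getter('responses.txt')
print(n_rois, arr.shape)   # number of ROIs and the (samples, ROIs) array shape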
7b71ff6bb90097ccaf9e46989e8ea99594ac6ff2
6,110
py
Python
indicators/moneyflowindex.py
webclinic017/pyStock-1
4ed6bf20130dcfc37d542bd5b3aec505a12f3106
[ "MIT" ]
45
2020-01-29T18:17:02.000Z
2022-02-13T04:52:46.000Z
indicators/moneyflowindex.py
webclinic017/pyStock-1
4ed6bf20130dcfc37d542bd5b3aec505a12f3106
[ "MIT" ]
27
2020-04-29T12:11:14.000Z
2021-03-13T08:54:33.000Z
indicators/moneyflowindex.py
webclinic017/pyStock-1
4ed6bf20130dcfc37d542bd5b3aec505a12f3106
[ "MIT" ]
8
2020-02-15T10:59:35.000Z
2020-12-04T20:01:55.000Z
# Add import from parent directory possible
import pandas as pd
import matplotlib.pyplot as plt
from helpers.DataOperations import *
from core.ReportSignals import *
from helpers.Stock import *
from core.indicator import indicator

# Creates MoneyFlowIndex object
def CreateMoneyFlowIndex(high, low, close, volume, info, n=14):
    return MoneyFlowIndex(high, low, close, volume, info, n)

# MoneyFlowIndex object which creates MoneyFlowIndex data
class MoneyFlowIndex(indicator):

    def __init__(self, high, low, close, volume, info, n=14):
        indicator.__init__(self, 'MFI%u' % n, 'momentum', close.index)
        self.n = n
        self.info = info
        self.typicalPrice = (high + low + close) / 3
        self.moneyFlow, self.posFlow, self.negFlow, self.mfi = self.InitMoneyFlow(
            self.typicalPrice, volume, n)
        # money on the market plot
        self.moneyMarket = self.moneyFlow.cumsum()

        # Signals
        fromBottom, fromTop = FindIntersections(self.mfi, 20)
        self.buy = fromBottom
        fromBottom, fromTop = FindIntersections(self.mfi, 80)
        self.sell = fromTop

        # TrendToFall / TrendToRise
        fromBottom, fromTop = FindIntersections(self.mfi, 10)
        self.buyStrong = fromBottom
        fromBottom, fromTop = FindIntersections(self.mfi, 90)
        self.sellStrong = fromTop

    # returns MoneyFlow
    def GetMoneyFlow(self):
        return self.moneyFlow

    # Set MoneyFlowIndex indicator
    @staticmethod
    def InitMoneyFlow(tp, volume, n):
        moneyFlow = tp * volume
        posFlow = pd.Series()
        negFlow = pd.Series()
        for i in range(1, len(moneyFlow)):
            if (moneyFlow[i] >= 0):
                posFlow = posFlow.append(
                    pd.Series(moneyFlow.values[i], index=[moneyFlow.index[i]]))
                negFlow = negFlow.append(
                    pd.Series(0, index=[moneyFlow.index[i]]))
            else:
                posFlow = posFlow.append(
                    pd.Series(0, index=[moneyFlow.index[i]]))
                negFlow = negFlow.append(
                    pd.Series(abs(moneyFlow.values[i]), index=[moneyFlow.index[i]]))
        posFlowAvg = CreateMovingAverage(posFlow, n)
        negFlowAvg = CreateMovingAverage(negFlow, n)
        moneyFlowIndex = (100 * posFlowAvg) / (posFlowAvg + negFlowAvg)
        return moneyFlow, posFlow, negFlow, moneyFlowIndex

    # Export indicator signals to report
    def ExportSignals(self, reportSignals):
        reportSignals.AddDataframeSignals(self.buy, 'MFI', 'buy')
        reportSignals.AddDataframeSignals(self.sell, 'MFI', 'sell')
        reportSignals.AddDataframeSignals(self.buyStrong, 'MFI', 'buyStrong')
        reportSignals.AddDataframeSignals(self.sellStrong, 'MFI', 'sellStrong')

    # returns -100...100 value
    def GetUnifiedValue(self):
        return (self.mfi[-1] - 50) * 2

    # Plot method
    def PlotPosNegFlow(self):
        plt.bar(self.negFlow.index, self.negFlow, color='red', label='')
        plt.bar(self.posFlow.index, self.posFlow, color='green', label='')
        # MoneyFlowIndex
        # plt.plot(self.toNumIndex(self.posFlow), self.posFlow, label='PosFlow' + str(self.n), linewidth=1.0, color = 'green')
        # plt.plot(self.toNumIndex(self.negFlow), self.negFlow, label='NegFlow' + str(self.n), linewidth=1.0, color = 'red')

    # Plot method
    def Plot(self):
        # MoneyFlowIndex
        plt.plot(self.toNumIndex(self.mfi), self.mfi, label='MFI' + str(self.n),
                 linewidth=1.0, color='#000000')
        # OverBought
        overBought = CreateHorizontalLine(self.mfi.index, 80, 80, True)
        plt.plot(self.toNumIndex(overBought), overBought, '--',
                 label='Overbought', linewidth=1.0, color='#940006')
        # x_axis = self.toNumIndex(self.mfi)
        # plt.fill_between(x_axis, self.mfi, overBought['value'],
        #                  where=self.mfi>overBought.values, color='#ffb3b3')
        # OverBought - Gene Quong and Avrum Soudack
        overBought = CreateHorizontalLine(self.mfi.index, 90, 90)
        plt.plot(self.toNumIndex(overBought), overBought, '--',
                 linewidth=0.6, color='#940006')
        # OverSold
        overSold = CreateHorizontalLine(self.mfi.index, 20, 20, True)
        plt.plot(self.toNumIndex(overSold), overSold, '--',
                 label='Oversold', linewidth=1.0, color='#169400')
        # plt.fill_between(x_axis, self.mfi, overSold['value'],
        #                  where=self.mfi<overSold.values, color='#b3ffb3')
        # OverSold - Gene Quong and Avrum Soudack
        overSold = CreateHorizontalLine(self.mfi.index, 10, 10)
        plt.plot(self.toNumIndex(overSold), overSold, '--',
                 linewidth=0.6, color='#169400')

        # Signals plotting
        if (self.buy is not None and self.buy.size):
            plt.plot(self.toNumIndex(self.buy), self.buy,
                     'o', color='#000000', ms=8)
            plt.plot(self.toNumIndex(self.buy), self.buy,
                     'o', label='Buy', color='#00FF00')
        if (self.buyStrong is not None and self.buyStrong.size):
            plt.plot(self.toNumIndex(self.buyStrong), self.buyStrong,
                     's', color='#000000', ms=8)
            plt.plot(self.toNumIndex(self.buyStrong), self.buyStrong,
                     's', label='BuyStrong', color='#00FF00')
        if (self.sell is not None and self.sell.size):
            plt.plot(self.toNumIndex(self.sell), self.sell,
                     'o', color='#000000', ms=8)
            plt.plot(self.toNumIndex(self.sell), self.sell,
                     'o', label='Sell', color='#FF0000')
        if (self.sellStrong is not None and self.sellStrong.size):
            plt.plot(self.toNumIndex(self.sellStrong), self.sellStrong,
                     's', color='#000000', ms=8)
            plt.plot(self.toNumIndex(self.sellStrong), self.sellStrong,
                     's', label='SellStrong', color='#FF0000')

        # Limits of plot
        plt.ylim(top=100, bottom=0)
43.333333
130
0.605728
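A synthetic-data sketch for constructing the indicator above. The OHLCV random walk is invented, and the repository's helpers/core packages are assumed to be importable alongside the module.

import numpy as np
import pandas as pd

idx = pd.date_range('2021-01-01', periods=60, freq='D')
close = pd.Series(100 + np.random.randn(60).cumsum(), index=idx)
high, low = close + 1, close - 1                      # fake intraday range
volume = pd.Series(np.random.randint(1000, 5000, 60), index=idx)

mfi = CreateMoneyFlowIndex(high, low, close, volume, info=None, n=14)
print(mfi.GetUnifiedValue())   # unified reading in the -100..100 range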
748d2191e8d1179f4d258d1d6370fb45d68d9c09
11,477
py
Python
data_prep_scripts/npz_shuffler.py
dlpbc/comma.ai-speed-challenge
fd4bd8097feb859234de9333700838fe71efb9dc
[ "MIT" ]
26
2018-04-16T04:30:33.000Z
2021-06-09T17:39:52.000Z
data_prep_scripts/npz_shuffler.py
dlpbc/comma.ai-speed-challenge
fd4bd8097feb859234de9333700838fe71efb9dc
[ "MIT" ]
2
2019-02-11T09:43:54.000Z
2020-07-14T23:37:17.000Z
data_prep_scripts/npz_shuffler.py
dlpbc/comma.ai-speed-challenge
fd4bd8097feb859234de9333700838fe71efb9dc
[ "MIT" ]
10
2018-05-12T18:26:54.000Z
2020-12-27T16:55:36.000Z
''' The script takes 2 or 3 npz batch files (containing example clips) and shuffles them. The reason for this is because a single batch file can contain a large number of examples within a speed (label) range category. A batch might contain more of 11-15 speed range example clips than any other speed range and this affects training. The entire dataset needs to be properly shuffled. This issue stems from the nature of video frames speed label, since you can have several number of consecutive frames within thesame speed range. ''' import os import argparse import numpy as np ''' this takes 2 npz files containing examples, split each of them into 2 parts, which is used to produce 2 new files by mixing. The idea is described below The first new file is produced by combining: first slice of the first file + second slice of the second file The second new file is produced by combinining: first slice of the second file + second slice of the first file Example: Given the files slices below: first file: | 1.1 | 1.2 | second file: | 2.1 | 2.2 | we can produce three new files by combining slices (of current files) as shown below: first new file: | 1.1 | 2.2 | second new file: | 2.1 | 1.2 | Args: file1_path: path to the first npz file file2_path: path to the second npz file save_dir: directory to save the new npz files ''' def two_split_mixer(file1_path, file2_path, save_dir=None): # load npz files file1 = np.load(file1_path) file2 = np.load(file2_path) X_data1 = file1['examples'] y_data1 = file1['labels'] X_data2 = file2['examples'] y_data2 = file2['labels'] # initial data shuffle before slicing # data1 indexes = np.arange(X_data1.shape[0]) np.random.shuffle(indexes) X_data1 = X_data1[indexes] y_data1 = y_data1[indexes] # data2 indexes = np.arange(X_data2.shape[0]) np.random.shuffle(indexes) X_data2 = X_data2[indexes] y_data2 = y_data2[indexes] # sanity check if not (X_data1.shape[0] == X_data2.shape[0]): print('number of examples in the two files should be thesame') print('please correct your npz files and try again') return half_point_idx = X_data1.shape[0] // 2 # data1 X, y slices first_slice_X_data1 = X_data1[ : half_point_idx] second_slice_X_data1 = X_data1[half_point_idx : ] first_slice_y_data1 = y_data1[ : half_point_idx] second_slice_y_data1 = y_data1[half_point_idx : ] # data2 X, y slices first_slice_X_data2 = X_data2[ : half_point_idx] second_slice_X_data2 = X_data2[half_point_idx : ] first_slice_y_data2 = y_data2[ : half_point_idx] second_slice_y_data2 = y_data2[half_point_idx : ] new_X_data1 = np.concatenate([first_slice_X_data1, second_slice_X_data2], axis=0) new_X_data2 = np.concatenate([first_slice_X_data2, second_slice_X_data1], axis=0) new_y_data1 = np.concatenate([first_slice_y_data1, second_slice_y_data2], axis=0) new_y_data2 = np.concatenate([first_slice_y_data2, second_slice_y_data1], axis=0) # shuffle data # data1 indexes = np.arange(new_X_data1.shape[0]) np.random.shuffle(indexes) new_X_data1 = new_X_data1[indexes] new_y_data1 = new_y_data1[indexes] # data2 indexes = np.arange(new_X_data2.shape[0]) np.random.shuffle(indexes) new_X_data2 = new_X_data2[indexes] new_y_data2 = new_y_data2[indexes] save_file1_path = file1_path.split('/')[-1] save_file2_path = file2_path.split('/')[-1] if save_dir is not None: save_file1_path = '{0}/{1}'.format(save_dir, save_file1_path) save_file2_path = '{0}/{1}'.format(save_dir, save_file2_path) np.savez(save_file1_path, examples=new_X_data1, labels=new_y_data1) np.savez(save_file2_path, examples=new_X_data2, labels=new_y_data2) return ''' this 
takes 3 npz files containing examples, split each of them into 3 parts, which is used to produce 3 new files by mixing. The idea is described below The first new file is produced by combining: first slice of the first file + second slice of the second file + third slice of third file The second new file is produced by combinining: first slice of the second file + second slice of the third file + third slice of the first file The third new file is produced by combining: first slice of the third file + second lsice of the first file + third slice of the second file Example: Given the files slices below: first file: | 1.1 | 1.2 | 1.3 | second file: | 2.1 | 2.2 | 2.3 | third file: | 3.1 | 3.2 | 3.3 | we can produce three new files by combining slices (of current files) as shown below: first new file: | 1.1 | 2.2 | 3.3 | second new file: | 2.1 | 3.2 | 1.3 | third new file: | 3.1 | 1.2 | 2.3 | Args: file1_path: path to the first npz file file2_path: path to the second npz file file3_path: path to the third npz file save_dir: directory to save the new npz files ''' def three_split_mixer(file1_path, file2_path, file3_path, save_dir=None): # load npz files file1 = np.load(file1_path) file2 = np.load(file2_path) file3 = np.load(file3_path) X_data1 = file1['examples'] y_data1 = file1['labels'] X_data2 = file2['examples'] y_data2 = file2['labels'] X_data3 = file3['examples'] y_data3 = file3['labels'] # initial data shuffle before slicing # data1 indexes = np.arange(X_data1.shape[0]) np.random.shuffle(indexes) X_data1 = X_data1[indexes] y_data1 = y_data1[indexes] # data2 indexes = np.arange(X_data2.shape[0]) np.random.shuffle(indexes) X_data2 = X_data2[indexes] y_data2 = y_data2[indexes] # data3 indexes = np.arange(X_data3.shape[0]) np.random.shuffle(indexes) X_data3 = X_data3[indexes] y_data3 = y_data3[indexes] # sanity check if not (X_data1.shape[0] == X_data2.shape[0] == X_data3.shape[0]): print('number of examples in the three files should be thesame') print('please correct your npz files and try again') return one_third_idx = X_data1.shape[0] // 3 two_third_idx = (one_third_idx + one_third_idx) # data1 X, y slices first_slice_X_data1 = X_data1[ : one_third_idx] second_slice_X_data1 = X_data1[one_third_idx : two_third_idx] third_slice_X_data1 = X_data1[two_third_idx : ] first_slice_y_data1 = y_data1[ : one_third_idx] second_slice_y_data1 = y_data1[one_third_idx : two_third_idx] third_slice_y_data1 = y_data1[two_third_idx : ] # data2 X, y slices first_slice_X_data2 = X_data2[ : one_third_idx] second_slice_X_data2 = X_data2[one_third_idx : two_third_idx] third_slice_X_data2 = X_data2[two_third_idx : ] first_slice_y_data2 = y_data2[ : one_third_idx] second_slice_y_data2 = y_data2[one_third_idx : two_third_idx] third_slice_y_data2 = y_data2[two_third_idx : ] # data3 X, y slices first_slice_X_data3 = X_data3[ : one_third_idx] second_slice_X_data3 = X_data3[one_third_idx : two_third_idx] third_slice_X_data3 = X_data3[two_third_idx : ] first_slice_y_data3 = y_data3[ : one_third_idx] second_slice_y_data3 = y_data3[one_third_idx : two_third_idx] third_slice_y_data3 = y_data3[two_third_idx : ] new_X_data1 = np.concatenate([first_slice_X_data1, second_slice_X_data2, third_slice_X_data3], axis=0) new_X_data2 = np.concatenate([first_slice_X_data2, second_slice_X_data3, third_slice_X_data1], axis=0) new_X_data3 = np.concatenate([first_slice_X_data3, second_slice_X_data1, third_slice_X_data2], axis=0) new_y_data1 = np.concatenate([first_slice_y_data1, second_slice_y_data2, third_slice_y_data3], axis=0) 
new_y_data2 = np.concatenate([first_slice_y_data2, second_slice_y_data3, third_slice_y_data1], axis=0) new_y_data3 = np.concatenate([first_slice_y_data3, second_slice_y_data1, third_slice_y_data2], axis=0) # shuffle data # data1 indexes = np.arange(new_X_data1.shape[0]) np.random.shuffle(indexes) new_X_data1 = new_X_data1[indexes] new_y_data1 = new_y_data1[indexes] # data2 indexes = np.arange(new_X_data2.shape[0]) np.random.shuffle(indexes) new_X_data2 = new_X_data2[indexes] new_y_data2 = new_y_data2[indexes] # data3 indexes = np.arange(new_X_data3.shape[0]) np.random.shuffle(indexes) new_X_data3 = new_X_data3[indexes] new_y_data3 = new_y_data3[indexes] save_file1_path = file1_path.split('/')[-1] save_file2_path = file2_path.split('/')[-1] save_file3_path = file3_path.split('/')[-1] if save_dir is not None: save_file1_path = '{0}/{1}'.format(save_dir, save_file1_path) save_file2_path = '{0}/{1}'.format(save_dir, save_file2_path) save_file3_path = '{0}/{1}'.format(save_dir, save_file3_path) np.savez(save_file1_path, examples=new_X_data1, labels=new_y_data1) np.savez(save_file2_path, examples=new_X_data2, labels=new_y_data2) np.savez(save_file3_path, examples=new_X_data3, labels=new_y_data3) return def main(args): if args.save_dir is not None: if not os.path.exists(args.save_dir): os.makedirs(args.save_dir) if args.num_files == 2: # shuffling examples in two npz files two_split_mixer(args.npz_file1_path, args.npz_file2_path, args.save_dir) elif args.num_files == 3: # shuffling examples in three npz files three_split_mixer(args.npz_file1_path, args.npz_file2_path, args.npz_file3_path, args.save_dir) else: print('ERROR: Number of files to mix should be 2 or 3') print('Exiting') return if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('num_files', help='Number of npz files to mix. [i.e. 2 or 3]', choices=[2,3], type=int) parser.add_argument('npz_file1_path', help='Path to the first npz file', type=str) parser.add_argument('npz_file2_path', help='Path to the second npz file', type=str) parser.add_argument('--npz-file3-path', help='Path to the third npz file', type=str) parser.add_argument('--save-dir', help='Directory to save generated files', type=str) main(parser.parse_args())
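
# Usage sketch (not part of the original script): 'mixer.py' and the batch file
# names below are placeholders; both invocations assume equally sized batch files.
#
#   python mixer.py 2 batch_0.npz batch_1.npz --save-dir shuffled
#   python mixer.py 3 batch_0.npz batch_1.npz --npz-file3-path batch_2.npz --save-dir shuffled
#
# Or, calling a mixer directly from Python:
#
#   two_split_mixer('batch_0.npz', 'batch_1.npz', save_dir='shuffled')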
34.884498
103
0.632308
e8963f6237e3584245f2d15d48e5f6ea02585133
6,298
py
Python
montage/app.py
baturin/montage
9d1bb640d0d4f78359000d18de2206be84c12be0
[ "BSD-3-Clause" ]
33
2016-08-22T17:09:16.000Z
2021-08-30T15:49:09.000Z
montage/app.py
baturin/montage
9d1bb640d0d4f78359000d18de2206be84c12be0
[ "BSD-3-Clause" ]
197
2016-10-02T20:16:29.000Z
2022-02-26T16:59:44.000Z
montage/app.py
baturin/montage
9d1bb640d0d4f78359000d18de2206be84c12be0
[ "BSD-3-Clause" ]
11
2016-09-01T15:52:59.000Z
2021-11-14T20:54:33.000Z
import sys import os.path import logging from clastic import Application, StaticFileRoute, MetaApplication from clastic.static import StaticApplication from clastic.middleware import HTTPCacheMiddleware from clastic.middleware.cookie import SignedCookieMiddleware, NEVER from clastic.render import AshesRenderFactory, render_basic from sqlalchemy import create_engine, event from sqlalchemy.orm import sessionmaker from mwoauth import ConsumerToken from mw import (UserMiddleware, UserIPMiddleware, TimingMiddleware, LoggingMiddleware, ReplayLogMiddleware, DBSessionMiddleware, MessageMiddleware) from rdb import Base, bootstrap_maintainers, ensure_series from utils import get_env_name, load_env_config from check_rdb import get_schema_errors, ping_connection from meta_endpoints import META_API_ROUTES, META_UI_ROUTES from juror_endpoints import JUROR_API_ROUTES, JUROR_UI_ROUTES from admin_endpoints import ADMIN_API_ROUTES, ADMIN_UI_ROUTES from public_endpoints import PUBLIC_API_ROUTES, PUBLIC_UI_ROUTES import sentry_sdk from clastic_sentry import SentryMiddleware DEFAULT_DB_URL = 'sqlite:///tmp_montage.db' CUR_PATH = os.path.dirname(os.path.abspath(__file__)) PROJ_PATH = os.path.dirname(CUR_PATH) STATIC_PATH = os.path.join(CUR_PATH, 'static') TEMPLATES_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates') def create_app(env_name='prod', config=None): # rendering is handled by MessageMiddleware ui_routes = (PUBLIC_UI_ROUTES + JUROR_UI_ROUTES + ADMIN_UI_ROUTES + META_UI_ROUTES) api_routes = (PUBLIC_API_ROUTES + JUROR_API_ROUTES + ADMIN_API_ROUTES + META_API_ROUTES) print '== creating WSGI app using env name: %s' % (env_name,) logging.basicConfig() logging.getLogger('sqlalchemy.engine').setLevel(logging.WARN) if config is None: config = load_env_config(env_name=env_name) print '== loaded config file: %s' % (config['__file__'],) engine = create_engine(config.get('db_url', DEFAULT_DB_URL), pool_recycle=60) session_type = sessionmaker() session_type.configure(bind=engine) tmp_rdb_session = session_type() schema_errors = get_schema_errors(Base, tmp_rdb_session) if not schema_errors: print '++ schema validated ok' else: for err in schema_errors: print '!! ', err print '!! 
recreate the database and update the code, then try again' sys.exit(2) # create maintainer users if they don't exist yet musers = bootstrap_maintainers(tmp_rdb_session) if musers: print '++ created new users for maintainers: %r' % (musers,) new_series = ensure_series(tmp_rdb_session) if new_series: print '++ created new series: %r' % new_series tmp_rdb_session.commit() engine.echo = config.get('db_echo', False) if not config.get('db_disable_ping'): event.listen(engine, 'engine_connect', ping_connection) renderer = AshesRenderFactory(TEMPLATES_PATH) cookie_secret = config['cookie_secret'] assert cookie_secret root_path = config.get('root_path', '/') scm_secure = env_name == 'prod' # https only in prod scm_mw = SignedCookieMiddleware(secret_key=cookie_secret, path=root_path, http_only=True, secure=scm_secure) if not scm_secure: scm_mw.data_expiry = NEVER def get_engine(): engine = create_engine(config.get('db_url', DEFAULT_DB_URL), pool_recycle=60) engine.echo = config.get('db_echo', False) if not config.get('db_disable_ping'): event.listen(engine, 'engine_connect', ping_connection) return engine blank_session_type = sessionmaker() middlewares = [TimingMiddleware(), UserIPMiddleware(), scm_mw, DBSessionMiddleware(blank_session_type, get_engine), UserMiddleware()] api_log_path = config.get('api_log_path', 'montage_api.log') if api_log_path: log_mw = LoggingMiddleware(api_log_path) middlewares.insert(0, log_mw) # hack config['api_exc_log_path'] = getattr(log_mw, 'exc_log_path', None) replay_log_path = config.get('replay_log_path') if replay_log_path: replay_log_mw = ReplayLogMiddleware(replay_log_path) middlewares.append(replay_log_mw) consumer_token = ConsumerToken(config['oauth_consumer_token'], config['oauth_secret_token']) resources = {'config': config, 'consumer_token': consumer_token, 'root_path': root_path, 'ashes_renderer': renderer} debug_errors = bool(os.getenv('MONTAGE_PDB', False)) or config['__env__'] == 'devtest' api_app = Application(api_routes, resources, middlewares=[MessageMiddleware(debug_errors=debug_errors)] + middlewares, render_factory=render_basic) ui_app = Application(ui_routes, resources, middlewares=[MessageMiddleware(debug_errors=debug_errors, use_ashes=True)] + middlewares, render_factory=renderer) static_app = StaticApplication(STATIC_PATH) root_mws = [HTTPCacheMiddleware(use_etags=True)] if not debug_errors: # don't need sentry if you've got pdb, etc. sentry_sdk.init(environment=config['__env__'], request_bodies='medium', dsn="https://5738a89dcd5e4b599f7a801fd63bc217@sentry.io/3532775") root_mws.append(SentryMiddleware()) root_app = Application([StaticFileRoute('/', STATIC_PATH + '/index.html'), StaticFileRoute('/a/', STATIC_PATH + '/a/index.html'), ('/', static_app), ('/', ui_app), ('/v1/', api_app), ('/meta', MetaApplication())], resources={'config': config}, middlewares=root_mws) return root_app
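
# A minimal usage sketch (not from the source): instantiating the WSGI app for a
# local run. 'devtest' is an env name the module itself checks for, but the config
# file it loads is an assumption here.
#
#   application = create_app(env_name='devtest')
#   # any WSGI server can then serve `application`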
37.266272
114
0.651318
64d8158189af51f7b0990951973ca1a4db773201
1,963
py
Python
docs/conf.py
dymaxionlabs/nb_workflows
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
[ "Apache-2.0" ]
null
null
null
docs/conf.py
dymaxionlabs/nb_workflows
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
[ "Apache-2.0" ]
null
null
null
docs/conf.py
dymaxionlabs/nb_workflows
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
[ "Apache-2.0" ]
null
null
null
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys root_directory = os.path.dirname(os.getcwd()) sys.path.insert(0, root_directory) import nb_workflows # -- Project information ----------------------------------------------------- project = "NB Workflows" copyright = "2022, Xavier Petit" author = "Xavier Petit" # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ["sphinx.ext.autodoc", "sphinx_rtd_theme"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"]
35.690909
79
0.674478
c2a0cc2b77b21032af52f8bd7f7a9bc0f20032ca
521
py
Python
MiddleKit/Tests/MKRefresh.mkmodel/TestEmpty.py
PeaceWorksTechnologySolutions/w4py
74f5a03a63f1a93563502b908474aefaae2abda2
[ "MIT" ]
18
2016-08-01T20:15:59.000Z
2019-12-24T16:00:03.000Z
MiddleKit/Tests/MKRefresh.mkmodel/TestEmpty.py
WebwareForPython/w4py
bba08f5974d49f5da7e88abe3eeda1037d0824a3
[ "MIT" ]
6
2016-09-13T05:48:45.000Z
2020-01-09T18:29:12.000Z
webware/MiddleKit/Tests/MKRefresh.mkmodel/TestEmpty.py
PeaceWorksTechnologySolutions/w4py3-middlekit
a9554e20c47010e7b0c0deee63e1786482c59a1c
[ "MIT" ]
6
2016-09-16T14:32:29.000Z
2020-01-03T18:52:16.000Z
def test(store): from Book import Book b = Book() b.setTitle('foo') store.addObject(b) store.saveChanges() serialNum = b.serialNum() b = store.fetchObject(Book, serialNum) b.setTitle('bar') try: b = store.fetchObject(Book, serialNum) except AssertionError: # an assertion _should_ be generated, because we are attempting # to refresh a modified object. pass else: assert 0, 'Should have got an assertion failure, but none was raised.'
26.05
78
0.633397
b3eee4d0616eb8b748b0c9a4450df0f15cdd34c5
1,016
py
Python
tests/test_cli.py
shakhat/python-license-check
708545e189bc9bb1e45ad359cb9c1f3c4525ca17
[ "Apache-2.0" ]
null
null
null
tests/test_cli.py
shakhat/python-license-check
708545e189bc9bb1e45ad359cb9c1f3c4525ca17
[ "Apache-2.0" ]
null
null
null
tests/test_cli.py
shakhat/python-license-check
708545e189bc9bb1e45ad359cb9c1f3c4525ca17
[ "Apache-2.0" ]
null
null
null
from liccheck.command_line import parse_args, read_strategy, run, Level def test_parse_arguments(): args = parse_args(['--sfile', 'my_strategy.ini']) assert args.strategy_ini_file == 'my_strategy.ini' assert args.requirement_txt_file == './requirements.txt' assert args.level is Level.STANDARD args = parse_args(['--sfile', 'my_strategy.ini', '--rfile', 'my_requirements.txt', '--level', 'cautious']) assert args.strategy_ini_file == 'my_strategy.ini' assert args.requirement_txt_file == 'my_requirements.txt' assert args.level is Level.CAUTIOUS def test_read_strategy(): args = parse_args(['--sfile', 'license_strategy.ini']) strategy = read_strategy(args.strategy_ini_file) assert len(strategy.AUTHORIZED_LICENSES) > 0 assert len(strategy.AUTHORIZED_PACKAGES) > 0 assert len(strategy.UNAUTHORIZED_LICENSES) > 0 def test_run(): args = parse_args(['--sfile', 'license_strategy.ini', '--rfile', 'requirements.txt']) run(args)
39.076923
111
0.699803
3c212cb082a29e3b83ca5b552c6298ad6f059324
2,090
py
Python
malib/envs/maatari/env.py
apexrl/malib
3785309e9b695ff359131fbbecabb6b5a52ef559
[ "MIT" ]
258
2021-05-10T11:17:45.000Z
2022-03-30T13:41:09.000Z
malib/envs/maatari/env.py
apexrl/malib
3785309e9b695ff359131fbbecabb6b5a52ef559
[ "MIT" ]
28
2021-05-13T06:50:04.000Z
2022-03-30T14:19:15.000Z
malib/envs/maatari/env.py
apexrl/malib
3785309e9b695ff359131fbbecabb6b5a52ef559
[ "MIT" ]
34
2021-05-31T16:17:49.000Z
2022-03-26T06:59:59.000Z
import logging
import importlib

import supersuit

from malib.envs import Environment
from malib.utils.typing import Dict, Any, Sequence, AgentID
from malib.backend.datapool.offline_dataset_server import Episode


def nested_env_creator(ori_creator: type, wrappers: Sequence[Dict]) -> type:
    """Wrap an original atari environment creator with multiple wrappers."""

    def creator(**env_config):
        env = ori_creator(**env_config)
        # parse wrappers and apply them in order
        for wconfig in wrappers:
            name = wconfig["name"]
            params = wconfig["params"]

            wrapper = getattr(supersuit, name)
            if isinstance(params, Sequence):
                env = wrapper(env, *params)
            elif isinstance(params, Dict):
                env = wrapper(env, **params)
            else:
                raise TypeError(f"Unexpected type: {type(params)}")
        return env

    return creator


class MAAtari(Environment):
    def __init__(self, **configs):
        super().__init__(**configs)

        env_id = self._configs["env_id"]
        wrappers = self._configs.get("wrappers", [])
        scenario_configs = self._configs.get("scenario_configs", {})
        env_module = importlib.import_module(f"pettingzoo.atari.{env_id}")
        ori_caller = env_module.parallel_env
        wrapped_caller = nested_env_creator(ori_caller, wrappers)

        self.is_sequential = False
        self._env = wrapped_caller(**scenario_configs)
        self._trainable_agents = self._env.possible_agents

    def step(self, actions: Dict[AgentID, Any]):
        observations, rewards, dones, infos = self._env.step(actions)
        return {
            Episode.NEXT_OBS: observations,
            Episode.REWARD: rewards,
            Episode.DONE: dones,
            Episode.INFO: infos,
        }

    def render(self, *args, **kwargs):
        pass

    def reset(self):
        observations = self._env.reset()
        return {Episode.CUR_OBS: observations}
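
# Hypothetical sketch (not from the source) of the wrapper-config format that
# nested_env_creator consumes; the pettingzoo module and supersuit wrapper
# names/params below are assumptions and depend on the installed versions.
#
#   from pettingzoo.atari import pong_v2
#   creator = nested_env_creator(
#       pong_v2.parallel_env,
#       wrappers=[
#           {"name": "resize_v0", "params": {"x_size": 84, "y_size": 84}},
#           {"name": "frame_stack_v1", "params": [4]},
#       ],
#   )
#   env = creator()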
32.153846
87
0.629187
5ac75695783137e1c057a7e8cb6a9d8bc83df574
744
py
Python
setup.py
nargyrop/pyna
a90e0f33846e10883ead8aaa710300f05f22f575
[ "MIT" ]
null
null
null
setup.py
nargyrop/pyna
a90e0f33846e10883ead8aaa710300f05f22f575
[ "MIT" ]
null
null
null
setup.py
nargyrop/pyna
a90e0f33846e10883ead8aaa710300f05f22f575
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages from pathlib import Path this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() setup( name='resens', version='0.2.2', description='Raster Processing package for Remote Sensing and Earth Observation', long_description=long_description, long_description_content_type='text/markdown', url='https://www.nargyrop.com', author='Nikos Argyropoulos', author_email='n.argiropeo@gmail.com', license='MIT', packages=find_packages('src'), package_dir={'': 'src'}, install_requires=[ 'gdal', 'numpy', 'opencv-python' ], python_requires='>=3.8', zip_safe=False )
29.76
87
0.665323
7a09a7f2ee83a5b5aa999984bd8f2d9e82626d1c
3,106
py
Python
climatetalk/hvac.py
kdschlosser/ClimateTalk
3b09a45c295cf5228283d7095834e8f133ed7de3
[ "MIT" ]
3
2021-04-30T20:12:16.000Z
2022-03-09T11:53:12.000Z
climatetalk/hvac.py
kdschlosser/ClimateTalk
3b09a45c295cf5228283d7095834e8f133ed7de3
[ "MIT" ]
null
null
null
climatetalk/hvac.py
kdschlosser/ClimateTalk
3b09a45c295cf5228283d7095834e8f133ed7de3
[ "MIT" ]
2
2021-04-08T18:29:39.000Z
2021-04-30T20:13:55.000Z
import datetime from .utils import ( set_bit as _set_bit, get_bit as _get_bit ) from .commands import ( # HVAC commands HVAC_HEAT_SET_POINT_TEMPERATURE_MODIFY, HVAC_COOL_SET_POINT_TEMPERATURE_MODIFY, HVAC_HEAT_PROFILE_CHANGE, HVAC_COOL_PROFILE_CHANGE, HVAC_SYSTEM_SWITCH_MODIFY, HVAC_PERMANENT_SET_POINT_TEMP_HOLD_MODIFY, HVAC_FAN_KEY_SELECTION, HVAC_HOLD_OVERRIDE, HVAC_BEEPER_ENABLE, HVAC_FAHRENHEIT_CELSIUS_DISPLAY, HVAC_COMFORT_RECOVERY_MODIFY, HVAC_REAL_TIME_DAY_OVERRIDE, HVAC_CHANGE_FILTER_TIME_REMAINING, HVAC_VACATION_MODE, HVAC_HIGH_ALARM_LIMIT_CHANGE, HVAC_LOW_ALARM_LIMIT_CHANGE, HVAC_HIGH_OUTDOOR_ALARM_LIMIT_CHANGE, HVAC_LOW_OUTDOOR_ALARM_LIMIT_CHANGE, HVAC_TEMP_DISPLAY_ADJ_FACTOR_CHANGE, HVAC_CLEAR_COMPRESSOR_RUN_TIME, HVAC_RESET_MICRO, HVAC_COMPRESSOR_LOCKOUT, HVAC_HOLD_RELEASE, HVAC_PROGRAM_INTERVAL_TYPE_MODIFICATION, HVAC_COMMUNICATIONS_RECEIVER_ON_OFF, HVAC_FORCE_PHONE_NUMBER_DISPLAY, HVAC_RESTORE_FACTORY_DEFAULTS, HVAC_CUSTOM_MESSAGE_AREA_DISPLAY_DATA, HVAC_SET_POINT_TEMP_AND_TEMPORARY_HOLD, HVAC_CONTINUOUS_DISPLAY_LIGHT, HVAC_ADVANCE_REAL_TIME_DAY_OVERRIDE, HVAC_KEYPAD_LOCKOUT, HVAC_TEST_MODE, HVAC_SUBSYSTEM_INSTALLATION_TEST, HVAC_SET_POINT_TEMP_TIME_HOLD, HVAC_COMFORT_MODE_MODIFICATION, HVAC_LIMITED_HEAT_AND_COOL_RANGE, HVAC_AUTO_PAIRING_REQUEST, HVAC_PAIRING_OWNERSHIP_REQUEST, HVAC_REVERSING_VALVE_CONFIG, HVAC_HUM_DEHUM_CONFIG, HVAC_CHANGE_UV_LIGHT_MAINTENANCE_TIMER, HVAC_CHANGE_HUMIDIFIER_PAD_MAINT_TIMERALL, HVAC_DAMPER_CLOSURE_POSITION_DEMAND, HVAC_SUBSYSTEM_BUSY_STATUS, HVAC_DEHUMIDIFICATION_DEMAND, HVAC_HUMIDIFICATION_DEMAND, HVAC_HEAT_DEMAND, HVAC_COOL_DEMAND, HVAC_FAN_DEMAND, HVAC_BACK_UP_HEAT_DEMAND, HVAC_DEFROST_DEMAND, HVAC_AUX_HEAT_DEMAND, HVAC_SET_MOTOR_SPEED, HVAC_SET_MOTOR_TORQUE, HVAC_SET_AIRFLOW_DEMAND, HVAC_SET_MODE, HVAC_SET_DEMAND_RAMP_RATE, HVAC_SET_MOTOR_DIRECTION, HVAC_SET_MOTOR_TORQUE_PERCENT, HVAC_SET_MOTOR_POSITION_DEMAND, HVAC_SET_BLOWER_COEFFICIENT_1, HVAC_SET_BLOWER_COEFFICIENT_2, HVAC_SET_BLOWER_COEFFICIENT_3, HVAC_SET_BLOWER_COEFFICIENT_4, HVAC_SET_BLOWER_COEFFICIENT_5, HVAC_SET_BLOWER_IDENTIFICATION_0, HVAC_SET_BLOWER_IDENTIFICATION_1, HVAC_SET_BLOWER_IDENTIFICATION_2, HVAC_SET_BLOWER_IDENTIFICATION_3, HVAC_SET_BLOWER_IDENTIFICATION_4, HVAC_SET_BLOWER_IDENTIFICATION_5, HVAC_SET_SPEED_LIMIT, HVAC_SET_TORQUE_LIMIT, HVAC_SET_AIRFLOW_LIMIT, HVAC_SET_POWER_OUTPUT_LIMIT, HVAC_SET_DEVICE_TEMPERATURE_LIMIT, HVAC_STOP_MOTOR_BY_BRAKING, HVAC_RUN_STOP_MOTOR, HVAC_SET_DEMAND_RAMP_TIME, HVAC_SET_INDUCER_RAMP_RATE, HVAC_SET_BLOWER_COEFFICIENT_6, HVAC_SET_BLOWER_COEFFICIENT_7, HVAC_SET_BLOWER_COEFFICIENT_8, HVAC_SET_BLOWER_COEFFICIENT_9, HVAC_SET_BLOWER_COEFFICIENT_10, HVAC_PUBLISH_PRICE, ) from .packet import (SetControlCommandRequest, SetControlCommandResponse)
31.06
73
0.814874
469777cc3839fee97bea2e884359ab09aa5ff00c
11,108
py
Python
pytorch_named_dims/utils/__init__.py
stared/pytorch-named-dims
86c83177cd0e22fcae3b356be71044e332c6fa16
[ "MIT" ]
null
null
null
pytorch_named_dims/utils/__init__.py
stared/pytorch-named-dims
86c83177cd0e22fcae3b356be71044e332c6fa16
[ "MIT" ]
null
null
null
pytorch_named_dims/utils/__init__.py
stared/pytorch-named-dims
86c83177cd0e22fcae3b356be71044e332c6fa16
[ "MIT" ]
1
2020-06-17T08:54:22.000Z
2020-06-17T08:54:22.000Z
from typing import Type, Tuple, List, Optional, Iterable

import torch
from torch import nn

DimensionNames = List[Optional[str]]


# helper functions

def split_names(names_operation: DimensionNames) -> Tuple[DimensionNames, DimensionNames]:
    """Splits dimension names into the ones before and after '*'.

    Arguments:
        names_operation {DimensionNames} -- e.g. ['N', '*', 'C'] or ['L', 'N', 'C']

    Raises:
        ValueError: If there is more than one '*' in the list.

    Returns:
        Tuple[DimensionNames, DimensionNames] -- (names_before_ast, names_after_ast),
        if there is no ast, then (original_list, []).

    Examples:
        ['N', '*', 'C'] -> ['N'], ['C'].
        ['L', 'N', 'C'] -> ['L', 'N', 'C'], [].
    """
    asterisks = sum(name == '*' for name in names_operation)
    if asterisks == 0:
        return names_operation[:], []
    elif asterisks == 1:
        split = names_operation.index('*')
        return names_operation[:split], names_operation[split + 1:]
    else:
        raise ValueError("More than one '*' in module dimension names.")


def names_compatible(names1: DimensionNames, names2: DimensionNames) -> bool:
    """Checks if two dimension names are compatible.

    To be compatible they need to have the same length, and the same names for all
    dimensions. If there is None, it is compatible with any name.

    Arguments:
        names1 {DimensionNames} -- e.g. ['N', 'C', 'W', 'H']
        names2 {DimensionNames} -- e.g. [None, 'C', 'W', None]

    Returns:
        bool -- True if they are.

    Examples:
        ['N', 'C', 'W', 'H'], [None, 'C', 'W', None] -> True.
        ['N', 'C', 'W', 'H'], ['N', 'C', 'H', None] -> False.
    """
    if len(names1) != len(names2):
        return False
    else:
        for name1, name2 in zip(names1, names2):
            if name1 is not None and name2 is not None and name1 != name2:
                return False
        return True


def split_and_compare(names_in: DimensionNames, x_names: DimensionNames, layer_name: str = "") -> DimensionNames:
    """Splits x_names according to names_in and checks that they match.

    Arguments:
        names_in {DimensionNames} -- Declared input dimension names, can contain zero or one '*'.
        x_names {DimensionNames} -- Actual input dimension names; they need to be explicit
            (cannot contain '*').

    Keyword Arguments:
        layer_name {str} -- Layer name for error message (default: {""}).

    Raises:
        ValueError: If the actual dimension names do not match the declared ones.

    Returns:
        DimensionNames -- Dimension names from x_names as matched by '*' in names_in.

    Examples:
        ['N', '*', 'C'], ['N', 'D', 'L', 'C'] -> ['D', 'L'].
        ['N', '*', 'C'], ['N', 'C'] -> [].
        ['N', '*', 'C'], ['N', 'C', 'L'] -> ValueError.
    """
    error_msg = f"Layer {layer_name} requires dimensions {names_in} but got {x_names} instead."
    names_in_first, names_in_last = split_names(names_in)
    if len(names_in_first) + len(names_in_last) == len(names_in):
        # case of no '*'
        if not names_compatible(names_in, x_names):
            raise ValueError(error_msg)
        return []
    else:
        # case of '*'
        names_first = x_names[:len(names_in_first)]
        names_middle = x_names[len(names_in_first):len(x_names) - len(names_in_last)]
        names_last = x_names[len(x_names) - len(names_in_last):]
        if not names_compatible(names_in_first, names_first) or not names_compatible(names_in_last, names_last):
            raise ValueError(error_msg)
        return names_middle


def name_list_str(names: DimensionNames) -> str:
    """Turns dimension names into a string, for output purposes (__doc__, __str__, etc).

    Works best for one-letter dimensions (otherwise it gets ambiguous).
    Turns None into _.

    Arguments:
        names {DimensionNames} -- e.g. ['N', None, 'H', 'W'].

    Returns:
        str -- e.g. 'N_HW'.
""" return "".join(name if name is not None else '_' for name in names) # turning an nn.Module module instance class NamedModuleWrapper(nn.Module): def __init__(self, module: nn.Module, names_in: DimensionNames, names_out: Optional[DimensionNames] = None): """A wrapper that turns a normal torch.nn.Module instance into one that has typed input and output for the first argument. Arguments: module {nn.Module} -- An nn.Module instance (e.g. conv = nn.Conv2d(...), or a custom one). names_in {DimensionNames} -- Dimension names for the first input of forward method, e.g. ['N', 'C', 'L'], ['N', '*', 'C']. Keyword Arguments: names_out {Optional[DimensionNames]} -- Output dimension names. If not provided, uses names_in (default: {None}). As of now, restricted to a single output. """ super().__init__() self.module = module self.names_in = names_in self.names_out = names_out if names_out is not None else names_in def forward(self, x, *args, **kwargs): names_middle = split_and_compare(self.names_in, list(x.names), layer_name=self.module._get_name()) x = x.rename(None) x = self.module.forward(x, *args, **kwargs) names_out_first, names_out_last = split_names(self.names_out) x = x.refine_names(*names_out_first, *names_middle, *names_out_last) return x # turning an nn.Module class into a named class def name_module_class( old_module_class: type, names_in: List[DimensionNames], names_out: Optional[DimensionNames] = None, reduce_option: bool = False ) -> type: """Converts a nn.Module class into one with the same numerics, but that checks dimension names. Arguments: old_module_class {type} -- A class inheriting from torch.nn.Module (e.g. nn.Conv2d, or any custom). names_in {List[DimensionNames]} -- Dimension names for each input. E.g. [['N', 'C', 'H', 'W']] for a single input. It can use '*' once, and it takes any number of dimensions (including zero), e.g. [['N', '*', 'C']]. Keyword Arguments: names_out {Optional[DimensionNames]} -- Output dimension names (assumes a single tensor). If None, uses the same as the first input. (default: {None}) reduce_option {bool} -- Set True if it is a loss function and has 'reduction' option that changes output dimensions. (default: {False}) Returns: type -- A class, with the nn.Module interface. 
""" names_out = names_out if names_out is not None else names_in[0] signature_output = name_list_str(names_out) signature_inputs = ','.join(name_list_str(names_in_i) for names_in_i in names_in) signature = f"{signature_inputs} -> {signature_output}{' or scalar' if reduce_option else ''}" class NamedModule(old_module_class): # TO FIX: mypy shows error """With dimension names.""" __doc__ = f"{old_module_class.__name__} with named dimensions: {signature}.\n{old_module_class.__doc__}" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.names_in = names_in if reduce_option and self.reduction != 'none': self.names_out = [] else: self.names_out = names_out def forward(self, *args, **kwargs): for x, names_in_i in zip(args, self.names_in): names_middle = split_and_compare(names_in_i, list(x.names), layer_name=self._get_name()) args = [x.rename(None) for x in args] x = super().forward(*args, **kwargs) if '*' in self.names_out: names_out_first, names_out_last = split_names(self.names_out) x = x.refine_names(*names_out_first, *names_middle, *names_out_last) else: x = x.refine_names(*self.names_out) return x def __repr__(self): return f"{super().__repr__()} {signature}" NamedModule.__name__ = f"Named{old_module_class.__name__}" NamedModule.__qualname__ = f"Named{old_module_class.__name__}" return NamedModule def name_rnn_module_class( old_module_class: type, names: Tuple[DimensionNames, DimensionNames], names_memory: Tuple[DimensionNames, DimensionNames] ) -> type: """Converts a RNN nn.Module class into one with the same numerics, but that checks dimension names. Arguments: old_module_class {type} -- nn.RNN, nn.GRU, nn.LSTM or related class inheriting from torch.nn.Module. names {Tuple[DimensionNames, DimensionNames]} -- Dimension names of the main input=output for batch_first False then True. Usually (['L', 'N', 'C'], ['N', 'L', 'C']). names_memory {Tuple[DimensionNames,DimensionNames]} -- Dimension names of the memory cells input=output for batch_first False then True. Usually (['S', 'N', 'C'], ['S', 'N', 'C']) -- not a typo, both are the same. Returns: type -- A class, with the nn.Module interface. """ class NamedModule(old_module_class): # TO FIX: mypy shows error """With dimension names.""" __doc__ = f"{old_module_class.__name__} with named dimensions." def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.batch_first: self.names = names[1] self.names_memory = names_memory[1] else: self.names = names[0] self.names_memory = names_memory[0] # think if that is needed, and in which format self.names_in = (self.names, self.names_memory) self.names_out = (self.names, self.names_memory) def forward(self, x: torch.Tensor, h: Optional[torch.Tensor] = None): error_msg = f"Layer {self._get_name()} requires dimensions {self.names}, {self.names_memory} but got {x.names} instead." 
if not names_compatible(self.names, list(x.names)): raise ValueError(error_msg) if isinstance(h, torch.Tensor) and not names_compatible(self.names_memory, list(h.names)): raise ValueError(error_msg) elif isinstance(h, tuple) and not all(names_compatible(self.names_memory, list(hi.names)) for hi in h): raise ValueError(error_msg) x = x.rename(None) if isinstance(h, torch.Tensor): h = h.rename(None) elif isinstance(h, tuple): h = tuple(hi.rename(None) for hi in h) x, h = super().forward(x, h) x = x.refine_names(*self.names) if isinstance(h, torch.Tensor): h = h.refine_names(*self.names_memory) elif isinstance(h, tuple): h = tuple(hi.refine_names(*self.names_memory) for hi in h) return x, h def __repr__(self): return f"{super().__repr__()} {name_list_str(self.names)}" NamedModule.__name__ = f"Named{old_module_class.__name__}" NamedModule.__qualname__ = f"Named{old_module_class.__name__}" return NamedModule
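
# A usage sketch (not from the source): wrapping a stock module so it validates
# named-tensor dimensions. The dimension names chosen here are assumptions, and
# the sketch relies on this module's top-level imports (torch, nn) being in scope.
#
#   NamedReLU = name_module_class(nn.ReLU, names_in=[['N', '*', 'C']])
#   relu = NamedReLU()
#   x = torch.randn(2, 5, 3, names=('N', 'L', 'C'))
#   y = relu(x)                    # ok; names are checked and propagated
#   relu(x.rename(N='C', C='N'))   # raises ValueError: first dim must be 'N'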
42.075758
158
0.615142
c87bf719e2af8af96610dac48e29f95df766f74f
620
py
Python
v3/scripts/testing/empireofcode/crystal-row.py
TheShellLand/python
a35e9b32bec3a3ff03d6f0f4c2c2cc891180e516
[ "MIT" ]
null
null
null
v3/scripts/testing/empireofcode/crystal-row.py
TheShellLand/python
a35e9b32bec3a3ff03d6f0f4c2c2cc891180e516
[ "MIT" ]
1
2021-06-01T22:50:19.000Z
2021-06-01T22:50:19.000Z
v3/scripts/testing/empireofcode/crystal-row.py
TheShellLand/python
a35e9b32bec3a3ff03d6f0f4c2c2cc891180e516
[ "MIT" ]
null
null
null
def check_line(line):
    print(line)
    for index in range(len(line) - 1):
        _current = line[index]
        _next = line[index + 1]
        if _current == _next:
            print(_current, _next, 'False')
            return False
        print(_current, _next, 'True')
    return True


if __name__ == '__main__':
    #line = ["Z", "X", "Z", "X", "Z", "Z", "X"]
    line = ["Z", "X", "Z"]
    check_line(line)
    # These "asserts" are used only for self-checking and are not necessary for auto-testing
22.962963
85
0.514516
54c6aa6417172638ad30a9d43693368d93a87431
3,565
py
Python
my_configs/cattle/resnet_50/leg_front_black.py
EricChan-OC/mmpose
b7f36b29eb077b69791675cce39dfcc23c658647
[ "Apache-2.0" ]
null
null
null
my_configs/cattle/resnet_50/leg_front_black.py
EricChan-OC/mmpose
b7f36b29eb077b69791675cce39dfcc23c658647
[ "Apache-2.0" ]
null
null
null
my_configs/cattle/resnet_50/leg_front_black.py
EricChan-OC/mmpose
b7f36b29eb077b69791675cce39dfcc23c658647
[ "Apache-2.0" ]
null
null
null
log_level = 'INFO' load_from = None resume_from = None dist_params = dict(backend='nccl') workflow = [('train', 1)] checkpoint_config = dict(interval=100) evaluation = dict(interval=5, metric='PCK', key_indicator='PCK') COLOR = 'black' optimizer = dict( type='Adam', lr=5e-4, ) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[170, 200]) total_epochs = 210 log_config = dict( interval=1, hooks=[ dict(type='TextLoggerHook'), ]) channel_cfg = dict( num_output_channels=9, dataset_joints=9, dataset_channel=[ [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ], ], inference_channel=[ 0, 1, 2, 3, 4, 5, 6, 7, 8 ]) # model settings model = dict( type='TopDown', pretrained='torchvision://resnet50', backbone=dict(type='ResNet', depth=50), keypoint_head=dict( type='TopDownSimpleHead', in_channels=2048, out_channels=channel_cfg['num_output_channels'], loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)), train_cfg=dict(), test_cfg=dict( flip_test=True, post_process='default', shift_heatmap=True, modulate_kernel=11)) data_cfg = dict( image_size=[256, 256], heatmap_size=[64, 64], num_output_channels=channel_cfg['num_output_channels'], num_joints=channel_cfg['dataset_joints'], dataset_channel=channel_cfg['dataset_channel'], inference_channel=channel_cfg['inference_channel']) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='TopDownRandomFlip', flip_prob=0.5), dict( type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5), dict(type='TopDownAffine'), dict(type='ToTensor'), dict( type='NormalizeTensor', mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), dict(type='TopDownGenerateTarget', sigma=2), dict( type='Collect', keys=['img', 'target', 'target_weight'], meta_keys=[ 'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale', 'rotation', 'bbox_score', 'flip_pairs' ]), ] val_pipeline = [ dict(type='LoadImageFromFile'), dict(type='TopDownAffine'), dict(type='ToTensor'), dict( type='NormalizeTensor', mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), dict( type='Collect', keys=['img'], meta_keys=[ 'image_file', 'center', 'scale', 'rotation', 'bbox_score', 'flip_pairs' ]), ] test_pipeline = val_pipeline dataset_type = 'AnimalHorse10Dataset' data_root = 'data/cattle/training_data/cattle_leg_front_256_'+COLOR data = dict( samples_per_gpu=32, workers_per_gpu=2, val_dataloader=dict(samples_per_gpu=32), test_dataloader=dict(samples_per_gpu=32), train=dict( type=dataset_type, ann_file=f'{data_root}/annotations/train.json', img_prefix=f'{data_root}/images/', data_cfg=data_cfg, pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=f'{data_root}/annotations/test.json', img_prefix=f'{data_root}/images/', data_cfg=data_cfg, pipeline=val_pipeline), test=dict( type=dataset_type, ann_file=f'{data_root}/annotations/test.json', img_prefix=f'{data_root}/images/', data_cfg=data_cfg, pipeline=test_pipeline), )
27.21374
79
0.624965
9c02aca6e9a92ec02002893e119c3d69d64d31af
2,129
py
Python
model.py
aliyun/The-Blessings-of-Unlabeled-Background-in-Untrimmed-Videos
aca214c56fc05778a1f9f382c2f634cbeca4d852
[ "MIT" ]
18
2021-05-25T13:05:25.000Z
2022-03-21T01:37:44.000Z
model.py
aliyun/The-Blessings-of-Unlabeled-Background-in-Untrimmed-Videos
aca214c56fc05778a1f9f382c2f634cbeca4d852
[ "MIT" ]
2
2021-09-17T15:35:48.000Z
2022-03-09T01:32:47.000Z
model.py
aliyun/The-Blessings-of-Unlabeled-Background-in-Untrimmed-Videos
aca214c56fc05778a1f9f382c2f634cbeca4d852
[ "MIT" ]
2
2021-11-20T05:12:19.000Z
2021-12-15T03:07:19.000Z
import torch import torch.nn as nn from torch.nn import init from torch.nn.utils import weight_norm import torch.nn.functional as F import numpy as np import math project_num = 5 class Deconfounder(nn.Module): def __init__(self, len_feature, num_classes, num_segments): super(Deconfounder, self).__init__() self.len_feature = len_feature self.num_classes = num_classes self.conv_diverse_weight = nn.Parameter(torch.randn(project_num,len_feature,1)) nn.init.kaiming_uniform_(self.conv_diverse_weight, a=math.sqrt(5)) kernel = [-1,1] self.kernel = torch.FloatTensor(kernel).unsqueeze(0).unsqueeze(0) self.kernel = torch.repeat_interleave(self.kernel,repeats=project_num,dim=0) self.kernel = nn.Parameter(data = self.kernel,requires_grad=False) def forward(self, x): batch_size = x.shape[0] x_permute = x.permute(0, 2, 1) features_div = F.conv1d(x_permute,self.conv_diverse_weight/\ torch.norm(self.conv_diverse_weight,dim=1,keepdim=True),padding=0) features_div_relation = F.conv1d(features_div,self.kernel,groups=project_num) conv_diverse_norm = torch.norm(self.conv_diverse_weight) projectors = torch.squeeze(self.conv_diverse_weight/\ torch.norm(self.conv_diverse_weight,dim=1,keepdim=True)) if project_num>1: orthogonal = torch.matmul(projectors,torch.transpose(projectors,1,0)) - torch.eye(project_num).cuda() orthogonal = torch.sum(torch.pow(orthogonal,2)) else: orthogonal = torch.sum(projectors-projectors) features_div_T = torch.transpose(features_div,2,1) feature_reconst = torch.matmul(features_div_T,torch.squeeze(self.conv_diverse_weight/\ torch.norm(self.conv_diverse_weight,dim=1,keepdim=True))) loss_reconst = torch.sum((feature_reconst - x) * (feature_reconst - x)) loss_reconst = loss_reconst/(feature_reconst.shape[0]*feature_reconst.shape[1]) return features_div,features_div_relation,conv_diverse_norm,orthogonal,loss_reconst
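
# A shape sketch (not from the source): the module expects features laid out as
# (batch, num_segments, len_feature) and projects them onto `project_num` unit
# vectors. The sizes below are placeholders, and since forward() calls .cuda()
# internally, the sketch assumes a GPU is available.
#
#   model = Deconfounder(len_feature=2048, num_classes=20, num_segments=750).cuda()
#   x = torch.randn(2, 750, 2048).cuda()
#   features_div, relation, weight_norm, orthogonal, loss_reconst = model(x)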
38.017857
113
0.703617
dfc7331a637eba706d44a7f3cbb15bd60caa3135
152
py
Python
alibi_detect/ad/__init__.py
Clusks/alibi-detect
b39406a6cf88f315f401562d4fea93a42aa6dcc1
[ "ECL-2.0", "Apache-2.0", "CC0-1.0" ]
1
2021-01-19T09:13:12.000Z
2021-01-19T09:13:12.000Z
alibi_detect/ad/__init__.py
Clusks/alibi-detect
b39406a6cf88f315f401562d4fea93a42aa6dcc1
[ "ECL-2.0", "Apache-2.0", "CC0-1.0" ]
null
null
null
alibi_detect/ad/__init__.py
Clusks/alibi-detect
b39406a6cf88f315f401562d4fea93a42aa6dcc1
[ "ECL-2.0", "Apache-2.0", "CC0-1.0" ]
null
null
null
from .adversarialae import AdversarialAE from .model_distillation import ModelDistillation __all__ = ["AdversarialAE", "ModelDistillation"]
25.333333
49
0.776316
8068de29f7cef763b8f30a674b0263a68ad8cc6b
7,816
py
Python
conda/cli/main_remove.py
javabrett/conda
61fe720c94f698b9a1c42b8fbdf99ba01b07f4a5
[ "BSD-3-Clause" ]
6
2017-09-09T04:47:49.000Z
2020-09-30T11:20:46.000Z
conda/cli/main_remove.py
javabrett/conda
61fe720c94f698b9a1c42b8fbdf99ba01b07f4a5
[ "BSD-3-Clause" ]
2
2016-11-08T22:21:58.000Z
2017-05-03T07:06:18.000Z
conda/cli/main_remove.py
javabrett/conda
61fe720c94f698b9a1c42b8fbdf99ba01b07f4a5
[ "BSD-3-Clause" ]
3
2019-05-22T01:27:00.000Z
2019-10-07T02:01:15.000Z
# -*- coding: utf-8 -*- # Copyright (C) 2012 Anaconda, Inc # SPDX-License-Identifier: BSD-3-Clause from __future__ import absolute_import, division, print_function, unicode_literals import logging import sys from .common import check_non_admin, specs_from_args from .install import handle_txn from ..base.context import context from ..core.envs_manager import unregister_env from ..core.link import PrefixSetup, UnlinkLinkTransaction from ..core.prefix_data import PrefixData from ..core.solve import Solver from ..exceptions import CondaEnvironmentError, CondaValueError from ..gateways.disk.delete import rm_rf, path_is_clean from ..models.match_spec import MatchSpec log = logging.getLogger(__name__) def execute(args, parser): if not (args.all or args.package_names): raise CondaValueError('no package names supplied,\n' ' try "conda remove -h" for more details') prefix = context.target_prefix check_non_admin() if args.all and prefix == context.default_prefix: msg = "cannot remove current environment. deactivate and run conda remove again" raise CondaEnvironmentError(msg) if args.all and path_is_clean(prefix): # full environment removal was requested, but environment doesn't exist anyway # .. but you know what? If you call `conda remove --all` you'd expect the dir # not to exist afterwards, would you not? If not (fine, I can see the argument # about deleting people's work in envs being a very bad thing indeed), but if # being careful is the goal it would still be nice if after `conda remove --all` # to be able to do `conda create` on the same environment name. # # try: # rm_rf(prefix, clean_empty_parents=True) # except: # log.warning("Failed rm_rf() of partially existent env {}".format(prefix)) return 0 if args.all: if prefix == context.root_prefix: raise CondaEnvironmentError('cannot remove root environment,\n' ' add -n NAME or -p PREFIX option') print("\nRemove all packages in environment %s:\n" % prefix, file=sys.stderr) if 'package_names' in args: stp = PrefixSetup( target_prefix=prefix, unlink_precs=tuple(PrefixData(prefix).iter_records()), link_precs=(), remove_specs=(), update_specs=(), ) txn = UnlinkLinkTransaction(stp) handle_txn(txn, prefix, args, False, True) rm_rf(prefix, clean_empty_parents=True) unregister_env(prefix) return else: if args.features: specs = tuple(MatchSpec(track_features=f) for f in set(args.package_names)) else: specs = specs_from_args(args.package_names) channel_urls = () subdirs = () solver = Solver(prefix, channel_urls, subdirs, specs_to_remove=specs) txn = solver.solve_for_transaction() handle_txn(txn, prefix, args, False, True) # Keep this code for dev reference until private envs can be re-enabled in # Solver.solve_for_transaction # specs = None # if args.features: # specs = [MatchSpec(track_features=f) for f in set(args.package_names)] # actions = remove_actions(prefix, specs, index, pinned=not context.ignore_pinned) # actions['ACTION'] = 'REMOVE_FEATURE' # action_groups = (actions, index), # elif args.all: # if prefix == context.root_prefix: # raise CondaEnvironmentError('cannot remove root environment,\n' # ' add -n NAME or -p PREFIX option') # actions = defaultdict(list) # actions[PREFIX] = prefix # for dist in sorted(iterkeys(index)): # add_unlink(actions, dist) # actions['ACTION'] = 'REMOVE_ALL' # action_groups = (actions, index), # elif prefix == context.root_prefix and not context.prefix_specified: # from ..core.envs_manager import EnvsDirectory # ed = EnvsDirectory(join(context.root_prefix, 'envs')) # get_env = lambda s: 
ed.get_registered_preferred_env(MatchSpec(s).name) # specs = specs_from_args(args.package_names) # env_spec_map = groupby(get_env, specs) # action_groups = [] # for env_name, spcs in iteritems(env_spec_map): # pfx = ed.to_prefix(env_name) # r = get_resolve_object(index.copy(), pfx) # specs_to_remove = tuple(MatchSpec(s) for s in spcs) # prune = pfx != context.root_prefix # dists_for_unlinking, dists_for_linking = solve_for_actions( # pfx, r, # specs_to_remove=specs_to_remove, prune=prune, # ) # actions = get_blank_actions(pfx) # actions['UNLINK'].extend(dists_for_unlinking) # actions['LINK'].extend(dists_for_linking) # actions['SPECS'].extend(text_type(s) for s in specs_to_remove) # actions['ACTION'] = 'REMOVE' # action_groups.append((actions, r.index)) # action_groups = tuple(action_groups) # else: # specs = specs_from_args(args.package_names) # if sys.prefix == abspath(prefix) and names_in_specs(ROOT_NO_RM, specs) and not args.force: # NOQA # raise CondaEnvironmentError('cannot remove %s from root environment' % # ', '.join(ROOT_NO_RM)) # action_groups = (remove_actions(prefix, list(specs), index=index, # force=args.force, # pinned=not context.ignore_pinned, # ), index), # # # delete_trash() # if any(nothing_to_do(x[0]) for x in action_groups): # if args.all: # print("\nRemove all packages in environment %s:\n" % prefix, file=sys.stderr) # if not context.json: # confirm_yn(args) # rm_rf(prefix) # # if context.json: # stdout_json({ # 'success': True, # 'actions': tuple(x[0] for x in action_groups) # }) # return # # pkg = str(args.package_names).replace("['", "") # pkg = pkg.replace("']", "") # # error_message = "No packages named '%s' found to remove from environment." % pkg # raise PackageNotFoundError(error_message) # if not context.json: # for actions, ndx in action_groups: # print() # print("Package plan for package removal in environment %s:" % actions["PREFIX"]) # display_actions(actions, ndx) # elif context.json and args.dry_run: # stdout_json({ # 'success': True, # 'dry_run': True, # 'actions': tuple(x[0] for x in action_groups), # }) # return # # if not context.json: # confirm_yn(args) # # for actions, ndx in action_groups: # if context.json and not context.quiet: # with json_progress_bars(): # execute_actions(actions, ndx, verbose=not context.quiet) # else: # execute_actions(actions, ndx, verbose=not context.quiet) # # target_prefix = actions["PREFIX"] # if is_private_env_path(target_prefix) and linked_data(target_prefix) == {}: # rm_rf(target_prefix) # # if args.all: # rm_rf(prefix) # # if context.json: # stdout_json({ # 'success': True, # 'actions': tuple(x[0] for x in action_groups), # })
40.497409
108
0.591223
e536bc5fc5cf22b5e8132ffc6a54cb7f28ffed6d
555
py
Python
tokopedia/response.py
hexatester/tokopedia
20e46c3ec2c70de6b24460634b7c185ffdb15691
[ "MIT" ]
5
2021-07-01T05:09:20.000Z
2022-03-06T10:53:07.000Z
tokopedia/response.py
hexatester/tokopedia
20e46c3ec2c70de6b24460634b7c185ffdb15691
[ "MIT" ]
null
null
null
tokopedia/response.py
hexatester/tokopedia
20e46c3ec2c70de6b24460634b7c185ffdb15691
[ "MIT" ]
1
2022-02-14T01:20:34.000Z
2022-02-14T01:20:34.000Z
import attr from typing import List, Optional, Union @attr.dataclass(slots=True) class BaseResponseHeader: process_time: float messages: str class ResponseHeader(BaseResponseHeader): pass @attr.dataclass(slots=True) class ErrorResponseHeader(BaseResponseHeader): reason: str error_code: str @attr.dataclass(slots=True) class TokopediaResponse: header: Union[ResponseHeader, ErrorResponseHeader, None] @attr.dataclass(slots=True) class TokopediaResponseV2: status: Optional[str] error_message: Optional[List[str]]
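
# A construction sketch (not from the source); the field values are placeholders.
# attrs generates the __init__ from the annotated fields, and plain subclasses
# such as ResponseHeader inherit it.
#
#   header = ResponseHeader(process_time=0.12, messages='OK')
#   resp = TokopediaResponse(header=header)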
18.5
60
0.767568
371fe52d0071a5ea8d99ffa23b9af02850bd5072
796
py
Python
python/scrapy/OfficialSpider/tutorial/tutorial/spiders/quotes_xpath.py
RitamDey/My-Simple-Programs
147b455a6a40c371ec894ce979e8a61d242e03bd
[ "Unlicense" ]
2
2016-10-14T16:58:05.000Z
2017-05-04T04:59:18.000Z
python/scrapy/OfficialSpider/tutorial/tutorial/spiders/quotes_xpath.py
GreenJoey/My-Simple-Programs
147b455a6a40c371ec894ce979e8a61d242e03bd
[ "Unlicense" ]
null
null
null
python/scrapy/OfficialSpider/tutorial/tutorial/spiders/quotes_xpath.py
GreenJoey/My-Simple-Programs
147b455a6a40c371ec894ce979e8a61d242e03bd
[ "Unlicense" ]
null
null
null
import scrapy
from scrapy import Spider


class QuotesXpathSpider(Spider):
    name = 'quotes-xpath'
    start_urls = [
        'http://quotes.toscrape.com/'
    ]

    def parse(self, response):
        for quote in response.xpath('//div[@class="quote"]'):
            yield {
                'text': quote.xpath('./span[@class="text"]/text()').extract_first(),
                'author': quote.xpath('.//small[@class="author"]/text()').extract_first(),
                'tags': quote.xpath('.//div[@class="tags"]/a[@class="tag"]/text()').extract()
            }

        next_page_url = response.xpath('//li[@class="next"]/a/@href').extract_first()
        if next_page_url is not None:
            yield scrapy.Request(
                response.urljoin(next_page_url)
            )
33.166667
99
0.536432
59bd3ed904a16077a6b3b49ba3a6ed8417a21589
4,109
py
Python
pep263/core.py
B3QL/pep263
ca1f3772f59bb5b5eef9bf8ba41c21f823778895
[ "MIT" ]
1
2018-06-15T13:08:10.000Z
2018-06-15T13:08:10.000Z
pep263/core.py
B3QL/pep263
ca1f3772f59bb5b5eef9bf8ba41c21f823778895
[ "MIT" ]
null
null
null
pep263/core.py
B3QL/pep263
ca1f3772f59bb5b5eef9bf8ba41c21f823778895
[ "MIT" ]
null
null
null
"""Core functionalities.""" import logging import re from collections import namedtuple from .decorators import seek_file from .errors import EncodingError try: from os import scandir except ImportError: from scandir import scandir logger = logging.getLogger(__name__) # Official regex from https://www.python.org/dev/peps/pep-0263/ ENCODING_PATTERN = re.compile("^[ \t\v]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)") ENCODING_LINE = '# -*- coding: {encoding_name} -*-\n' EncodingInfo = namedtuple('EncodingInfo', 'name lineno error') def find_encoding(filename): """Find information about encoding in file.""" try: with open(filename, 'r') as f: encoding_info = _find_file_encoding(f) return encoding_info except PermissionError: logger.error('Cannot open file %s', filename) return EncodingInfo('permission denied', -1, EncodingError.PERMISSION_ERROR) except FileNotFoundError: logger.error('File not found %s', filename) return EncodingInfo('no encoding', -1, EncodingError.NOT_FOUND) except LookupError as exc: msg = str(exc).capitalize() logger.warning('%s in %s', msg, filename) return EncodingInfo('no encoding', -1, EncodingError.NOT_FOUND) except ValueError as exc: msg = str(exc).capitalize() logger.warning('%s in %s', msg, filename) return EncodingInfo('invalid encoding', -1, EncodingError.INVALID_ENCODING) def _find_file_encoding(f, lineno=1): if lineno > 2: raise LookupError('encoding not found') line = f.readline() line_match = re.search(ENCODING_PATTERN, line) if line_match: encoding_name = validate_encoding(line_match.group(1)) return EncodingInfo(name=encoding_name, lineno=lineno, error=EncodingError.OK) return _find_file_encoding(f, lineno + 1) def validate_encoding(encoding_name): """Validate encoding name.""" try: import codecs codecs.lookup(encoding_name) except LookupError as e: raise ValueError(e) return encoding_name def search_files(path): """Find recursively python files in path.""" files = [] try: for entry in scandir(path): if _is_py_file(entry): files.append(entry.path) elif entry.is_dir(): subdir_files = search_files(entry.path) files.extend(subdir_files) except PermissionError as exc: logger.error('Cannot open %s', exc.filename) except FileNotFoundError: logger.error('Directory not found %s', path) except NotADirectoryError: files.append(path) return files def _is_py_file(entry): return entry.is_file() and entry.name.endswith('.py') def append_encoding(filename, encoding_name, force=False): """Append encoding to file.""" try: with open(filename, 'r+') as f: _append_file_encoding(f, encoding_name, force) except PermissionError: logger.error('Cannot open a file %s', filename) except FileNotFoundError: logger.error('File not found %s', filename) except IsADirectoryError: logger.error('Not a file %s', filename) def _append_file_encoding(f, encoding_name, replace=False): encoding_name = validate_encoding(encoding_name) encoding_line = ENCODING_LINE.format(**locals()) try: encoding_info = _find_file_encoding(f) except LookupError: lineno = _find_lineno(f) else: if replace: lineno = encoding_info.lineno else: raise RuntimeError('encoding already exists') _write_line(f, lineno, encoding_line, replace) @seek_file def _find_lineno(f): first_line = f.readline() shebang = first_line.startswith('#!') return 2 if shebang else 1 @seek_file def _write_line(f, lineno, content, replace): file_content = f.readlines() line_index = lineno - 1 if replace: file_content[line_index] = content else: file_content.insert(line_index, content) f.seek(0) f.writelines(file_content)
29.775362
86
0.666586
0ba96a49ee2a47b66e2d8e3cc64349fcbf227836
440
py
Python
test_gspread.py
yeongseon/django_beautifulseodang
c79febd2addc67094e696aa731677a72048e4482
[ "MIT" ]
1
2021-11-22T05:09:00.000Z
2021-11-22T05:09:00.000Z
test_gspread.py
yeongseon/django_beautifulseodang
c79febd2addc67094e696aa731677a72048e4482
[ "MIT" ]
null
null
null
test_gspread.py
yeongseon/django_beautifulseodang
c79febd2addc67094e696aa731677a72048e4482
[ "MIT" ]
null
null
null
import gspread from oauth2client.service_account import ServiceAccountCredentials # use creds to create a client to interact with the Google Drive API scope = ['https://spreadsheets.google.com/feeds'] creds = ServiceAccountCredentials.from_json_keyfile_name('Beautifulseodang-c0a5cc61a537.json', scope) client = gspread.authorize(creds) sheet = client.open("test").sheet1 list_of_hashes = sheet.get_all_records() print (list_of_hashes)
31.428571
101
0.815909
154d1c15999ee7b43c1a9eef7288fda11804ce4a
475
py
Python
app/__init__.py
brianraila/fastapi-peewee-api
f2a4b4e0a42dee981758547838d9e6b473c74ce2
[ "MIT" ]
null
null
null
app/__init__.py
brianraila/fastapi-peewee-api
f2a4b4e0a42dee981758547838d9e6b473c74ce2
[ "MIT" ]
null
null
null
app/__init__.py
brianraila/fastapi-peewee-api
f2a4b4e0a42dee981758547838d9e6b473c74ce2
[ "MIT" ]
null
null
null
import os

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware


def create_app():
    app = FastAPI()
    # app.config.from_mapping(SECRET_KEY=os.environ.get('SECRET_KEY') or 'you-will-never-guess')

    from app.routes import index
    app.include_router(index.router)

    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    return app


app = create_app()
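
# A run sketch (not from the source), assuming this package is importable as `app`
# so the module-level `app` object can be served by uvicorn:
#
#   import uvicorn
#   uvicorn.run("app:app", reload=True)   # or from a shell: uvicorn app:app --reload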
23.75
127
0.709474
1bb8bedc38e3e08ae2071a89a038afef9f81932e
1,243
py
Python
python/2015/aoc_15_b.py
nickspoons/adventofcode
4aef222ccc9dcea09b4010d65d5f9cd32683323a
[ "MIT" ]
null
null
null
python/2015/aoc_15_b.py
nickspoons/adventofcode
4aef222ccc9dcea09b4010d65d5f9cd32683323a
[ "MIT" ]
null
null
null
python/2015/aoc_15_b.py
nickspoons/adventofcode
4aef222ccc9dcea09b4010d65d5f9cd32683323a
[ "MIT" ]
null
null
null
""" Advent of Code, 2015: Day 15, b """ import re with open(__file__[:-5] + "_input") as f: inputs = [line.strip() for line in f] def score(ingredients, teaspoons): """ Find the product of the 500 calory ingredient scores """ calories = 0 for j, spoons in enumerate(teaspoons): calories += ingredients[j][4] * spoons if calories != 500: return 0 product = 1 for i in range(4): prop = 0 for j, spoons in enumerate(teaspoons): prop += ingredients[j][i] * spoons if prop <= 0: # The final product will be 0, so exit early return 0 product *= prop return product def run(): """ Enumerate through all measurement combos adding up to 100 """ ingredients = [] for line in inputs: pattern = r"^\w+: " + ", ".join(5 * [r"\w+ (-?\d+)"]) match = re.match(pattern, line) ingredients.append([int(match.group(i)) for i in range(1, 6)]) scores = [] for a in range(101): for b in range(101 - a): for c in range(101 - a - b): scores.append(score(ingredients, [a, b, c, 100 - a - b - c])) return max(scores) if __name__ == "__main__": print(run())
27.021739
77
0.548673
a4ddc1e4206950125cceea161d2f59636235fcd0
2,641
py
Python
tools/telemetry/telemetry/page/page_set.py
kjthegod/chromium
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2019-11-28T10:46:52.000Z
2019-11-28T10:46:52.000Z
tools/telemetry/telemetry/page/page_set.py
kjthegod/chromium
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
tools/telemetry/telemetry/page/page_set.py
kjthegod/chromium
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
2
2015-03-27T11:15:39.000Z
2016-08-17T14:19:56.000Z
# Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import csv import inspect import os from telemetry.page import page as page_module from telemetry.user_story import user_story_set from telemetry.util import cloud_storage PUBLIC_BUCKET = cloud_storage.PUBLIC_BUCKET PARTNER_BUCKET = cloud_storage.PARTNER_BUCKET INTERNAL_BUCKET = cloud_storage.INTERNAL_BUCKET class PageSetError(Exception): pass class PageSet(user_story_set.UserStorySet): def __init__(self, file_path=None, archive_data_file='', user_agent_type=None, serving_dirs=None, bucket=None): # The default value of file_path is location of the file that define this # page set instance's class. # TODO(chrishenry): Move this logic to user_story_set. Consider passing # a base_dir directly. Alternatively, kill this and rely on the default # behavior of using the instance's class file location. if file_path is None: file_path = inspect.getfile(self.__class__) # Turn pyc file into py files if we can if file_path.endswith('.pyc') and os.path.exists(file_path[:-1]): file_path = file_path[:-1] self.file_path = file_path super(PageSet, self).__init__( archive_data_file=archive_data_file, cloud_storage_bucket=bucket, serving_dirs=serving_dirs) # These attributes can be set dynamically by the page set. self.user_agent_type = user_agent_type @property def pages(self): return self.user_stories def AddUserStory(self, user_story): assert isinstance(user_story, page_module.Page) assert user_story.page_set is self super(PageSet, self).AddUserStory(user_story) @property def base_dir(self): if os.path.isfile(self.file_path): return os.path.dirname(self.file_path) else: return self.file_path def ReorderPageSet(self, results_file): """Reorders this page set based on the results of a past run.""" page_set_dict = {} for page in self.user_stories: page_set_dict[page.url] = page user_stories = [] with open(results_file, 'rb') as csv_file: csv_reader = csv.reader(csv_file) csv_header = csv_reader.next() if 'url' not in csv_header: raise Exception('Unusable results_file.') url_index = csv_header.index('url') for csv_row in csv_reader: if csv_row[url_index] in page_set_dict: self.AddUserStory(page_set_dict[csv_row[url_index]]) else: raise Exception('Unusable results_file.') return user_stories
31.819277
80
0.723211
1ea5145d666b5b425487cb523a766bfa2fddc8c8
3,595
py
Python
sdk/python/pulumi_aws/kms/alias.py
Charliekenney23/pulumi-aws
55bd0390160d27350b297834026fee52114a2d41
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/kms/alias.py
Charliekenney23/pulumi-aws
55bd0390160d27350b297834026fee52114a2d41
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/kms/alias.py
Charliekenney23/pulumi-aws
55bd0390160d27350b297834026fee52114a2d41
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import json import warnings import pulumi import pulumi.runtime from .. import utilities, tables class Alias(pulumi.CustomResource): arn: pulumi.Output[str] """ The Amazon Resource Name (ARN) of the key alias. """ name: pulumi.Output[str] """ The display name of the alias. The name must start with the word "alias" followed by a forward slash (alias/) """ name_prefix: pulumi.Output[str] """ Creates an unique alias beginning with the specified prefix. The name must start with the word "alias" followed by a forward slash (alias/). Conflicts with `name`. """ target_key_arn: pulumi.Output[str] """ The Amazon Resource Name (ARN) of the target key identifier. """ target_key_id: pulumi.Output[str] """ Identifier for the key for which the alias is for, can be either an ARN or key_id. """ def __init__(__self__, resource_name, opts=None, name=None, name_prefix=None, target_key_id=None, __name__=None, __opts__=None): """ Provides an alias for a KMS customer master key. AWS Console enforces 1-to-1 mapping between aliases & keys, but API (hence Terraform too) allows you to create as many aliases as the [account limits](http://docs.aws.amazon.com/kms/latest/developerguide/limits.html) allow you. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] name: The display name of the alias. The name must start with the word "alias" followed by a forward slash (alias/) :param pulumi.Input[str] name_prefix: Creates an unique alias beginning with the specified prefix. The name must start with the word "alias" followed by a forward slash (alias/). Conflicts with `name`. :param pulumi.Input[str] target_key_id: Identifier for the key for which the alias is for, can be either an ARN or key_id. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if not resource_name: raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(resource_name, str): raise TypeError('Expected resource name to be a string') if opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() __props__['name'] = name __props__['name_prefix'] = name_prefix if target_key_id is None: raise TypeError("Missing required property 'target_key_id'") __props__['target_key_id'] = target_key_id __props__['arn'] = None __props__['target_key_arn'] = None super(Alias, __self__).__init__( 'aws:kms/alias:Alias', resource_name, __props__, opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
42.294118
148
0.672045
2f035323fe1909b4337ef1fd89accb7aae86378b
15,317
py
Python
tests/test_schema.py
mnm678/securesystemslib
caf029ea61339dc5fa9beedf230ca1d026129169
[ "MIT" ]
1
2021-11-27T00:05:15.000Z
2021-11-27T00:05:15.000Z
tests/test_schema.py
trailofbits/securesystemslib
9b3a78e998412c39418efb29f411591b20cdd236
[ "MIT" ]
null
null
null
tests/test_schema.py
trailofbits/securesystemslib
9b3a78e998412c39418efb29f411591b20cdd236
[ "MIT" ]
null
null
null
#!/usr/bin/env python

"""
<Program Name>
  test_schema.py

<Author>
  Vladimir Diaz <vladimir.v.diaz@gmail.com>

<Started>
  October 2012.

<Copyright>
  See LICENSE for licensing information.

<Purpose>
  Unit test for 'schema.py'
"""

# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division.  Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import unittest
import re

import securesystemslib.exceptions
import securesystemslib.schema as SCHEMA


class TestSchema(unittest.TestCase):

  def setUp(self):
    pass

  def tearDown(self):
    pass

  def test_Schema(self):
    # Test conditions for the instantiation of classes that inherit
    # from class Schema().
    class NewSchema(SCHEMA.Schema):
      def __init__(self):
        pass

    new_schema = NewSchema()
    self.assertRaises(NotImplementedError, new_schema.matches, 'test')

    # Define a new schema.
    class NewSchema2(SCHEMA.Schema):
      def __init__(self, string):
        self._string = string

      def check_match(self, object):
        if self._string != object:
          message = 'Expected: ' + repr(self._string)
          raise securesystemslib.exceptions.FormatError(message)

    new_schema2 = NewSchema2('test')
    self.assertRaises(securesystemslib.exceptions.FormatError,
                      new_schema2.check_match, 'bad')
    self.assertFalse(new_schema2.matches('bad'))
    self.assertTrue(new_schema2.matches('test'))

    # Test conditions for invalid arguments.
    self.assertRaises(securesystemslib.exceptions.FormatError,
                      new_schema2.check_match, True)
    self.assertRaises(securesystemslib.exceptions.FormatError,
                      new_schema2.check_match, NewSchema2)
    self.assertRaises(securesystemslib.exceptions.FormatError,
                      new_schema2.check_match, 123)
    self.assertFalse(new_schema2.matches(True))
    self.assertFalse(new_schema2.matches(NewSchema2))
    self.assertFalse(new_schema2.matches(123))

  def test_Any(self):
    # Test conditions for valid arguments.
    any_schema = SCHEMA.Any()
    self.assertTrue(any_schema.matches('test'))
    self.assertTrue(any_schema.matches(123))
    self.assertTrue(any_schema.matches(['test']))
    self.assertTrue(any_schema.matches({'word': 'definition'}))
    self.assertTrue(any_schema.matches(True))

  def test_String(self):
    # Test conditions for valid arguments.
    string_schema = SCHEMA.String('test')
    self.assertTrue(string_schema.matches('test'))

    # Test conditions for invalid arguments.
    self.assertFalse(string_schema.matches(True))
    self.assertFalse(string_schema.matches(['test']))
    self.assertFalse(string_schema.matches(SCHEMA.Schema))

    # Test conditions for invalid arguments in a schema definition.
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.String, 1)
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.String, [1])
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.String, {'a': 1})

  def test_AnyString(self):
    # Test conditions for valid arguments.
    anystring_schema = SCHEMA.AnyString()
    self.assertTrue(anystring_schema.matches(''))
    self.assertTrue(anystring_schema.matches('a string'))

    # Test conditions for invalid arguments.
    self.assertFalse(anystring_schema.matches(['a']))
    self.assertFalse(anystring_schema.matches(3))
    self.assertFalse(anystring_schema.matches({'a': 'string'}))

  def test_OneOf(self):
    # Test conditions for valid arguments.
    oneof_schema = SCHEMA.OneOf([SCHEMA.ListOf(SCHEMA.Integer()),
                                 SCHEMA.String('Hello'), SCHEMA.String('bye')])
    self.assertTrue(oneof_schema.matches([]))
    self.assertTrue(oneof_schema.matches('bye'))
    self.assertTrue(oneof_schema.matches([1, 2]))

    # Test conditions for invalid arguments.
    self.assertFalse(oneof_schema.matches(3))
    self.assertFalse(oneof_schema.matches(['Hi']))

    # Test conditions for invalid arguments in a schema definition.
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.OneOf, 1)
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.OneOf, [1])
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.OneOf, {'a': 1})
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.OneOf,
                      [SCHEMA.AnyString(), 1])

  def test_AllOf(self):
    # Test conditions for valid arguments.
    allof_schema = SCHEMA.AllOf([SCHEMA.Any(), SCHEMA.AnyString(),
                                 SCHEMA.String('a')])
    self.assertTrue(allof_schema.matches('a'))

    # Test conditions for invalid arguments.
    self.assertFalse(allof_schema.matches('b'))

    # Test conditions for invalid arguments in a schema definition.
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.AllOf, 1)
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.AllOf, [1])
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.AllOf, {'a': 1})
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.AllOf,
                      [SCHEMA.AnyString(), 1])

  def test_Boolean(self):
    # Test conditions for valid arguments.
    boolean_schema = SCHEMA.Boolean()
    self.assertTrue(boolean_schema.matches(True) and boolean_schema.matches(False))

    # Test conditions for invalid arguments.
    self.assertFalse(boolean_schema.matches(11))

  def test_ListOf(self):
    # Test conditions for valid arguments.
    listof_schema = SCHEMA.ListOf(SCHEMA.RegularExpression('(?:..)*'))
    listof2_schema = SCHEMA.ListOf(SCHEMA.Integer(), min_count=3, max_count=10)
    self.assertTrue(listof_schema.matches([]))
    self.assertTrue(listof_schema.matches(['Hi', 'this', 'list', 'is', 'full',
                                           'of', 'even', 'strs']))
    self.assertTrue(listof2_schema.matches([3] * 3))
    self.assertTrue(listof2_schema.matches([3] * 10))

    # Test conditions for invalid arguments.
    self.assertFalse(listof_schema.matches('hi'))
    self.assertFalse(listof_schema.matches({}))
    self.assertFalse(listof_schema.matches(['This', 'one', 'is not']))
    self.assertFalse(listof2_schema.matches([3] * 2))
    self.assertFalse(listof2_schema.matches([3] * 11))

    # Test conditions for invalid arguments in a schema definition.
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.ListOf, 1)
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.ListOf, [1])
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.ListOf, {'a': 1})

  def test_Integer(self):
    # Test conditions for valid arguments.
    integer_schema = SCHEMA.Integer()
    self.assertTrue(integer_schema.matches(99))
    self.assertTrue(SCHEMA.Integer(lo=10, hi=30).matches(25))

    # Test conditions for invalid arguments.
    self.assertFalse(integer_schema.matches(False))
    self.assertFalse(integer_schema.matches('a string'))
    self.assertFalse(SCHEMA.Integer(lo=10, hi=30).matches(5))

  def test_DictOf(self):
    # Test conditions for valid arguments.
    dictof_schema = SCHEMA.DictOf(SCHEMA.RegularExpression(r'[aeiou]+'),
                                  SCHEMA.Struct([SCHEMA.AnyString(), SCHEMA.AnyString()]))
    self.assertTrue(dictof_schema.matches({}))
    self.assertTrue(dictof_schema.matches({'a': ['x', 'y'], 'e': ['', '']}))

    # Test conditions for invalid arguments.
    self.assertFalse(dictof_schema.matches(''))
    self.assertFalse(dictof_schema.matches({'a': ['x', 3], 'e': ['', '']}))
    self.assertFalse(dictof_schema.matches({'a': ['x', 'y'], 'e': ['', ''],
                                            'd': ['a', 'b']}))

    # Test conditions for invalid arguments in a schema definition.
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.DictOf, 1, 1)
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.DictOf, [1], [1])
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.DictOf, {'a': 1}, 1)
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.DictOf,
                      SCHEMA.AnyString(), 1)

  def test_Optional(self):
    # Test conditions for valid arguments.
    optional_schema = SCHEMA.Object(k1=SCHEMA.String('X'),
                                    k2=SCHEMA.Optional(SCHEMA.String('Y')))
    self.assertTrue(optional_schema.matches({'k1': 'X', 'k2': 'Y'}))
    self.assertTrue(optional_schema.matches({'k1': 'X'}))

    # Test conditions for invalid arguments.
    self.assertFalse(optional_schema.matches({'k1': 'X', 'k2': 'Z'}))

    # Test conditions for invalid arguments in a schema definition.
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.Optional, 1)
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.Optional, [1])
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.Optional, {'a': 1})

  def test_Object(self):
    # Test conditions for valid arguments.
    object_schema = SCHEMA.Object(a=SCHEMA.AnyString(),
                                  bc=SCHEMA.Struct([SCHEMA.Integer(), SCHEMA.Integer()]))
    self.assertTrue(object_schema.matches({'a': 'ZYYY', 'bc': [5, 9]}))
    self.assertTrue(object_schema.matches({'a': 'ZYYY', 'bc': [5, 9], 'xx': 5}))

    # Test conditions for invalid arguments.
    self.assertFalse(object_schema.matches({'a': 'ZYYY', 'bc': [5, 9, 3]}))
    self.assertFalse(object_schema.matches({'a': 'ZYYY'}))

    # Test conditions for invalid arguments in a schema definition.
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.Object, a='a')
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.Object, a=[1])
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.Object,
                      a=SCHEMA.AnyString(), b=1)

    # Test condition for invalid non-dict arguments.
    self.assertFalse(object_schema.matches([{'a': 'XYZ'}]))
    self.assertFalse(object_schema.matches(8))

  def test_Struct(self):
    # Test conditions for valid arguments.
    struct_schema = SCHEMA.Struct([SCHEMA.ListOf(SCHEMA.AnyString()),
                                   SCHEMA.AnyString(), SCHEMA.String('X')])
    struct2_schema = SCHEMA.Struct([SCHEMA.String('X')], allow_more=True)
    struct3_schema = SCHEMA.Struct([SCHEMA.String('X'), SCHEMA.Integer()],
                                   [SCHEMA.Integer()])
    self.assertTrue(struct_schema.matches([[], 'Q', 'X']))
    self.assertTrue(struct2_schema.matches(['X']))
    self.assertTrue(struct2_schema.matches(['X', 'Y']))
    self.assertTrue(struct2_schema.matches(['X', ['Y', 'Z']]))
    self.assertTrue(struct3_schema.matches(['X', 3]))
    self.assertTrue(struct3_schema.matches(['X', 3, 9]))

    # Test conditions for invalid arguments.
    self.assertFalse(struct_schema.matches(False))
    self.assertFalse(struct_schema.matches('Foo'))
    self.assertFalse(struct_schema.matches([[], 'Q', 'D']))
    self.assertFalse(struct_schema.matches([[3], 'Q', 'X']))
    self.assertFalse(struct_schema.matches([[], 'Q', 'X', 'Y']))
    self.assertFalse(struct2_schema.matches([]))
    self.assertFalse(struct2_schema.matches([['X']]))
    self.assertFalse(struct3_schema.matches([]))
    self.assertFalse(struct3_schema.matches({}))
    self.assertFalse(struct3_schema.matches(['X']))
    self.assertFalse(struct3_schema.matches(['X', 3, 9, 11]))
    self.assertFalse(struct3_schema.matches(['X', 3, 'A']))

    # Test conditions for invalid arguments in a schema definition.
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.Struct, 1)
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.Struct, [1])
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.Struct, {'a': 1})
    self.assertRaises(securesystemslib.exceptions.FormatError, SCHEMA.Struct,
                      [SCHEMA.AnyString(), 1])

  def test_RegularExpression(self):
    # Test conditions for valid arguments.
    # RegularExpression(pattern, modifiers, re_object, re_name).
    re_schema = SCHEMA.RegularExpression('h.*d')
    self.assertTrue(re_schema.matches('hello world'))

    # Provide a pattern that contains the trailing '$'
    re_schema_2 = SCHEMA.RegularExpression(pattern='abc$', modifiers=0,
                                           re_object=None, re_name='my_re')
    self.assertTrue(re_schema_2.matches('abc'))

    # Test for valid optional arguments.
    compiled_re = re.compile('^[a-z].*')
    re_schema_optional = SCHEMA.RegularExpression(pattern='abc', modifiers=0,
                                                  re_object=compiled_re, re_name='my_re')
    self.assertTrue(re_schema_optional.matches('abc'))

    # Valid arguments, but the 'pattern' argument is unset (required if the
    # 're_object' is 'None'.)
    self.assertRaises(securesystemslib.exceptions.FormatError,
                      SCHEMA.RegularExpression, None, 0, None, None)

    # Valid arguments, 're_name' is unset, and 'pattern' is None.  An exception
    # is not raised, but 're_name' is set to 'pattern'.
    re_schema_optional = SCHEMA.RegularExpression(pattern=None, modifiers=0,
                                                  re_object=compiled_re, re_name=None)
    self.assertTrue(re_schema_optional.matches('abc'))
    self.assertTrue(re_schema_optional._re_name == 'pattern')

    # Test conditions for invalid arguments.
    self.assertFalse(re_schema.matches('Hello World'))
    self.assertFalse(re_schema.matches('hello world!'))
    self.assertFalse(re_schema.matches([33, 'Hello']))

    self.assertRaises(securesystemslib.exceptions.FormatError,
                      SCHEMA.RegularExpression, 8)

  def test_LengthString(self):
    # Test conditions for valid arguments.
    length_string = SCHEMA.LengthString(11)
    self.assertTrue(length_string.matches('Hello World'))
    self.assertTrue(length_string.matches('Hello Marty'))

    # Test conditions for invalid arguments.
    self.assertRaises(securesystemslib.exceptions.FormatError,
                      SCHEMA.LengthString, 'hello')
    self.assertFalse(length_string.matches('hello'))
    self.assertFalse(length_string.matches(8))

  def test_LengthBytes(self):
    # Test conditions for valid arguments.
    length_bytes = SCHEMA.LengthBytes(11)
    self.assertTrue(length_bytes.matches(b'Hello World'))
    self.assertTrue(length_bytes.matches(b'Hello Marty'))

    # Test conditions for invalid arguments.
    self.assertRaises(securesystemslib.exceptions.FormatError,
                      SCHEMA.LengthBytes, 'hello')
    self.assertRaises(securesystemslib.exceptions.FormatError,
                      SCHEMA.LengthBytes, True)
    self.assertFalse(length_bytes.matches(b'hello'))
    self.assertFalse(length_bytes.matches(8))

  def test_AnyBytes(self):
    # Test conditions for valid arguments.
    anybytes_schema = SCHEMA.AnyBytes()
    self.assertTrue(anybytes_schema.matches(b''))
    self.assertTrue(anybytes_schema.matches(b'a string'))

    # Test conditions for invalid arguments.
    self.assertFalse(anybytes_schema.matches('a string'))
    self.assertFalse(anybytes_schema.matches(['a']))
    self.assertFalse(anybytes_schema.matches(3))
    self.assertFalse(anybytes_schema.matches({'a': 'string'}))


# Run the unit tests.
if __name__ == '__main__':
  unittest.main()
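# --- Hedged sketch (not part of the test suite above): how a caller might
# compose the schema classes exercised by these tests to validate a nested
# structure. The field names ('keyid', 'keyval') are invented for illustration.
import securesystemslib.schema as SCHEMA

KEY_SCHEMA = SCHEMA.Object(
    object_name='KEY_SCHEMA',
    keyid=SCHEMA.RegularExpression(r'[a-fA-F0-9]+'),
    keyval=SCHEMA.DictOf(SCHEMA.AnyString(), SCHEMA.AnyString()))

# matches() returns a bool; check_match() raises FormatError on mismatch.
assert KEY_SCHEMA.matches({'keyid': 'abc123', 'keyval': {'public': 'deadbeef'}})
assert not KEY_SCHEMA.matches({'keyid': 'not hex!'})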
34.266219
83
0.709081
212ee81ae6bb5d4d0cd548e6ab62cab8ca390b88
1,721
py
Python
tests/unit/executors/test_dump_executor_with_drivers.py
liushuigs/jina
b3550e901b2a340924330b5ba2801603e493c933
[ "Apache-2.0" ]
null
null
null
tests/unit/executors/test_dump_executor_with_drivers.py
liushuigs/jina
b3550e901b2a340924330b5ba2801603e493c933
[ "Apache-2.0" ]
2
2021-02-15T01:40:38.000Z
2021-02-15T02:00:21.000Z
tests/unit/executors/test_dump_executor_with_drivers.py
liushuigs/jina
b3550e901b2a340924330b5ba2801603e493c933
[ "Apache-2.0" ]
null
null
null
import os
import pickle

import pytest

from jina.drivers.control import RouteDriver
from jina.executors import BaseExecutor

cur_dir = os.path.dirname(os.path.abspath(__file__))


def test_dump_driver(tmpdir):
    rd = RouteDriver(raise_no_dealer=True)
    rd.idle_dealer_ids = ('hello', 'there')

    with open(str(tmpdir / 'a.bin'), 'wb') as fp:
        pickle.dump(rd, fp)

    with open(str(tmpdir / 'a.bin'), 'rb') as fp:
        p = pickle.load(fp)

    # init args & kwargs values should be saved
    assert p.raise_no_dealer
    # other stateful values should be reset to their values at __init__() time
    assert not p.idle_dealer_ids


def test_dump_executor_without_drivers(tmpdir):
    # Create an executor from a yaml file and store it to disk
    executor_a = BaseExecutor.load_config(f'{cur_dir}/yaml/route.yml')
    executor_a.touch()
    executor_a._drivers['ControlRequest'][0].idle_dealer_ids = ('hello', 'there')
    executor_a.save(str(tmpdir / 'aux.bin'))

    # load the saved executor_a as executor_b
    executor_b = BaseExecutor.load(str(tmpdir / 'aux.bin'))
    assert hasattr(executor_b, '_drivers') is False


@pytest.fixture
def temp_workspace(tmpdir):
    os.environ['JINA_TEST_LOAD_FROM_DUMP_WORKSPACE'] = str(tmpdir)
    yield
    del os.environ['JINA_TEST_LOAD_FROM_DUMP_WORKSPACE']


def test_drivers_renewed_from_yml_when_loaded_from_dump(temp_workspace):
    executor_a = BaseExecutor.load_config(f'{cur_dir}/yaml/example_1.yml')
    assert executor_a._drivers['SearchRequest'][0]._is_update is True
    with executor_a:
        executor_a.touch()
    executor_b = BaseExecutor.load_config(f'{cur_dir}/yaml/example_2.yml')
    assert executor_b._drivers['SearchRequest'][0]._is_update is False
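# --- Hedged sketch (not from jina itself): the pickling contract asserted in
# test_dump_driver above can be implemented by keeping only construction
# arguments in __getstate__, so runtime state is rebuilt by __init__ when the
# object is unpickled. `DemoDriver` is an invented stand-in, not a jina class.
import pickle


class DemoDriver:
    def __init__(self, raise_no_dealer=False):
        self.raise_no_dealer = raise_no_dealer  # init arg: survives pickling
        self.idle_dealer_ids = ()               # runtime state: reset on load

    def __getstate__(self):
        # only the init args are serialized
        return {'raise_no_dealer': self.raise_no_dealer}

    def __setstate__(self, state):
        # re-run __init__ so all runtime state gets its default value
        self.__init__(**state)


d = DemoDriver(raise_no_dealer=True)
d.idle_dealer_ids = ('hello', 'there')
restored = pickle.loads(pickle.dumps(d))
assert restored.raise_no_dealer and restored.idle_dealer_ids == ()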
30.732143
81
0.728646
c05cd0448f442bd3d531a82cc383a6bd232a8197
19,726
py
Python
grow_bert/progressive/masked_lm.py
DionysisChristopoulos/google-research
7f59ef421beef32ca16c2a7215be74f7eba01a0f
[ "Apache-2.0" ]
7
2021-06-15T05:54:29.000Z
2022-02-21T06:57:06.000Z
grow_bert/progressive/masked_lm.py
DionysisChristopoulos/google-research
7f59ef421beef32ca16c2a7215be74f7eba01a0f
[ "Apache-2.0" ]
12
2021-08-25T16:15:31.000Z
2022-02-10T05:10:37.000Z
grow_bert/progressive/masked_lm.py
DionysisChristopoulos/google-research
7f59ef421beef32ca16c2a7215be74f7eba01a0f
[ "Apache-2.0" ]
5
2021-11-25T07:40:17.000Z
2022-03-22T11:13:39.000Z
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Masked language task with progressive training."""
# Lint as: python3
import copy
import os
from typing import List, Optional

from absl import logging
import dataclasses
import numpy
import orbit
import tensorflow as tf
import yaml

from grow_bert.lowcost.config import config_encoder as ecfg
from grow_bert.lowcost.models import bert_pretrain_model as small_pretrainer
from grow_bert.lowcost.models import pretrain_dataloader as small_dataloader
from grow_bert.lowcost.models import transformer_encoder as small_encoder_lib
from official.core import task_factory
from official.modeling import optimization
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.modeling.hyperparams import config_definitions as cfg
from official.modeling.progressive import policies
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.modeling import layers
from official.nlp.tasks import masked_lm


@dataclasses.dataclass
class MaskedLMConfig(cfg.TaskConfig):
  """The model config."""
  init_checkpoint: str = ''
  model: bert.PretrainerConfig = bert.PretrainerConfig(
      cls_heads=[
          bert.ClsHeadConfig(
              inner_dim=768,
              num_classes=2,
              dropout_rate=0.1,
              name='next_sentence')
      ],
      encoder=encoders.EncoderConfig(bert=encoders.BertEncoderConfig()))
  scale_loss: bool = False
  train_data: pretrain_dataloader.BertPretrainDataConfig = (
      pretrain_dataloader.BertPretrainDataConfig())
  small_train_data: pretrain_dataloader.BertPretrainDataConfig = (
      pretrain_dataloader.BertPretrainDataConfig())
  validation_data: pretrain_dataloader.BertPretrainDataConfig = (
      pretrain_dataloader.BertPretrainDataConfig())


@dataclasses.dataclass
class StackingStageConfig(base_config.Config):
  num_steps: int = 0
  warmup_steps: int = 10000
  initial_learning_rate: float = 1e-4
  end_learning_rate: float = 0.0
  decay_steps: int = 1000000
  override_num_layers: Optional[int] = None
  small_encoder_config: Optional[ecfg.SmallEncoderConfig] = (
      ecfg.SmallEncoderConfig())
  override_train_data: Optional[pretrain_dataloader.BertPretrainDataConfig] = (
      pretrain_dataloader.BertPretrainDataConfig())
  override_valid_data: Optional[pretrain_dataloader.BertPretrainDataConfig] = (
      pretrain_dataloader.BertPretrainDataConfig())


@dataclasses.dataclass
class ProgStackingConfig(policies.ProgressiveConfig):
  stage_list: List[StackingStageConfig] = dataclasses.field(
      default_factory=lambda: [  # pylint: disable=g-long-lambda
          StackingStageConfig(
              num_steps=3000,
              warmup_steps=10000,
              initial_learning_rate=1e-4,
              end_learning_rate=1e-4,
              decay_steps=1000000),
          StackingStageConfig(
              num_steps=3000,
              warmup_steps=10000,
              initial_learning_rate=1e-4,
              end_learning_rate=1e-4,
              decay_steps=1000000)
      ])


@task_factory.register_task_cls(MaskedLMConfig)
class ProgressiveMaskedLM(policies.ProgressivePolicy, masked_lm.MaskedLMTask):
  """Masked language modeling with progressive policy."""

  def __init__(self,
               strategy,
               progressive_config,
               optimizer_config,
               train_data_config,
               small_train_data_config,
               task_config,
               logging_dir=None):
    """Initialize progressive training manager before the training loop starts.

    Arguments:
      strategy: A distribution strategy.
      progressive_config: ProgressiveConfig. Configuration for this class.
      optimizer_config: optimization_config.OptimizerConfig. Configuration for
        building the optimizer.
      train_data_config: config_definitions.DataConfig. Configuration for
        building the training dataset.
      small_train_data_config: config_definitions.DataConfig. Configuration for
        building the training dataset used by stages that train the small
        model.
      task_config: TaskConfig. This is used in base_task.Task.
      logging_dir: a string pointing to where the model, summaries etc. will be
        saved. This is used in base_task.Task.
    """
    self._strategy = strategy
    self._progressive_config = progressive_config
    self._optimizer_config = optimizer_config
    self._train_data_config = train_data_config
    self._small_train_data_config = small_train_data_config
    self._model_config: bert.PretrainerConfig = task_config.model
    masked_lm.MaskedLMTask.__init__(
        self, params=task_config, logging_dir=logging_dir)
    policies.ProgressivePolicy.__init__(self)

  # Overrides policies.ProgressivePolicy
  def get_model(self, stage_id, old_model=None):
    """Build model for each stage."""
    stage_config: StackingStageConfig = self._progressive_config.stage_list[
        stage_id]
    if stage_config.small_encoder_config is not None:
      encoder_cfg: ecfg.TransformerEncoderConfig = (
          ecfg.from_bert_encoder_config(self._model_config.encoder.bert,
                                        stage_config.small_encoder_config))
      model_cfg = copy.deepcopy(self._model_config)
      model_cfg.encoder = encoders.EncoderConfig(bert=encoder_cfg)
      model = self.build_small_model(model_cfg.as_dict())
    else:
      model_config = copy.deepcopy(self._model_config)
      if stage_config.override_num_layers is not None:
        model_config.encoder.bert.num_layers = stage_config.override_num_layers
      model = self.build_model(model_config)
    _ = model(model.inputs)
    if stage_id == 0:
      self.initialize(model)
    if stage_id > 0 and old_model is not None:
      logging.info('Stage %d copying weights.', stage_id)
      self.transform_model(small_model=old_model, model=model)
    return model

  # Overrides policies.ProgressivePolicy
  def get_train_dataset(self, stage_id):
    stage_config = self._progressive_config.stage_list[stage_id]
    if stage_config.small_encoder_config is not None:
      train_data_config = self._small_train_data_config
      if stage_config.override_train_data is not None:
        logging.info('stage %d: override small train data to %s', stage_id,
                     stage_config.override_train_data)
        train_data_config = stage_config.override_train_data
      return orbit.utils.make_distributed_dataset(self._strategy,
                                                  self.build_small_inputs,
                                                  train_data_config)
    train_data_config = self._train_data_config
    if stage_config.override_train_data is not None:
      train_data_config = stage_config.override_train_data
      logging.info('stage %d: override full train data to %s', stage_id,
                   stage_config.override_train_data)
    return orbit.utils.make_distributed_dataset(self._strategy,
                                                self.build_inputs,
                                                train_data_config)

  # Overrides policies.ProgressivePolicy
  def get_eval_dataset(self, stage_id):
    build_func = self.build_inputs
    stage_config = self._progressive_config.stage_list[stage_id]
    if stage_config.small_encoder_config is not None:
      build_func = self.build_small_inputs
    valid_data_config = self.task_config.validation_data
    if stage_config.override_valid_data is not None:
      valid_data_config = stage_config.override_valid_data
      logging.info('stage %d: override full valid data to %s', stage_id,
                   stage_config.override_valid_data)
    return orbit.utils.make_distributed_dataset(self._strategy, build_func,
                                                valid_data_config)

  def build_small_inputs(self, params, input_context=None):
    """Returns tf.data.Dataset for pretraining."""
    return small_dataloader.PretrainDataLoader(params).load(input_context)

  # Overrides policies.ProgressivePolicy
  def num_stages(self):
    return len(self._progressive_config.stage_list)

  # Overrides policies.ProgressivePolicy
  def num_steps(self, stage_id):
    return self._progressive_config.stage_list[stage_id].num_steps

  # Overrides policies.ProgressivePolicy
  def get_optimizer(self, stage_id):
    """Build optimizer for each stage."""
    params = self._optimizer_config.replace(
        learning_rate={
            'polynomial': {
                'decay_steps':
                    self._progressive_config.stage_list[stage_id].decay_steps,
                'initial_learning_rate':
                    self._progressive_config.stage_list[stage_id]
                    .initial_learning_rate,
                'end_learning_rate':
                    self._progressive_config.stage_list[stage_id]
                    .end_learning_rate,
            }
        },
        warmup={
            'polynomial': {
                'warmup_steps':
                    self._progressive_config.stage_list[stage_id].warmup_steps,
            }
        })
    opt_factory = optimization.OptimizerFactory(params)
    optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
    return optimizer

  def build_small_model(self, model_cfg):
    encoder_cfg = model_cfg['encoder']['bert']
    dataconf = self.task_config.train_data
    encoder_network = small_encoder_lib.TransformerEncoder(
        vocab_size=encoder_cfg['vocab_size'],
        hidden_size=encoder_cfg['hidden_size'],
        num_layers=encoder_cfg['num_layers'],
        num_attention_heads=encoder_cfg['num_attention_heads'],
        intermediate_size=encoder_cfg['intermediate_size'],
        activation=tf_utils.get_activation(encoder_cfg['hidden_activation']),
        dropout_rate=encoder_cfg['dropout_rate'],
        attention_dropout_rate=encoder_cfg['attention_dropout_rate'],
        max_sequence_length=encoder_cfg['max_position_embeddings'],
        type_vocab_size=encoder_cfg['type_vocab_size'],
        initializer=tf.keras.initializers.TruncatedNormal(
            stddev=encoder_cfg['initializer_range']),
        net2net_ratio=encoder_cfg['net2net_ratio'],
        net2net_layers=encoder_cfg['net2net_layers'],
        lightatt_layers=encoder_cfg['lightatt_layers'],
        input_pool_name=encoder_cfg['input_pool_name'],
        input_pool_size=encoder_cfg['input_pool_size'])
    sequence_length = dataconf.seq_length
    predict_length = dataconf.max_predictions_per_seq
    dummy_inputs = dict(
        input_mask=tf.zeros((1, sequence_length), dtype=tf.int32),
        input_positions=tf.zeros((1, sequence_length), dtype=tf.int32),
        input_type_ids=tf.zeros((1, sequence_length), dtype=tf.int32),
        input_word_ids=tf.zeros((1, sequence_length), dtype=tf.int32),
        masked_lm_positions=tf.zeros((1, predict_length), dtype=tf.int32),
        masked_input_ids=tf.zeros((1, predict_length), dtype=tf.int32),
        masked_segment_ids=tf.zeros((1, predict_length), dtype=tf.int32),
        masked_lm_weights=tf.zeros((1, predict_length), dtype=tf.float32))
    _ = encoder_network(dummy_inputs)
    if 'cls_heads' in model_cfg:
      classification_heads = [
          layers.ClassificationHead(**cfg) for cfg in model_cfg['cls_heads']
      ]
    else:
      classification_heads = []
    model = small_pretrainer.BertPretrainModel(
        mlm_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=encoder_cfg['initializer_range']),
        mlm_activation=tf_utils.get_activation(
            encoder_cfg['hidden_activation']),
        encoder_network=encoder_network,
        classification_heads=classification_heads)
    _ = model(dummy_inputs)
    return model

  @staticmethod
  def transform_model(small_model, model):
    # copy variable weights
    # pylint: disable=protected-access
    encoder = model.encoder_network
    small_encoder = small_model.encoder_network
    model_embed_width = encoder.get_config()['embedding_width']
    model_hidden_size = encoder.get_config()['hidden_size']
    if model_embed_width is not None and model_embed_width != model_hidden_size:
      encoder._embedding_projection.set_weights(
          small_encoder._embedding_projection_layer.get_weights())
    encoder._embedding_layer.set_weights(
        small_encoder._embedding_layer.get_weights())
    encoder._type_embedding_layer.set_weights(
        small_encoder._type_embedding_layer.get_weights())
    encoder._position_embedding_layer.set_weights(
        small_encoder._position_embedding_layer.get_weights())
    encoder._embedding_norm_layer.set_weights(
        small_encoder._embedding_norm_layer.get_weights())
    encoder._pooler_layer.set_weights(small_encoder._pooler_layer.get_weights())

    model.masked_lm.bias.assign(small_model.masked_lm.bias)
    model.masked_lm.dense.set_weights(small_model.masked_lm.dense.get_weights())
    model.masked_lm.layer_norm.set_weights(
        small_model.masked_lm.layer_norm.get_weights())
    for i, cls_head in enumerate(model.classification_heads):
      cls_head.set_weights(small_model.classification_heads[i].get_weights())

    small_layers = small_encoder.transformer_layers
    small_num_layers = len(small_layers)
    num_layers = len(encoder.transformer_layers)
    logging.info('num_layers: %d, num_small_layers: %d', num_layers,
                 small_num_layers)
    if small_num_layers != num_layers:
      for i, layer in enumerate(encoder.transformer_layers):
        small_idx = i % small_num_layers
        logging.info('stack: %d -> %d', i, small_idx)
        small_layer = small_layers[small_idx]
        layer.set_weights(small_layer.get_weights())
    else:
      for i, layer in enumerate(encoder.transformer_layers):
        logging.info('!!! recover layer %d', i)
        small_layer = small_layers[i]

        # init attention layer
        attention_layer = layer._attention_layer
        small_attention_layer = small_layer._attention_layer
        attention_layer._value_dense.set_weights(
            small_attention_layer._value_dense.get_weights())
        attention_layer._output_dense.set_weights(
            small_attention_layer._output_dense.get_weights())
        if hasattr(small_layer,
                   'use_lightatt') and small_layer.use_lightatt and (
                       not hasattr(layer, 'use_lightatt') or
                       not layer.use_lightatt):
          logging.info('!!! recover lightatt')
          attention_layer._key_dense.set_weights(
              encoder.transformer_layers[i - 1]._attention_layer._key_dense
              .get_weights())
          attention_layer._query_dense.set_weights(
              encoder.transformer_layers[i - 1]._attention_layer._query_dense
              .get_weights())
        else:
          attention_layer._key_dense.set_weights(
              small_attention_layer._key_dense.get_weights())
          attention_layer._query_dense.set_weights(
              small_attention_layer._query_dense.get_weights())

        if hasattr(small_layer,
                   'net2net_ratio') and small_layer.net2net_ratio is not None:
          if hasattr(layer, 'net2net_ratio') and layer.net2net_ratio is not None:
            layer._output_dense_small.set_weights(
                small_layer._output_dense_small.get_weights())
            layer._intermediate_dense_small.set_weights(
                small_layer._intermediate_dense_small.get_weights())
          else:
            k = int(1 // small_layer.net2net_ratio)
            logging.info('!!! recover net2net %d', k)
            output_kernel, output_bias = layer._output_dense.get_weights()
            interm_kernel, interm_bias = layer._intermediate_dense.get_weights()
            output_small_kernel, output_small_bias = (
                small_layer._output_dense_small.get_weights())
            interm_small_kernel, interm_small_bias = (
                small_layer._intermediate_dense_small.get_weights())

            # check size
            small_interm_size = interm_small_kernel.shape[1]
            assert interm_kernel.shape[0] == output_kernel.shape[
                1] == output_bias.shape[0] == model_hidden_size
            error_message = (
                f'interm_kernel.shape1={interm_kernel.shape[1]}, '
                f'output_kernel.shape[0]={output_kernel.shape[0]}, '
                f'small_interm_size={small_interm_size}, k={k}')
            assert interm_kernel.shape[1] == output_kernel.shape[
                0] == interm_bias.shape[
                    0] == small_interm_size * k, error_message

            # restore
            new_output_bias = output_small_bias
            new_interm_bias = numpy.tile(interm_small_bias, k)
            new_interm_kernel = numpy.tile(interm_small_kernel, [1, k])
            new_output_kernel = numpy.tile(output_small_kernel, [k, 1]) / k
            layer._output_dense.set_weights(
                [new_output_kernel, new_output_bias])
            layer._intermediate_dense.set_weights(
                [new_interm_kernel, new_interm_bias])
        else:
          layer._output_dense.set_weights(
              small_layer._output_dense.get_weights())
          layer._intermediate_dense.set_weights(
              small_layer._intermediate_dense.get_weights())

        layer._output_layer_norm.set_weights(
            small_layer._output_layer_norm.get_weights())
        layer._attention_layer_norm.set_weights(
            small_layer._attention_layer_norm.get_weights())
    # pylint: enable=protected-access

  def initialize(self, model):
    init_dir_or_path = self.task_config.init_checkpoint
    logging.info('init dir_or_path: %s', init_dir_or_path)
    if not init_dir_or_path:
      return
    if tf.io.gfile.isdir(init_dir_or_path):
      init_dir = init_dir_or_path
      init_path = tf.train.latest_checkpoint(init_dir_or_path)
    else:
      init_path = init_dir_or_path
      init_dir = os.path.dirname(init_path)
    logging.info('init dir: %s', init_dir)
    logging.info('init path: %s', init_path)

    # restore from small model
    init_yaml_path = os.path.join(init_dir, 'params.yaml')
    if not tf.io.gfile.exists(init_yaml_path):
      init_yaml_path = os.path.join(os.path.dirname(init_dir), 'params.yaml')
    with tf.io.gfile.GFile(init_yaml_path, 'r') as rf:
      init_yaml_config = yaml.safe_load(rf)
    init_model_config = init_yaml_config['task']['model']
    if 'progressive' in init_yaml_config['trainer']:
      stage_list = init_yaml_config['trainer']['progressive']['stage_list']
      if stage_list:
        small_encoder_config = stage_list[-1]['small_encoder_config']
        if small_encoder_config is not None:
          small_encoder_config = ecfg.from_bert_encoder_config(
              init_model_config['encoder']['bert'], small_encoder_config)
          init_model_config['encoder']['bert'] = small_encoder_config.as_dict()

    # check if model size matches
    assert init_model_config['encoder']['bert'][
        'hidden_size'] == model.encoder_network.get_config()['hidden_size']

    # build small model
    small_model = self.build_small_model(init_model_config)
    ckpt = tf.train.Checkpoint(model=small_model)
    ckpt.restore(init_path).assert_existing_objects_matched()
    self.transform_model(small_model, model)
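# --- Hedged sketch (standalone, not part of this module): the net2net restore
# above tiles a narrow feed-forward block k times and divides the tiled output
# kernel by k, which leaves the block's function unchanged. A tiny numpy check
# of that identity with illustrative shapes (it also survives an elementwise
# activation on the intermediate, since tiling commutes with it):
import numpy as np

hidden, small_interm, k = 4, 3, 2
x = np.random.randn(1, hidden)
wi = np.random.randn(hidden, small_interm)   # small intermediate kernel
bi = np.random.randn(small_interm)
wo = np.random.randn(small_interm, hidden)   # small output kernel
bo = np.random.randn(hidden)

small_out = (x @ wi + bi) @ wo + bo
big_out = (x @ np.tile(wi, (1, k)) + np.tile(bi, k)) @ (np.tile(wo, (k, 1)) / k) + bo
assert np.allclose(small_out, big_out)  # widened block computes the same output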
43.738359
108
0.70587
4aab98f123c7d9a623de6eea151e4ef11008dead
8,372
py
Python
data/sheet_to_story.py
tcyrus/AIDungeon
591f318091f46306d01a5307505534a32bd18024
[ "MIT" ]
2,409
2019-12-08T23:24:46.000Z
2020-12-15T00:46:07.000Z
data/sheet_to_story.py
tcyrus/AIDungeon
591f318091f46306d01a5307505534a32bd18024
[ "MIT" ]
181
2019-12-08T23:24:15.000Z
2020-08-19T15:07:33.000Z
data/sheet_to_story.py
tcyrus/AIDungeon
591f318091f46306d01a5307505534a32bd18024
[ "MIT" ]
401
2019-12-08T23:48:25.000Z
2020-12-01T12:20:30.000Z
""" format of tree is dict { tree_id: tree_id_text context: context text? first_story_block action_results: [act_res1, act_res2, act_res3...] } where each action_result's format is: dict{ action: action_text result: result_text action_results: [act_res1, act_res2, act_res3...] } """ import csv import json import os def data_to_forest(filename): trees = [] rows = [] with open(filename, newline="") as f: reader = csv.reader(f) for row in reader: rows.append(row) for i in range(1, len(rows[0])): tree = {} tree["tree_id"] = "upwork" + str(i) tree["context"] = "" tree["first_story_block"] = rows[1][i] tree["action_results"] = [] current_action_results = tree["action_results"] row_ind = 2 while row_ind < len(rows): action_result = {} action_result["action"] = rows[row_ind][i] if row_ind + 1 < len(rows): action_result["result"] = rows[row_ind + 1][i] else: action_result["result"] = None action_result["action_results"] = [] current_action_results.append(action_result) current_action_results = action_result["action_results"] row_ind += 2 trees.append(tree) return trees def build_action_samples_helper(context, story_block, action_results, path, tree_id): samples = [] for i, action_result in enumerate(action_results): new_path = path[:] new_path.append(i) if ( len(action_result["action_results"]) is 0 and action_result["result"] is not None ): row = [ tree_id, "".join(str(x) for x in new_path), context, story_block, action_result["action"], action_result["result"], ] samples.append(row) else: sub_result = build_action_samples_helper( context, action_result["result"], action_result["action_results"], new_path, tree_id, ) samples += sub_result return samples def make_write_actions_batch(forest, filename): # Traverse to the bottom levels of each tree with open(filename, mode="w", newline="") as file: writer = csv.writer( file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL ) writer.writerow( [ "tree_id", "path", "context", "story_block_1", "previous_action", "story_block_2", ] ) for tree in forest: first_story_block = tree["first_story_block"] samples = build_action_samples_helper( tree["context"], first_story_block, tree["action_results"], [], tree["tree_id"], ) for sample in samples: writer.writerow(sample) def build_result_samples_helper( context, story_block, parent_action_result, path, tree_id ): samples = [] action_results = parent_action_result["action_results"] for i, action_result in enumerate(action_results): new_path = path[:] new_path.append(i) if action_result["result"] is None: row = [ tree_id, "".join(str(x) for x in new_path), context, story_block, parent_action_result["action"], parent_action_result["result"], action_result["action"], ] samples.append(row) else: sub_result = build_result_samples_helper( context, parent_action_result["result"], action_result, new_path, tree_id, ) samples += sub_result return samples def make_write_results_batch(forest, filename): with open(filename, mode="w", newline="") as file: writer = csv.writer( file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL ) writer.writerow( [ "tree_id", "path", "context", "story_block_1", "previous_action_1", "story_block_2", "previous_action_2", ] ) for tree in forest: first_story_block = tree["first_story_block"] samples = [] for i, action_result in enumerate(tree["action_results"]): path = [i] samples += build_result_samples_helper( tree["context"], first_story_block, action_result, path, tree["tree_id"], ) for sample in samples: writer.writerow(sample) def save_tree(tree, filename): with open(filename, 
"w") as fp: json.dump(tree, fp) def save_forest(forest, forest_name): if not os.path.exists("./" + forest_name): os.mkdir("./" + forest_name) for tree in forest: save_tree(tree, "./" + forest_name + "/" + tree["tree_id"] + ".json") def load_tree(filename): with open(filename, "r") as fp: tree = json.load(fp) return tree def load_forest(forest_name): files = os.listdir("./" + forest_name) forest = [] for file in files: forest.append(load_tree("./" + forest_name + "/" + file)) return forest def csv_to_dict(file): update_dict = {} field_names = [] with open(file, newline="") as f: reader = csv.reader(f) for row in reader: if len(update_dict) is 0: for item in row: update_dict[item] = [] field_names.append(item) else: for i, item in enumerate(row): update_dict[field_names[i]].append(item) return update_dict def update_forest_with_results(forest_name, update_file): update_dict = csv_to_dict(update_file) tree_dict = {} tree_filenames = os.listdir("./" + forest_name) for file_name in tree_filenames: tree = load_tree("./" + forest_name + "/" + file_name) tree_dict[tree["tree_id"]] = tree for i in range(len(update_dict["Input.tree_id"])): tree = tree_dict[update_dict["Input.tree_id"][i]] current_action_results = tree for choice in update_dict["Input.path"][i]: choice_num = int(choice) current_action_results = current_action_results["action_results"][ choice_num ] current_action_results["result"] = update_dict["Answer.result"][i] return tree_dict.values() def update_forest_with_actions(forest_name, update_file): update_dict = csv_to_dict(update_file) tree_dict = {} tree_filenames = os.listdir("./" + forest_name) for file_name in tree_filenames: tree = load_tree("./" + forest_name + "/" + file_name) tree_dict[tree["tree_id"]] = tree for i in range(len(update_dict["Input.tree_id"])): tree = tree_dict[update_dict["Input.tree_id"][i]] current_action_results = tree for choice in update_dict["Input.path"][i]: choice_num = int(choice) current_action_results = current_action_results["action_results"][ choice_num ] current_action_results["action_results"].append( { "action": update_dict["Answer.action_1"][i], "result": None, "action_results": [], } ) current_action_results["action_results"].append( { "action": update_dict["Answer.action_2"][i], "result": None, "action_results": [], } ) return tree_dict.values() tree = data_to_forest("upwork.csv") for i, story in enumerate(tree): save_tree(story, "crowdsourcedstory" + str(i) + ".json") print("done")
28
85
0.53667
65ed59272b44f39871d62d840341da4f9971438b
158
py
Python
remo/remozilla/apps.py
Mozilla-GitHub-Standards/6f6b18ac63685c6bf60fff95a3bbcc598378c77ceb14c7404172c570dd1e971d
23ca8d46496b491fbdb2b8a72c91e75372932f23
[ "BSD-3-Clause" ]
27
2015-01-02T18:47:56.000Z
2021-08-14T11:48:24.000Z
remo/remozilla/apps.py
Mozilla-GitHub-Standards/6f6b18ac63685c6bf60fff95a3bbcc598378c77ceb14c7404172c570dd1e971d
23ca8d46496b491fbdb2b8a72c91e75372932f23
[ "BSD-3-Clause" ]
450
2015-01-02T12:29:50.000Z
2020-10-27T21:41:38.000Z
remo/remozilla/apps.py
Mozilla-GitHub-Standards/6f6b18ac63685c6bf60fff95a3bbcc598378c77ceb14c7404172c570dd1e971d
23ca8d46496b491fbdb2b8a72c91e75372932f23
[ "BSD-3-Clause" ]
81
2015-01-10T23:59:32.000Z
2021-08-19T17:08:56.000Z
from django.apps import AppConfig


class RemozillaConfig(AppConfig):
    name = 'remo.remozilla'
    label = 'remozilla'
    verbose_name = 'ReMo Remozilla'
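# --- Hedged usage note (not part of this file): a Django AppConfig like the
# one above is typically activated from settings.py; the dotted path below is
# an assumption about this project's settings layout.
INSTALLED_APPS = [
    # ...
    'remo.remozilla.apps.RemozillaConfig',
]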
19.75
35
0.721519
42d9fc096f17fe81a81a429247f9e262d0dfb1ef
321
py
Python
bitmovin_api_sdk/encoding/encodings/muxings/mp4/drm/speke/__init__.py
jaythecaesarean/bitmovin-api-sdk-python
48166511fcb9082041c552ace55a9b66cc59b794
[ "MIT" ]
11
2019-07-03T10:41:16.000Z
2022-02-25T21:48:06.000Z
bitmovin_api_sdk/encoding/encodings/muxings/mp4/drm/speke/__init__.py
jaythecaesarean/bitmovin-api-sdk-python
48166511fcb9082041c552ace55a9b66cc59b794
[ "MIT" ]
8
2019-11-23T00:01:25.000Z
2021-04-29T12:30:31.000Z
bitmovin_api_sdk/encoding/encodings/muxings/mp4/drm/speke/__init__.py
jaythecaesarean/bitmovin-api-sdk-python
48166511fcb9082041c552ace55a9b66cc59b794
[ "MIT" ]
13
2020-01-02T14:58:18.000Z
2022-03-26T12:10:30.000Z
from bitmovin_api_sdk.encoding.encodings.muxings.mp4.drm.speke.speke_api import SpekeApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp4.drm.speke.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp4.drm.speke.speke_drm_list_query_params import SpekeDrmListQueryParams
80.25
121
0.894081
92686727b1a64f7c61688c0783a53396b610aad9
24,436
py
Python
tests/core/test_processor.py
YourThomasLee/rasa
501b1b312c158e19e54c67cbca8ed3728ea60ca3
[ "Apache-2.0" ]
1
2020-07-07T06:58:51.000Z
2020-07-07T06:58:51.000Z
tests/core/test_processor.py
YourThomasLee/rasa
501b1b312c158e19e54c67cbca8ed3728ea60ca3
[ "Apache-2.0" ]
75
2020-08-06T08:55:42.000Z
2022-03-01T13:22:11.000Z
tests/core/test_processor.py
YourThomasLee/rasa
501b1b312c158e19e54c67cbca8ed3728ea60ca3
[ "Apache-2.0" ]
null
null
null
import asyncio
import datetime
import pytest
import time
import uuid
import json

from _pytest.monkeypatch import MonkeyPatch
from aioresponses import aioresponses
from typing import Optional, Text, List, Callable
from unittest.mock import patch, Mock

from rasa.core import jobs
from rasa.core.actions.action import ACTION_LISTEN_NAME, ACTION_SESSION_START_NAME
from rasa.core.agent import Agent
from rasa.core.channels.channel import CollectingOutputChannel, UserMessage
from rasa.core.domain import SessionConfig, Domain
from rasa.core.events import (
    ActionExecuted,
    BotUttered,
    ReminderCancelled,
    ReminderScheduled,
    Restarted,
    UserUttered,
    SessionStarted,
    Event,
    SlotSet,
)
from rasa.core.interpreter import RasaNLUHttpInterpreter, NaturalLanguageInterpreter
from rasa.core.policies import SimplePolicyEnsemble
from rasa.core.policies.ted_policy import TEDPolicy
from rasa.core.processor import MessageProcessor
from rasa.core.slots import Slot
from rasa.core.tracker_store import InMemoryTrackerStore
from rasa.core.trackers import DialogueStateTracker
from rasa.nlu.constants import INTENT_NAME_KEY
from rasa.utils.endpoints import EndpointConfig
from tests.utilities import latest_request

from rasa.core.constants import EXTERNAL_MESSAGE_PREFIX, IS_EXTERNAL, DEFAULT_INTENTS

import logging

logger = logging.getLogger(__name__)


async def test_message_processor(
    default_channel: CollectingOutputChannel, default_processor: MessageProcessor
):
    await default_processor.handle_message(
        UserMessage('/greet{"name":"Core"}', default_channel)
    )
    assert default_channel.latest_output() == {
        "recipient_id": "default",
        "text": "hey there Core!",
    }


async def test_message_id_logging(default_processor: MessageProcessor):
    message = UserMessage("If Meg was an egg would she still have a leg?")
    tracker = DialogueStateTracker("1", [])
    await default_processor._handle_message_with_tracker(message, tracker)
    logged_event = tracker.events[-1]

    assert logged_event.message_id == message.message_id
    assert logged_event.message_id is not None


async def test_parsing(default_processor: MessageProcessor):
    message = UserMessage('/greet{"name": "boy"}')
    parsed = await default_processor._parse_message(message)
    assert parsed["intent"][INTENT_NAME_KEY] == "greet"
    assert parsed["entities"][0]["entity"] == "name"


async def test_check_for_unseen_feature(default_processor: MessageProcessor):
    message = UserMessage('/dislike{"test_entity": "RASA"}')
    parsed = await default_processor._parse_message(message)
    with pytest.warns(UserWarning) as record:
        default_processor._check_for_unseen_features(parsed)
    assert len(record) == 2

    assert (
        record[0].message.args[0].startswith("Interpreter parsed an intent 'dislike'")
    )
    assert (
        record[1]
        .message.args[0]
        .startswith("Interpreter parsed an entity 'test_entity'")
    )


@pytest.mark.parametrize("default_intent", DEFAULT_INTENTS)
async def test_default_intent_recognized(
    default_processor: MessageProcessor, default_intent: Text
):
    message = UserMessage(default_intent)
    parsed = await default_processor._parse_message(message)
    with pytest.warns(None) as record:
        default_processor._check_for_unseen_features(parsed)
    assert len(record) == 0


async def test_http_parsing():
    message = UserMessage("lunch?")

    endpoint = EndpointConfig("https://interpreter.com")
    with aioresponses() as mocked:
        mocked.post("https://interpreter.com/model/parse", repeat=True, status=200)

        inter = RasaNLUHttpInterpreter(endpoint_config=endpoint)
        try:
            await MessageProcessor(inter, None, None, None, None)._parse_message(
                message
            )
        except KeyError:
            # the logger looks for intent and entities in the parse result,
            # so a KeyError is expected here
            pass

        r = latest_request(mocked, "POST", "https://interpreter.com/model/parse")

        assert r


async def mocked_parse(self, text, message_id=None, tracker=None):
    """Mock parsing a text message and augment it with the slot
    value from the tracker's state."""

    return {
        "intent": {INTENT_NAME_KEY: "", "confidence": 0.0},
        "entities": [],
        "text": text,
        "requested_language": tracker.get_slot("requested_language"),
    }


async def test_parsing_with_tracker():
    tracker = DialogueStateTracker.from_dict("1", [], [Slot("requested_language")])

    # we'll expect this value 'en' to be part of the result from the interpreter
    tracker._set_slot("requested_language", "en")

    endpoint = EndpointConfig("https://interpreter.com")
    with aioresponses() as mocked:
        mocked.post("https://interpreter.com/parse", repeat=True, status=200)

        # mock the parse function with the one defined for this test
        with patch.object(RasaNLUHttpInterpreter, "parse", mocked_parse):
            interpreter = RasaNLUHttpInterpreter(endpoint_config=endpoint)
            agent = Agent(None, None, interpreter)
            result = await agent.parse_message_using_nlu_interpreter("lunch?", tracker)

            assert result["requested_language"] == "en"


async def test_reminder_scheduled(
    default_channel: CollectingOutputChannel, default_processor: MessageProcessor
):
    sender_id = uuid.uuid4().hex

    reminder = ReminderScheduled("remind", datetime.datetime.now())
    tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)

    tracker.update(UserUttered("test"))
    tracker.update(ActionExecuted("action_schedule_reminder"))
    tracker.update(reminder)

    default_processor.tracker_store.save(tracker)

    await default_processor.handle_reminder(
        reminder, sender_id, default_channel, default_processor.nlg
    )

    # retrieve the updated tracker
    t = default_processor.tracker_store.retrieve(sender_id)

    assert t.events[-5] == UserUttered("test")
    assert t.events[-4] == ActionExecuted("action_schedule_reminder")
    assert isinstance(t.events[-3], ReminderScheduled)
    assert t.events[-2] == UserUttered(
        f"{EXTERNAL_MESSAGE_PREFIX}remind",
        intent={INTENT_NAME_KEY: "remind", IS_EXTERNAL: True},
    )
    assert t.events[-1] == ActionExecuted("action_listen")


async def test_reminder_aborted(
    default_channel: CollectingOutputChannel, default_processor: MessageProcessor
):
    sender_id = uuid.uuid4().hex

    reminder = ReminderScheduled(
        "utter_greet", datetime.datetime.now(), kill_on_user_message=True
    )
    tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)

    tracker.update(reminder)
    tracker.update(UserUttered("test"))  # cancels the reminder

    default_processor.tracker_store.save(tracker)
    await default_processor.handle_reminder(
        reminder, sender_id, default_channel, default_processor.nlg
    )

    # retrieve the updated tracker
    t = default_processor.tracker_store.retrieve(sender_id)
    assert len(t.events) == 3  # nothing should have been executed


async def wait_until_all_jobs_were_executed(
    timeout_after_seconds: Optional[float] = None,
) -> None:
    total_seconds = 0.0
    while len((await jobs.scheduler()).get_jobs()) > 0 and (
        not timeout_after_seconds or total_seconds < timeout_after_seconds
    ):
        await asyncio.sleep(0.1)
        total_seconds += 0.1

    if total_seconds >= timeout_after_seconds:
        jobs.kill_scheduler()
        raise TimeoutError


async def test_reminder_cancelled_multi_user(
    default_channel: CollectingOutputChannel, default_processor: MessageProcessor
):
    sender_ids = [uuid.uuid4().hex, uuid.uuid4().hex]
    trackers = []
    for sender_id in sender_ids:
        tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)

        tracker.update(UserUttered("test"))
        tracker.update(ActionExecuted("action_reminder_reminder"))
        tracker.update(
            ReminderScheduled(
                "greet", datetime.datetime.now(), kill_on_user_message=True
            )
        )
        trackers.append(tracker)

    # cancel all reminders (one) for the first user
    trackers[0].update(ReminderCancelled())

    for tracker in trackers:
        default_processor.tracker_store.save(tracker)
        await default_processor._schedule_reminders(
            tracker.events, tracker, default_channel, default_processor.nlg
        )
    # check that the jobs were added
    assert len((await jobs.scheduler()).get_jobs()) == 2

    for tracker in trackers:
        await default_processor._cancel_reminders(tracker.events, tracker)
    # check that only one job was removed
    assert len((await jobs.scheduler()).get_jobs()) == 1

    # execute the jobs
    await wait_until_all_jobs_were_executed(timeout_after_seconds=5.0)

    tracker_0 = default_processor.tracker_store.retrieve(sender_ids[0])
    # there should be no utter_greet action
    assert (
        UserUttered(
            f"{EXTERNAL_MESSAGE_PREFIX}greet",
            intent={INTENT_NAME_KEY: "greet", IS_EXTERNAL: True},
        )
        not in tracker_0.events
    )

    tracker_1 = default_processor.tracker_store.retrieve(sender_ids[1])
    # there should be an utter_greet action
    assert (
        UserUttered(
            f"{EXTERNAL_MESSAGE_PREFIX}greet",
            intent={INTENT_NAME_KEY: "greet", IS_EXTERNAL: True},
        )
        in tracker_1.events
    )


async def test_reminder_cancelled_cancels_job_with_name(
    default_channel: CollectingOutputChannel, default_processor: MessageProcessor
):
    sender_id = "][]][xy,,=+2f'[:/;>] <0d]A[e_,02"

    reminder = ReminderScheduled(
        intent="greet", trigger_date_time=datetime.datetime.now()
    )
    job_name = reminder.scheduled_job_name(sender_id)
    reminder_cancelled = ReminderCancelled()

    assert reminder_cancelled.cancels_job_with_name(job_name, sender_id)
    assert not reminder_cancelled.cancels_job_with_name(job_name.upper(), sender_id)


async def test_reminder_cancelled_cancels_job_with_name_special_name(
    default_channel: CollectingOutputChannel, default_processor: MessageProcessor
):
    sender_id = "][]][xy,,=+2f'[:/; >]<0d]A[e_,02"
    name = "wkjbgr,34(,*&%^^&*(OP#LKMN V#NF# # #R"

    reminder = ReminderScheduled(
        intent="greet", trigger_date_time=datetime.datetime.now(), name=name
    )
    job_name = reminder.scheduled_job_name(sender_id)
    reminder_cancelled = ReminderCancelled(name)

    assert reminder_cancelled.cancels_job_with_name(job_name, sender_id)
    assert not reminder_cancelled.cancels_job_with_name(job_name.upper(), sender_id)


async def cancel_reminder_and_check(
    tracker: DialogueStateTracker,
    default_processor: MessageProcessor,
    reminder_canceled_event: ReminderCancelled,
    num_jobs_before: int,
    num_jobs_after: int,
) -> None:
    # apply the cancellation event to the tracker
    tracker.update(reminder_canceled_event)

    # check that the jobs were added
    assert len((await jobs.scheduler()).get_jobs()) == num_jobs_before

    await default_processor._cancel_reminders(tracker.events, tracker)

    # check that the expected number of jobs remains
    assert len((await jobs.scheduler()).get_jobs()) == num_jobs_after


async def test_reminder_cancelled_by_name(
    default_channel: CollectingOutputChannel,
    default_processor: MessageProcessor,
    tracker_with_six_scheduled_reminders: DialogueStateTracker,
):
    tracker = tracker_with_six_scheduled_reminders
    await default_processor._schedule_reminders(
        tracker.events, tracker, default_channel, default_processor.nlg
    )

    # cancel the sixth reminder
    await cancel_reminder_and_check(
        tracker, default_processor, ReminderCancelled("special"), 6, 5
    )


async def test_reminder_cancelled_by_entities(
    default_channel: CollectingOutputChannel,
    default_processor: MessageProcessor,
    tracker_with_six_scheduled_reminders: DialogueStateTracker,
):
    tracker = tracker_with_six_scheduled_reminders
    await default_processor._schedule_reminders(
        tracker.events, tracker, default_channel, default_processor.nlg
    )

    # cancel the fourth reminder
    await cancel_reminder_and_check(
        tracker,
        default_processor,
        ReminderCancelled(entities=[{"entity": "name", "value": "Bruce Wayne"}]),
        6,
        5,
    )


async def test_reminder_cancelled_by_intent(
    default_channel: CollectingOutputChannel,
    default_processor: MessageProcessor,
    tracker_with_six_scheduled_reminders: DialogueStateTracker,
):
    tracker = tracker_with_six_scheduled_reminders
    await default_processor._schedule_reminders(
        tracker.events, tracker, default_channel, default_processor.nlg
    )

    # cancel the third, fifth, and sixth reminder
    await cancel_reminder_and_check(
        tracker, default_processor, ReminderCancelled(intent="default"), 6, 3
    )


async def test_reminder_cancelled_all(
    default_channel: CollectingOutputChannel,
    default_processor: MessageProcessor,
    tracker_with_six_scheduled_reminders: DialogueStateTracker,
):
    tracker = tracker_with_six_scheduled_reminders
    await default_processor._schedule_reminders(
        tracker.events, tracker, default_channel, default_processor.nlg
    )

    # cancel all reminders
    await cancel_reminder_and_check(
        tracker, default_processor, ReminderCancelled(), 6, 0
    )


async def test_reminder_restart(
    default_channel: CollectingOutputChannel, default_processor: MessageProcessor
):
    sender_id = uuid.uuid4().hex

    reminder = ReminderScheduled(
        "utter_greet", datetime.datetime.now(), kill_on_user_message=False
    )
    tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)

    tracker.update(reminder)
    tracker.update(Restarted())  # cancels the reminder
    tracker.update(UserUttered("test"))

    default_processor.tracker_store.save(tracker)
    await default_processor.handle_reminder(
        reminder, sender_id, default_channel, default_processor.nlg
    )

    # retrieve the updated tracker
    t = default_processor.tracker_store.retrieve(sender_id)
    assert len(t.events) == 4  # nothing should have been executed


@pytest.mark.parametrize(
    "event_to_apply,session_expiration_time_in_minutes,has_expired",
    [
        # last user event is way in the past
        (UserUttered(timestamp=1), 60, True),
        # user events are very recent
        (UserUttered("hello", timestamp=time.time()), 120, False),
        # there is a user event
        (ActionExecuted(ACTION_LISTEN_NAME, timestamp=time.time()), 60, False),
        # Old event, but sessions are disabled
        (UserUttered("hello", timestamp=1), 0, False),
        # there is no event
        (None, 1, False),
    ],
)
async def test_has_session_expired(
    event_to_apply: Optional[Event],
    session_expiration_time_in_minutes: float,
    has_expired: bool,
    default_processor: MessageProcessor,
):
    sender_id = uuid.uuid4().hex

    default_processor.domain.session_config = SessionConfig(
        session_expiration_time_in_minutes, True
    )
    # create new tracker without events
    tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)
    tracker.events.clear()

    # apply desired event
    if event_to_apply:
        tracker.update(event_to_apply)

    # noinspection PyProtectedMember
    assert default_processor._has_session_expired(tracker) == has_expired


# noinspection PyProtectedMember
async def test_update_tracker_session(
    default_channel: CollectingOutputChannel,
    default_processor: MessageProcessor,
    monkeypatch: MonkeyPatch,
):
    sender_id = uuid.uuid4().hex
    tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)

    # patch `_has_session_expired()` so the `_update_tracker_session()` call actually
    # does something
    monkeypatch.setattr(default_processor, "_has_session_expired", lambda _: True)

    await default_processor._update_tracker_session(tracker, default_channel)

    # the save is not called in _update_tracker_session()
    default_processor._save_tracker(tracker)

    # inspect tracker and make sure all events are present
    tracker = default_processor.tracker_store.retrieve(sender_id)

    assert list(tracker.events) == [
        ActionExecuted(ACTION_LISTEN_NAME),
        ActionExecuted(ACTION_SESSION_START_NAME),
        SessionStarted(),
        ActionExecuted(ACTION_LISTEN_NAME),
    ]


# noinspection PyProtectedMember
async def test_update_tracker_session_with_metadata(
    default_channel: CollectingOutputChannel,
    default_processor: MessageProcessor,
    monkeypatch: MonkeyPatch,
):
    sender_id = uuid.uuid4().hex
    tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)

    # patch `_has_session_expired()` so the `_update_tracker_session()` call actually
    # does something
    monkeypatch.setattr(default_processor, "_has_session_expired", lambda _: True)

    metadata = {"metadataTestKey": "metadataTestValue"}

    await default_processor._update_tracker_session(tracker, default_channel, metadata)

    # the save is not called in _update_tracker_session()
    default_processor._save_tracker(tracker)

    # inspect tracker events and make sure SessionStarted event is present
    # and has metadata.
    tracker = default_processor.tracker_store.retrieve(sender_id)
    assert tracker.events.count(SessionStarted()) == 1

    session_started_event_idx = tracker.events.index(SessionStarted())
    session_started_event_metadata = tracker.events[session_started_event_idx].metadata

    assert session_started_event_metadata == metadata


# noinspection PyProtectedMember
async def test_update_tracker_session_with_slots(
    default_channel: CollectingOutputChannel,
    default_processor: MessageProcessor,
    monkeypatch: MonkeyPatch,
):
    sender_id = uuid.uuid4().hex
    tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)

    # apply a user uttered and five slots
    user_event = UserUttered("some utterance")
    tracker.update(user_event)

    slot_set_events = [SlotSet(f"slot key {i}", f"test value {i}") for i in range(5)]

    for event in slot_set_events:
        tracker.update(event)

    # patch `_has_session_expired()` so the `_update_tracker_session()` call actually
    # does something
    monkeypatch.setattr(default_processor, "_has_session_expired", lambda _: True)

    await default_processor._update_tracker_session(tracker, default_channel)

    # the save is not called in _update_tracker_session()
    default_processor._save_tracker(tracker)

    # inspect tracker and make sure all events are present
    tracker = default_processor.tracker_store.retrieve(sender_id)
    events = list(tracker.events)

    # the first two events should be up to the user utterance
    assert events[:2] == [ActionExecuted(ACTION_LISTEN_NAME), user_event]

    # next come the five slots
    assert events[2:7] == slot_set_events

    # the next two events are the session start sequence
    assert events[7:9] == [ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()]

    # the five slots should be reapplied
    assert events[9:14] == slot_set_events

    # finally an action listen, this should also be the last event
    assert events[14] == events[-1] == ActionExecuted(ACTION_LISTEN_NAME)


# noinspection PyProtectedMember
async def test_get_tracker_with_session_start(
    default_channel: CollectingOutputChannel, default_processor: MessageProcessor
):
    sender_id = uuid.uuid4().hex
    tracker = await default_processor.get_tracker_with_session_start(
        sender_id, default_channel
    )

    # ensure the session start sequence is present
    assert list(tracker.events) == [
        ActionExecuted(ACTION_SESSION_START_NAME),
        SessionStarted(),
        ActionExecuted(ACTION_LISTEN_NAME),
    ]


async def test_handle_message_with_session_start(
    default_channel: CollectingOutputChannel,
    default_processor: MessageProcessor,
    monkeypatch: MonkeyPatch,
):
    sender_id = uuid.uuid4().hex

    entity = "name"
    slot_1 = {entity: "Core"}
    await default_processor.handle_message(
        UserMessage(f"/greet{json.dumps(slot_1)}", default_channel, sender_id)
    )

    assert default_channel.latest_output() == {
        "recipient_id": sender_id,
        "text": "hey there Core!",
    }

    # patch processor so a session start is triggered
    monkeypatch.setattr(default_processor, "_has_session_expired", lambda _: True)

    slot_2 = {entity: "post-session start hello"}
    # handle a new message
    await default_processor.handle_message(
        UserMessage(f"/greet{json.dumps(slot_2)}", default_channel, sender_id)
    )

    tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)

    # make sure the sequence of events is as expected
    assert list(tracker.events) == [
        ActionExecuted(ACTION_SESSION_START_NAME),
        SessionStarted(),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered(
            f"/greet{json.dumps(slot_1)}",
            {INTENT_NAME_KEY: "greet", "confidence": 1.0},
            [{"entity": entity, "start": 6, "end": 22, "value": "Core"}],
        ),
        SlotSet(entity, slot_1[entity]),
        ActionExecuted("utter_greet"),
        BotUttered("hey there Core!", metadata={"template_name": "utter_greet"}),
        ActionExecuted(ACTION_LISTEN_NAME),
        ActionExecuted(ACTION_SESSION_START_NAME),
        SessionStarted(),
        # the initial SlotSet is reapplied after the SessionStarted sequence
        SlotSet(entity, slot_1[entity]),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered(
            f"/greet{json.dumps(slot_2)}",
            {INTENT_NAME_KEY: "greet", "confidence": 1.0},
            [
                {
                    "entity": entity,
                    "start": 6,
                    "end": 42,
                    "value": "post-session start hello",
                }
            ],
        ),
        SlotSet(entity, slot_2[entity]),
        ActionExecuted(ACTION_LISTEN_NAME),
    ]


# noinspection PyProtectedMember
@pytest.mark.parametrize(
    "action_name, should_predict_another_action",
    [
        (ACTION_LISTEN_NAME, False),
        (ACTION_SESSION_START_NAME, False),
        ("utter_greet", True),
    ],
)
async def test_should_predict_another_action(
    default_processor: MessageProcessor,
    action_name: Text,
    should_predict_another_action: bool,
):
    assert (
        default_processor.should_predict_another_action(action_name)
        == should_predict_another_action
    )


def test_get_next_action_probabilities_passes_interpreter_to_policies(
    monkeypatch: MonkeyPatch,
):
    policy = TEDPolicy()
    test_interpreter = Mock()

    def predict_action_probabilities(
        tracker: DialogueStateTracker,
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
        **kwargs,
    ) -> List[float]:
        assert interpreter == test_interpreter
        return [1, 0]

    policy.predict_action_probabilities = predict_action_probabilities
    ensemble = SimplePolicyEnsemble(policies=[policy])

    domain = Domain.empty()

    processor = MessageProcessor(
        test_interpreter, ensemble, domain, InMemoryTrackerStore(domain), Mock()
    )

    # This should not raise
    processor._get_next_action_probabilities(
        DialogueStateTracker.from_events("lala", [ActionExecuted(ACTION_LISTEN_NAME)])
    )


@pytest.mark.parametrize(
    "predict_function",
    [lambda tracker, domain: [1, 0], lambda tracker, domain, some_bool=True: [1, 0]],
)
def test_get_next_action_probabilities_pass_policy_predictions_without_interpreter_arg(
    predict_function: Callable,
):
    policy = TEDPolicy()

    policy.predict_action_probabilities = predict_function

    ensemble = SimplePolicyEnsemble(policies=[policy])
    interpreter = Mock()
    domain = Domain.empty()
processor = MessageProcessor( interpreter, ensemble, domain, InMemoryTrackerStore(domain), Mock() ) with pytest.warns(DeprecationWarning): processor._get_next_action_probabilities( DialogueStateTracker.from_events( "lala", [ActionExecuted(ACTION_LISTEN_NAME)] ) )
33.658402
87
0.721968
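The tests above pin down how reminder cancellation is keyed: by job name, by entities, by intent, or unconditionally. As a minimal self-contained sketch of that matching idea — the Job dataclass and cancel_matching helper below are illustrative assumptions, not Rasa's actual ReminderScheduled/ReminderCancelled API — cancellation amounts to filtering a sender's jobs by optional criteria:

from dataclasses import dataclass
from typing import List, Optional


# Hypothetical stand-ins for scheduled reminder jobs; the names and fields
# are assumptions for illustration, not Rasa's real event classes.
@dataclass
class Job:
    sender_id: str
    name: str
    intent: str


def cancel_matching(
    jobs: List[Job],
    sender_id: str,
    name: Optional[str] = None,
    intent: Optional[str] = None,
) -> List[Job]:
    """Drop every job for `sender_id` that matches all given criteria.

    With no criteria, all of the sender's jobs are cancelled, mirroring
    the behaviour exercised by test_reminder_cancelled_all above.
    """
    def matches(job: Job) -> bool:
        return (
            job.sender_id == sender_id
            and (name is None or job.name == name)
            and (intent is None or job.intent == intent)
        )

    return [job for job in jobs if not matches(job)]


jobs = [
    Job("u1", "special", "greet"),
    Job("u1", "other", "default"),
    Job("u2", "special", "greet"),
]
assert len(cancel_matching(jobs, "u1", name="special")) == 2  # by name
assert len(cancel_matching(jobs, "u1")) == 1  # cancel all for u1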
87c352d8bca75d9424f4840654b104bb4974696f
33,906
py
Python
zipline/history/history_container.py
wware/zipline
760bbced73b1d04cf37bf7bd098dac168e4ff823
[ "Apache-2.0" ]
null
null
null
zipline/history/history_container.py
wware/zipline
760bbced73b1d04cf37bf7bd098dac168e4ff823
[ "Apache-2.0" ]
null
null
null
zipline/history/history_container.py
wware/zipline
760bbced73b1d04cf37bf7bd098dac168e4ff823
[ "Apache-2.0" ]
null
null
null
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bisect import insort_left
from collections import namedtuple
from itertools import groupby, product

import logbook
import numpy as np
import pandas as pd
from six import itervalues, iteritems, iterkeys

from . history import HistorySpec

from zipline.finance.trading import with_environment
from zipline.utils.data import RollingPanel, _ensure_index
from zipline.utils.munge import ffill, bfill

logger = logbook.Logger('History Container')


# The closing price is referred to by multiple names,
# allow both for price rollover logic etc.
CLOSING_PRICE_FIELDS = frozenset({'price', 'close_price'})


def ffill_buffer_from_prior_values(freq,
                                   field,
                                   buffer_frame,
                                   digest_frame,
                                   pv_frame,
                                   raw=False):
    """
    Forward-fill a buffer frame, falling back to the end-of-period values of a
    digest frame if the buffer frame has leading NaNs.
    """
    # convert to ndarray if necessary
    digest_values = digest_frame
    if raw and isinstance(digest_frame, pd.DataFrame):
        digest_values = digest_frame.values

    buffer_values = buffer_frame
    if raw and isinstance(buffer_frame, pd.DataFrame):
        buffer_values = buffer_frame.values

    nan_sids = pd.isnull(buffer_values[0])
    if np.any(nan_sids) and len(digest_values):
        # If we have any leading nans in the buffer and we have a non-empty
        # digest frame, use the oldest digest values as the initial buffer
        # values.
        buffer_values[0, nan_sids] = digest_values[-1, nan_sids]

    nan_sids = pd.isnull(buffer_values[0])
    if np.any(nan_sids):
        # If we still have leading nans, fall back to the last known values
        # from before the digest.
        key_loc = pv_frame.index.get_loc((freq.freq_str, field))
        filler = pv_frame.values[key_loc, nan_sids]
        buffer_values[0, nan_sids] = filler

    if raw:
        filled = ffill(buffer_values)
        return filled

    return buffer_frame.ffill()


def ffill_digest_frame_from_prior_values(freq,
                                         field,
                                         digest_frame,
                                         pv_frame,
                                         raw=False):
    """
    Forward-fill a digest frame, falling back to the last known prior values
    if necessary.
    """
    # convert to ndarray if necessary
    values = digest_frame
    if raw and isinstance(digest_frame, pd.DataFrame):
        values = digest_frame.values

    nan_sids = pd.isnull(values[0])
    if np.any(nan_sids):
        # If we have any leading nans in the frame, use values from pv_frame
        # to seed values for those sids.
        key_loc = pv_frame.index.get_loc((freq.freq_str, field))
        filler = pv_frame.values[key_loc, nan_sids]
        values[0, nan_sids] = filler

    if raw:
        filled = ffill(values)
        return filled

    return digest_frame.ffill()


def freq_str_and_bar_count(history_spec):
    """
    Helper for getting the frequency string and bar count from a history spec.
    """
    return (history_spec.frequency.freq_str, history_spec.bar_count)


@with_environment()
def next_bar(spec, env):
    """
    Returns a function that will return the next bar for a given datetime.
    """
    if spec.frequency.unit_str == 'd':
        if spec.frequency.data_frequency == 'minute':
            return lambda dt: env.get_open_and_close(
                env.next_trading_day(dt),
            )[1]
        else:
            return env.next_trading_day
    else:
        return env.next_market_minute


def compute_largest_specs(history_specs):
    """
    Maps a Frequency to the largest HistorySpec at that frequency from an
    iterable of HistorySpecs.
    """
    return {key: max(group, key=lambda f: f.bar_count)
            for key, group in groupby(
                sorted(history_specs, key=freq_str_and_bar_count),
                key=lambda spec: spec.frequency)}


# tuples to store a change to the shape of a HistoryContainer
FrequencyDelta = namedtuple(
    'FrequencyDelta',
    ['freq', 'buffer_delta'],
)
LengthDelta = namedtuple(
    'LengthDelta',
    ['freq', 'delta'],
)


HistoryContainerDeltaSuper = namedtuple(
    'HistoryContainerDelta',
    ['field', 'frequency_delta', 'length_delta'],
)


class HistoryContainerDelta(HistoryContainerDeltaSuper):
    """
    A class representing a resize of the history container.
    """
    def __new__(cls, field=None, frequency_delta=None, length_delta=None):
        """
        field is a new field that was added.
        frequency is a FrequencyDelta representing a new frequency was added.
        length is a bar LengthDelta which is a frequency and a bar_count.
        If any field is None, then no change occurred of that type.
        """
        return super(HistoryContainerDelta, cls).__new__(
            cls, field, frequency_delta, length_delta,
        )

    @property
    def empty(self):
        """
        Checks if the delta is empty.
        """
        return (self.field is None and
                self.frequency_delta is None and
                self.length_delta is None)


def normalize_to_data_freq(data_frequency, dt):
    if data_frequency == 'minute':
        return dt
    return pd.tslib.normalize_date(dt)


class HistoryContainer(object):
    """
    Container for all history panels and frames used by an algoscript.

    To be used internally by TradingAlgorithm, but *not* passed directly to
    the algorithm.

    Entry point for the algoscript is the result of `get_history`.
    """
    VALID_FIELDS = {
        'price', 'open_price', 'volume', 'high', 'low', 'close_price',
    }

    def __init__(self,
                 history_specs,
                 initial_sids,
                 initial_dt,
                 data_frequency,
                 bar_data=None):
        """
        A container to hold a rolling window of historical data within a
        user's algorithm.

        Args:
          history_specs (dict[Frequency:HistorySpec]): The starting history
            specs that this container should be able to service.

          initial_sids (set[Security or Int]): The starting sids to watch.

          initial_dt (datetime): The datetime to start collecting history
            from.

          bar_data (BarData): If this container is being constructed during
            handle_data, this is the BarData for the current bar to fill the
            buffer with. If this is constructed elsewhere, it is None.

        Returns:
          An instance of a new HistoryContainer
        """
        # History specs to be served by this container.
        self.history_specs = history_specs
        self.largest_specs = compute_largest_specs(
            itervalues(self.history_specs)
        )

        # The set of fields specified by all history specs
        self.fields = pd.Index(
            sorted(set(spec.field for spec in itervalues(history_specs)))
        )
        self.sids = pd.Index(
            sorted(set(initial_sids or []))
        )

        self.data_frequency = data_frequency

        initial_dt = normalize_to_data_freq(self.data_frequency, initial_dt)

        # This panel contains raw minutes for periods that haven't been fully
        # completed. When a frequency period rolls over, these minutes are
        # digested using some sort of aggregation call on the panel (e.g.
        # `sum` for volume, `max` for high, `min` for low, etc.).
        self.buffer_panel = self.create_buffer_panel(initial_dt, bar_data)

        # Dictionaries with Frequency objects as keys.
        self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
            self.create_digest_panels(initial_sids, initial_dt)

        # Helps prop up the prior day panel against having a nan, when the
        # data has been seen.
        self.last_known_prior_values = pd.DataFrame(
            data=None,
            index=self.prior_values_index,
            columns=self.prior_values_columns,
            # Note: For bizarre "intricacies of the spaghetti that is pandas
            # indexing logic" reasons, setting this dtype prevents indexing
            # errors in update_last_known_values.  This is safe for the time
            # being because our only forward-fillable fields are floats.  If
            # we need to add a non-float-typed forward-fillable field, then
            # we may find ourselves having to track down and fix a pandas
            # bug.
            dtype=np.float64,
        )

    _ffillable_fields = None

    @property
    def ffillable_fields(self):
        if self._ffillable_fields is None:
            fillables = self.fields.intersection(HistorySpec.FORWARD_FILLABLE)
            self._ffillable_fields = fillables
        return self._ffillable_fields

    @property
    def prior_values_index(self):
        index_values = list(
            product(
                (freq.freq_str for freq in self.unique_frequencies),
                # Only store prior values for forward-fillable fields.
                self.ffillable_fields,
            )
        )
        if index_values:
            return pd.MultiIndex.from_tuples(index_values)
        else:
            # MultiIndex doesn't gracefully support empty input, so we return
            # an empty regular Index if we have no values.
            return pd.Index(index_values)

    @property
    def prior_values_columns(self):
        return self.sids

    @property
    def all_panels(self):
        yield self.buffer_panel
        for panel in self.digest_panels.values():
            yield panel

    @property
    def unique_frequencies(self):
        """
        Return an iterator over all the unique frequencies serviced by this
        container.
        """
        return iterkeys(self.largest_specs)

    @with_environment()
    def _add_frequency(self, spec, dt, data, env=None):
        """
        Adds a new frequency to the container. This reshapes the buffer_panel
        if needed.
        """
        freq = spec.frequency
        self.largest_specs[freq] = spec
        new_buffer_len = 0

        if freq.max_bars > self.buffer_panel.window_length:
            # More bars need to be held in the buffer_panel to support this
            # freq
            if freq.data_frequency \
                    != self.buffer_spec.frequency.data_frequency:
                # If the data_frequencies are not the same, then we need to
                # create a fresh buffer.
                self.buffer_panel = self.create_buffer_panel(
                    dt, bar_data=data,
                )
                new_buffer_len = None
            else:
                # The frequencies are the same, we just need to add more bars.
                self._resize_panel(
                    self.buffer_panel,
                    freq.max_bars,
                    dt,
                    self.buffer_spec.frequency,
                )
                new_buffer_len = freq.max_minutes
                # update the current buffer_spec to reflect the new length.
                self.buffer_spec.bar_count = new_buffer_len + 1

        if spec.bar_count > 1:
            # This spec has more than one bar, construct a digest panel for
            # it.
            self.digest_panels[freq] = self._create_digest_panel(
                dt, spec=spec, env=env,
            )
        else:
            self.cur_window_starts[freq] = dt
            self.cur_window_closes[freq] = freq.window_close(
                self.cur_window_starts[freq]
            )

        self.last_known_prior_values = self.last_known_prior_values.reindex(
            index=self.prior_values_index,
        )

        return FrequencyDelta(freq, new_buffer_len)

    def _add_field(self, field):
        """
        Adds a new field to the container.
        """
        # self.fields is already sorted, so we just need to insert the new
        # field in the correct index.
        ls = list(self.fields)
        insort_left(ls, field)
        self.fields = pd.Index(ls)
        # unset fillable fields cache
        self._ffillable_fields = None

        self._realign_fields()
        self.last_known_prior_values = self.last_known_prior_values.reindex(
            index=self.prior_values_index,
        )
        return field

    @with_environment()
    def _add_length(self, spec, dt, env=None):
        """
        Increases the length of the digest panel for spec.frequency. If this
        does not have a panel, and one is needed; a digest panel will be
        constructed.
        """
        old_count = self.largest_specs[spec.frequency].bar_count
        self.largest_specs[spec.frequency] = spec
        delta = spec.bar_count - old_count

        panel = self.digest_panels.get(spec.frequency)

        if panel is None:
            # The old length for this frequency was 1 bar, meaning no digest
            # panel was held. We must construct a new one here.
            panel = self._create_digest_panel(
                dt, spec=spec, env=env,
            )

        else:
            self._resize_panel(
                panel, spec.bar_count - 1, dt, freq=spec.frequency, env=env,
            )

        self.digest_panels[spec.frequency] = panel

        return LengthDelta(spec.frequency, delta)

    @with_environment()
    def _resize_panel(self, panel, size, dt, freq, env=None):
        """
        Resizes a panel, fills the date_buf with the correct values.
        """
        # This is the oldest datetime that will be shown in the current window
        # of the panel.
        oldest_dt = pd.Timestamp(panel.start_date, tz='utc',)
        delta = size - panel.window_length

        # Construct the missing dates.
        missing_dts = self._create_window_date_buf(
            delta, freq.unit_str, freq.data_frequency, oldest_dt,
        )

        panel.extend_back(missing_dts)

    @with_environment()
    def _create_window_date_buf(self,
                                window,
                                unit_str,
                                data_frequency,
                                dt,
                                env=None):
        """
        Creates a window length date_buf looking backwards from dt.
        """
        if unit_str == 'd':
            # Get the properly key'd datetime64 out of the pandas Timestamp
            if data_frequency != 'daily':
                arr = env.open_close_window(
                    dt,
                    window,
                    offset=-window,
                ).market_close.astype('datetime64[ns]').values
            else:
                arr = env.open_close_window(
                    dt,
                    window,
                    offset=-window,
                ).index.values

            return arr
        else:
            return env.market_minute_window(
                env.previous_market_minute(dt),
                window,
                step=-1,
            )[::-1].values

    @with_environment()
    def _create_panel(self, dt, spec, env=None):
        """
        Constructs a rolling panel with a properly aligned date_buf.
        """
        dt = normalize_to_data_freq(spec.frequency.data_frequency, dt)

        window = spec.bar_count - 1

        date_buf = self._create_window_date_buf(
            window,
            spec.frequency.unit_str,
            spec.frequency.data_frequency,
            dt,
            env=env,
        )

        panel = RollingPanel(
            window=window,
            items=self.fields,
            sids=self.sids,
            initial_dates=date_buf,
        )

        return panel

    @with_environment()
    def _create_digest_panel(self,
                             dt,
                             spec,
                             window_starts=None,
                             window_closes=None,
                             env=None):
        """
        Creates a digest panel, setting the window_starts and window_closes.
        If window_starts or window_closes are None, then
        self.cur_window_starts or self.cur_window_closes will be used.
        """
        freq = spec.frequency
        window_starts = window_starts if window_starts is not None \
            else self.cur_window_starts
        window_closes = window_closes if window_closes is not None \
            else self.cur_window_closes

        window_starts[freq] = freq.normalize(dt)
        window_closes[freq] = freq.window_close(window_starts[freq])

        return self._create_panel(dt, spec, env=env)

    def ensure_spec(self, spec, dt, bar_data):
        """
        Ensure that this container has enough space to hold the data for the
        given spec. This returns a HistoryContainerDelta to represent the
        changes in shape that the container made to support the new
        HistorySpec.
        """
        updated = {}
        if spec.field not in self.fields:
            updated['field'] = self._add_field(spec.field)
        if spec.frequency not in self.largest_specs:
            updated['frequency_delta'] = self._add_frequency(
                spec, dt, bar_data,
            )
        if spec.bar_count > self.largest_specs[spec.frequency].bar_count:
            updated['length_delta'] = self._add_length(spec, dt)
        return HistoryContainerDelta(**updated)

    def add_sids(self, to_add):
        """
        Add new sids to the container.
        """
        self.sids = pd.Index(
            sorted(self.sids + _ensure_index(to_add)),
        )
        self._realign_sids()

    def drop_sids(self, to_drop):
        """
        Remove sids from the container.
        """
        self.sids = pd.Index(
            sorted(self.sids - _ensure_index(to_drop)),
        )
        self._realign_sids()

    def _realign_sids(self):
        """
        Realign our constituent panels after adding or removing sids.
        """
        self.last_known_prior_values = self.last_known_prior_values.reindex(
            columns=self.sids,
        )
        for panel in self.all_panels:
            panel.set_minor_axis(self.sids)

    def _realign_fields(self):
        self.last_known_prior_values = self.last_known_prior_values.reindex(
            index=self.prior_values_index,
        )
        for panel in self.all_panels:
            panel.set_items(self.fields)

    @with_environment()
    def create_digest_panels(self,
                             initial_sids,
                             initial_dt,
                             env=None):
        """
        Initialize a RollingPanel for each unique panel frequency being stored
        by this container.  Each RollingPanel pre-allocates enough storage
        space to service the highest bar-count of any history call that it
        serves.
        """
        # Map from frequency -> first/last minute of the next digest to be
        # rolled for that frequency.
        first_window_starts = {}
        first_window_closes = {}

        # Map from frequency -> digest_panels.
        panels = {}
        for freq, largest_spec in iteritems(self.largest_specs):
            if largest_spec.bar_count == 1:
                # No need to allocate a digest panel; this frequency will only
                # ever use data drawn from self.buffer_panel.
                first_window_starts[freq] = freq.normalize(initial_dt)
                first_window_closes[freq] = freq.window_close(
                    first_window_starts[freq]
                )
                continue

            dt = initial_dt

            rp = self._create_digest_panel(
                dt,
                spec=largest_spec,
                window_starts=first_window_starts,
                window_closes=first_window_closes,
                env=env,
            )

            panels[freq] = rp

        return panels, first_window_starts, first_window_closes

    def create_buffer_panel(self, initial_dt, bar_data):
        """
        Initialize a RollingPanel containing enough minutes to service all our
        frequencies.
        """
        max_bars_needed = max(
            freq.max_bars for freq in self.unique_frequencies
        )
        freq = '1m' if self.data_frequency == 'minute' else '1d'
        spec = HistorySpec(
            max_bars_needed + 1, freq, None, None, self.data_frequency,
        )

        rp = self._create_panel(
            initial_dt, spec,
        )
        self.buffer_spec = spec

        if bar_data is not None:
            frame = self.frame_from_bardata(bar_data, initial_dt)
            rp.add_frame(initial_dt, frame)

        return rp

    def convert_columns(self, values):
        """
        If columns have a specific type you want to enforce, overwrite this
        method and return the transformed values.
        """
        return values

    def digest_bars(self, history_spec, do_ffill):
        """
        Get the last (history_spec.bar_count - 1) bars from self.digest_panel
        for the requested HistorySpec.
        """
        bar_count = history_spec.bar_count
        if bar_count == 1:
            # slicing with [1 - bar_count:] doesn't work when bar_count == 1,
            # so special-casing this.
            res = pd.DataFrame(index=[], columns=self.sids)
            return res.values, res.index

        field = history_spec.field

        # Panel axes are (field, dates, sids).  We want just the entries for
        # the requested field, the last (bar_count - 1) data points, and all
        # sids.
        digest_panel = self.digest_panels[history_spec.frequency]
        frame = digest_panel.get_current(field, raw=True)

        if do_ffill:
            # Do forward-filling *before* truncating down to the requested
            # number of bars.  This protects us from losing data if an
            # illiquid stock has a gap in its price history.
            filled = ffill_digest_frame_from_prior_values(
                history_spec.frequency,
                history_spec.field,
                frame,
                self.last_known_prior_values,
                raw=True
                # Truncate only after we've forward-filled
            )
            indexer = slice(1 - bar_count, None)
            return filled[indexer], digest_panel.current_dates()[indexer]
        else:
            indexer = slice(1 - bar_count, None)
            return frame[indexer, :], digest_panel.current_dates()[indexer]

    def buffer_panel_minutes(self,
                             buffer_panel,
                             earliest_minute=None,
                             latest_minute=None,
                             raw=False):
        """
        Get the minutes in @buffer_panel between @earliest_minute and
        @latest_minute, inclusive.

        @buffer_panel can be a RollingPanel or a plain Panel.  If a
        RollingPanel is supplied, we call `get_current` to extract a Panel
        object.

        If no value is specified for @earliest_minute, use all the minutes we
        have up until @latest_minute.

        If no value for @latest_minute is specified, use all values up until
        the latest minute.
        """
        if isinstance(buffer_panel, RollingPanel):
            buffer_panel = buffer_panel.get_current(start=earliest_minute,
                                                    end=latest_minute,
                                                    raw=raw)
            return buffer_panel
        # Using .ix here rather than .loc because loc requires that the keys
        # are actually in the index, whereas .ix returns all the values
        # between earliest_minute and latest_minute, which is what we want.
        return buffer_panel.ix[:, earliest_minute:latest_minute, :]

    def frame_from_bardata(self, data, algo_dt):
        """
        Create a DataFrame from the given BarData and algo dt.
        """
        data = data._data
        frame_data = np.empty((len(self.fields), len(self.sids))) * np.nan

        for j, sid in enumerate(self.sids):
            sid_data = data.get(sid)
            if not sid_data:
                continue
            if algo_dt != sid_data['dt']:
                continue
            for i, field in enumerate(self.fields):
                frame_data[i, j] = sid_data.get(field, np.nan)

        return pd.DataFrame(
            frame_data,
            index=self.fields.copy(),
            columns=self.sids.copy(),
        )

    def update(self, data, algo_dt):
        """
        Takes the bar at @algo_dt's @data, checks to see if we need to roll
        any new digests, then adds new data to the buffer panel.
        """
        frame = self.frame_from_bardata(data, algo_dt)

        self.update_last_known_values()
        self.update_digest_panels(algo_dt, self.buffer_panel)
        self.buffer_panel.add_frame(algo_dt, frame)

    def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None):
        """
        Check whether @algo_dt is greater than cur_window_close for any of our
        frequencies.  If so, roll a digest for that frequency using data drawn
        from @buffer_panel and insert it into the appropriate digest panels.

        If @freq_filter is specified, only use the given data to update
        frequencies on which the filter returns True.

        This takes `buffer_panel` as an argument rather than using
        self.buffer_panel so that this method can be used to add supplemental
        data from an external source.
        """
        for frequency in filter(freq_filter, self.unique_frequencies):

            # We don't keep a digest panel if we only have a length-1 history
            # spec for a given frequency
            digest_panel = self.digest_panels.get(frequency, None)

            while algo_dt > self.cur_window_closes[frequency]:

                earliest_minute = self.cur_window_starts[frequency]
                latest_minute = self.cur_window_closes[frequency]
                minutes_to_process = self.buffer_panel_minutes(
                    buffer_panel,
                    earliest_minute=earliest_minute,
                    latest_minute=latest_minute,
                    raw=True
                )

                if digest_panel is not None:
                    # Create a digest from minutes_to_process and add it to
                    # digest_panel.
                    digest_frame = self.create_new_digest_frame(
                        minutes_to_process,
                        self.fields,
                        self.sids
                    )
                    digest_panel.add_frame(
                        latest_minute,
                        digest_frame,
                        self.fields,
                        self.sids
                    )

                # Update panel start/close for this frequency.
                self.cur_window_starts[frequency] = \
                    frequency.next_window_start(latest_minute)
                self.cur_window_closes[frequency] = \
                    frequency.window_close(self.cur_window_starts[frequency])

    def frame_to_series(self, field, frame, columns=None):
        """
        Convert a frame with a DatetimeIndex and sid columns into a series
        with a sid index, using the aggregator defined by the given field.
        """
        if isinstance(frame, pd.DataFrame):
            columns = frame.columns
            frame = frame.values

        if not len(frame):
            return pd.Series(
                data=(0 if field == 'volume' else np.nan),
                index=columns,
            ).values

        if field in ['price', 'close_price']:
            # shortcircuit for full last row
            vals = frame[-1]
            if np.all(~np.isnan(vals)):
                return vals
            return ffill(frame)[-1]
        elif field == 'open_price':
            return bfill(frame)[0]
        elif field == 'volume':
            return np.nansum(frame, axis=0)
        elif field == 'high':
            return np.nanmax(frame, axis=0)
        elif field == 'low':
            return np.nanmin(frame, axis=0)
        else:
            raise ValueError("Unknown field {}".format(field))

    def aggregate_ohlcv_panel(self,
                              fields,
                              ohlcv_panel,
                              items=None,
                              minor_axis=None):
        """
        Convert an OHLCV Panel into a DataFrame by aggregating each field's
        frame into a Series.
        """
        vals = ohlcv_panel
        if isinstance(ohlcv_panel, pd.Panel):
            vals = ohlcv_panel.values
            items = ohlcv_panel.items
            minor_axis = ohlcv_panel.minor_axis

        data = [
            self.frame_to_series(
                field,
                vals[items.get_loc(field)],
                minor_axis
            )
            for field in fields
        ]
        return np.array(data)

    def create_new_digest_frame(self, buffer_minutes, items=None,
                                minor_axis=None):
        """
        Package up minutes in @buffer_minutes into a single digest frame.
        """
        return self.aggregate_ohlcv_panel(
            self.fields,
            buffer_minutes,
            items=items,
            minor_axis=minor_axis
        )

    def update_last_known_values(self):
        """
        Store the non-NaN values from our oldest frame in each frequency.
        """
        ffillable = self.ffillable_fields
        if not len(ffillable):
            return

        for frequency in self.unique_frequencies:
            digest_panel = self.digest_panels.get(frequency, None)
            if digest_panel:
                oldest_known_values = digest_panel.oldest_frame(raw=True)
            else:
                oldest_known_values = self.buffer_panel.oldest_frame(raw=True)

            oldest_vals = oldest_known_values
            oldest_columns = self.fields
            for field in ffillable:
                f_idx = oldest_columns.get_loc(field)
                field_vals = oldest_vals[f_idx]
                # isnan would be fast, possible to use?
                non_nan_sids = np.where(pd.notnull(field_vals))
                key = (frequency.freq_str, field)
                key_loc = self.last_known_prior_values.index.get_loc(key)
                self.last_known_prior_values.values[
                    key_loc, non_nan_sids
                ] = field_vals[non_nan_sids]

    def get_history(self, history_spec, algo_dt):
        """
        Main API used by the algoscript is mapped to this function.

        Selects from the overarching history panel the values for the
        @history_spec at the given @algo_dt.
        """
        field = history_spec.field
        do_ffill = history_spec.ffill

        # Get our stored values from periods prior to the current period.
        digest_frame, index = self.digest_bars(history_spec, do_ffill)

        # Get minutes from our buffer panel to build the last row of the
        # returned frame.
        buffer_panel = self.buffer_panel_minutes(
            self.buffer_panel,
            earliest_minute=self.cur_window_starts[history_spec.frequency],
            raw=True
        )
        buffer_frame = buffer_panel[self.fields.get_loc(field)]

        if do_ffill:
            buffer_frame = ffill_buffer_from_prior_values(
                history_spec.frequency,
                field,
                buffer_frame,
                digest_frame,
                self.last_known_prior_values,
                raw=True
            )
        last_period = self.frame_to_series(field, buffer_frame, self.sids)
        return fast_build_history_output(digest_frame,
                                         last_period,
                                         algo_dt,
                                         index=index,
                                         columns=self.sids)


def fast_build_history_output(buffer_frame,
                              last_period,
                              algo_dt,
                              index=None,
                              columns=None):
    """
    Optimized concatenation of DataFrame and Series for use in
    HistoryContainer.get_history.

    Relies on the fact that the input arrays have compatible shapes.
    """
    buffer_values = buffer_frame
    if isinstance(buffer_frame, pd.DataFrame):
        buffer_values = buffer_frame.values
        index = buffer_frame.index
        columns = buffer_frame.columns

    return pd.DataFrame(
        data=np.vstack(
            [
                buffer_values,
                last_period,
            ]
        ),
        index=fast_append_date_to_index(
            index,
            pd.Timestamp(algo_dt)
        ),
        columns=columns,
    )


def fast_append_date_to_index(index, timestamp):
    """
    Append a timestamp to a DatetimeIndex.  DatetimeIndex.append does not
    appear to work.
    """
    return pd.DatetimeIndex(
        np.hstack(
            [
                index.values,
                [timestamp.asm8],
            ]
        ),
        tz='UTC',
    )
34.811088
79
0.58379
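A minimal sketch of the per-field digest rule that HistoryContainer.frame_to_series above applies when minute bars are rolled into a coarser bar: close is the last row, open is the first row, volume is summed, high is the max, and low is the min. The standalone digest helper below is an illustrative assumption and deliberately omits the NaN forward-/back-fill fallbacks the real code delegates to zipline.utils.munge.

import numpy as np


def digest(minute_bars: np.ndarray, field: str) -> np.ndarray:
    """Collapse a (minutes x sids) array of minute bars to one bar per sid,
    mirroring the aggregation cases in frame_to_series above (sans the
    NaN-fill fallbacks)."""
    if field in ('price', 'close_price'):
        return minute_bars[-1]               # close: last observed row
    if field == 'open_price':
        return minute_bars[0]                # open: first observed row
    if field == 'volume':
        return np.nansum(minute_bars, axis=0)
    if field == 'high':
        return np.nanmax(minute_bars, axis=0)
    if field == 'low':
        return np.nanmin(minute_bars, axis=0)
    raise ValueError("Unknown field {}".format(field))


bars = np.array([[10.0, 1.0], [12.0, 2.0], [11.0, 3.0]])  # 3 minutes, 2 sids
assert list(digest(bars, 'high')) == [12.0, 3.0]
assert list(digest(bars, 'close_price')) == [11.0, 3.0]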
af5fb47de419dd28e496529971a4216375769167
81,292
py
Python
solutions/previous_solution_python/contest_leetcode_5461.py
YuhanShi53/Leetcode_solutions
cdcad34656d25d6af09b226e17250c6070305ab0
[ "MIT" ]
null
null
null
solutions/previous_solution_python/contest_leetcode_5461.py
YuhanShi53/Leetcode_solutions
cdcad34656d25d6af09b226e17250c6070305ab0
[ "MIT" ]
null
null
null
solutions/previous_solution_python/contest_leetcode_5461.py
YuhanShi53/Leetcode_solutions
cdcad34656d25d6af09b226e17250c6070305ab0
[ "MIT" ]
null
null
null
class Solution: def numSub(self, s: str) -> int: s = s.split('0') res = 0 for x in s: y = len(x) res += (y + 1) * y // 2 return res if __name__ == '__main__': s = "1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100101011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100001001" res = Solution().numSub(s) print(res)
6,253.230769
81,026
0.998056
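The record above is a test driver only: it binds a very long binary string to s and calls Solution().numSub(s), but the Solution class itself is not part of the visible content. A minimal sketch of an implementation consistent with that driver follows; the class and method names come from the driver, while the counting logic and the modulo constant are assumptions based on the standard "count substrings of only 1s" problem.

# Sketch only -- the record does not include this class. Counts substrings
# consisting solely of '1's: a maximal run of k ones contributes
# k * (k + 1) / 2 substrings, accumulated one run-prefix at a time.
class Solution:
    def numSub(self, s: str) -> int:
        MOD = 10**9 + 7  # assumption: the usual modulus for this problem
        total = 0
        run = 0  # length of the current run of consecutive '1's
        for ch in s:
            if ch == '1':
                run += 1
                total += run  # a run of length k adds 1 + 2 + ... + k overall
            else:
                run = 0
        return total % MOD

print(Solution().numSub("10110111"))  # runs of length 1, 2, 3 -> 1 + 3 + 6 = 10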
688d95ae217e7b53ebae30d519a742382f10c1f7
575
py
Python
mtaa/forms.py
ibukamshindi/Neighborhood-Watch
cc875b3f3ce562bd424921def24262578e881700
[ "MIT" ]
null
null
null
mtaa/forms.py
ibukamshindi/Neighborhood-Watch
cc875b3f3ce562bd424921def24262578e881700
[ "MIT" ]
null
null
null
mtaa/forms.py
ibukamshindi/Neighborhood-Watch
cc875b3f3ce562bd424921def24262578e881700
[ "MIT" ]
null
null
null
from django import forms

from .models import *


class AddHoodForm(forms.ModelForm):
    class Meta:
        model = Hood
        fields = ('hood_name', 'hood_photo', 'population')


class ProfileForm(forms.ModelForm):
    class Meta:
        model = Profile
        fields = ('name', 'neighborhood', 'email_address')


class BusinessForm(forms.ModelForm):
    class Meta:
        model = Business
        fields = ('business_name', 'email', 'hood')


class NotificationForm(forms.ModelForm):
    class Meta:
        model = Notification
        fields = ('post', 'posted_by', 'hood')
30.263158
58
0.645217
d88d1248aa205ae7dd93bf2aa1239f394a96a33b
1,768
py
Python
petastorm/tests/test_end_to_end_predicates_impl.py
rizalgowandy/petastorm
f7aad8cf5ef6878231b335911e3e95541b388d40
[ "Apache-2.0" ]
1,393
2018-08-17T19:01:12.000Z
2022-03-31T22:33:45.000Z
petastorm/tests/test_end_to_end_predicates_impl.py
rizalgowandy/petastorm
f7aad8cf5ef6878231b335911e3e95541b388d40
[ "Apache-2.0" ]
554
2018-08-17T19:59:42.000Z
2022-03-31T23:15:07.000Z
petastorm/tests/test_end_to_end_predicates_impl.py
rizalgowandy/petastorm
f7aad8cf5ef6878231b335911e3e95541b388d40
[ "Apache-2.0" ]
241
2018-08-17T20:03:10.000Z
2022-03-22T11:18:47.000Z
# Copyright (c) 2017-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""In order for the predicates to be accessible from a process_pool when
test_end_to_end.py is run directly as __main__, these predicates have to be
implemented in a separate module"""

from petastorm.predicates import PredicateBase


class PartitionKeyInSetPredicate(PredicateBase):
    def __init__(self, inclusion_values):
        self._inclusion_values = inclusion_values

    def get_fields(self):
        return {'partition_key'}

    def do_include(self, values):
        return values['partition_key'] in self._inclusion_values


class EqualPredicate(PredicateBase):
    def __init__(self, values):
        self._values = values

    def get_fields(self):
        return list(self._values.keys())

    def do_include(self, values):
        return self._values == values


class VectorizedEqualPredicate(PredicateBase):
    def __init__(self, values):
        self._values = values

    def get_fields(self):
        return list(self._values.keys())

    def do_include(self, values):
        result = [True] * len(values)
        for field_name in self._values.keys():
            result &= values[field_name] == self._values[field_name]
        return result
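A hedged sketch of how one of these predicates is handed to a petastorm reader so that filtering happens at read time; the dataset URL and field value are placeholder assumptions:

from petastorm import make_reader

with make_reader('file:///tmp/dataset',
                 predicate=EqualPredicate({'partition_key': 'p_0'})) as reader:
    for row in reader:
        pass  # only rows matching the predicate are yielded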
32.740741
113
0.719457
213d929a3c980e9fe834f7fa747acce8f309753a
1,314
py
Python
views/parameterizer/state.py
computablelabs/compas
fbde4e2ef61ebadc5ffd6453bff60f678e6002d6
[ "MIT" ]
1
2019-10-24T19:16:10.000Z
2019-10-24T19:16:10.000Z
views/parameterizer/state.py
computablelabs/compas
fbde4e2ef61ebadc5ffd6453bff60f678e6002d6
[ "MIT" ]
7
2019-10-31T14:55:07.000Z
2019-12-11T19:29:15.000Z
views/parameterizer/state.py
computablelabs/compas
fbde4e2ef61ebadc5ffd6453bff60f678e6002d6
[ "MIT" ]
null
null
null
from asciimatics.widgets import Label, Divider, Text

from viewmodels.parameterizer.state import State

"""
Our subviews simply take a layout and inject their content into it
"""

def inject_parameterizer_state(layout, col=0):
    layout.add_widget(Label('Parameterizer'), col)
    layout.add_widget(Divider(line_char='-'), col)
    layout.add_widget(Text('Address:', 'p11r_address'), col)
    layout.add_widget(Text('Owner:', 'p11r_owner'), col)
    layout.add_widget(Text('Cost Per Byte:', 'p11r_cost_per_byte'), col)
    layout.add_widget(Text('Stake:', 'p11r_stake'), col)
    layout.add_widget(Text('Price Floor:', 'p11r_price_floor'), col)
    layout.add_widget(Text('Spread:', 'p11r_spread'), col)
    layout.add_widget(Text('List Reward:', 'p11r_list_reward'), col)
    layout.add_widget(Text('Plurality:', 'p11r_plurality'), col)
    layout.add_widget(Text('Vote By:', 'p11r_vote_by'), col)
    layout.add_widget(Text('Maker Payment:', 'p11r_maker_payment'), col)
    layout.add_widget(Text('Datatrust Payment:', 'p11r_backend_payment'), col)
    layout.add_widget(Text('Reserve Payment:', 'p11r_reserve_payment'), col)


def hydrate_parameterizer_state(data={}):
    """
    Given a dictionary, allow the viewmodel to hydrate the data
    needed by this view
    """
    vm = State()
    return vm.hydrate(data)
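A sketch of wiring this subview into an asciimatics Frame; the frame class and the assumption that hydrate_parameterizer_state() returns a dict suitable as Frame data are illustrative, not from the record above:

from asciimatics.widgets import Frame, Layout

class ParameterizerFrame(Frame):
    def __init__(self, screen):
        super(ParameterizerFrame, self).__init__(
            screen, screen.height, screen.width,
            data=hydrate_parameterizer_state())
        layout = Layout([100])
        self.add_layout(layout)             # layout must be attached before widgets
        inject_parameterizer_state(layout)  # fill column 0 with the labelled fields
        self.fix()                          # finalise widget geometry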
46.928571
83
0.716895
4097af1bc3aebeeffa30dbeea0a2cffefa6f4c51
1,651
py
Python
scripts/create_cert.py
g10f/sso
ba6eb712add388c69d4880f5620a2e4ce42d3fee
[ "BSD-3-Clause" ]
3
2021-05-16T17:06:57.000Z
2021-05-28T17:14:05.000Z
scripts/create_cert.py
g10f/sso
ba6eb712add388c69d4880f5620a2e4ce42d3fee
[ "BSD-3-Clause" ]
null
null
null
scripts/create_cert.py
g10f/sso
ba6eb712add388c69d4880f5620a2e4ce42d3fee
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
import os
import uuid
import subprocess

from OpenSSL import crypto


def create_self_signed_cert(cert_dir, cn, serial=int(uuid.uuid4().hex, 16)):
    """
    create a new self-signed cert and keypair and write them into that directory.
    """
    # create a key pair
    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, 1024)

    # create a self-signed cert
    cert = crypto.X509()
    cert.set_version(2)
    cert.get_subject().CN = cn
    cert.set_serial_number(serial)
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(1 * 365 * 24 * 60 * 60)
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(k)
    cert.add_extensions([
        crypto.X509Extension("basicConstraints", True, "CA:FALSE"),
        crypto.X509Extension("keyUsage", True, "digitalSignature"),
        crypto.X509Extension("extendedKeyUsage", True, "clientAuth"),
    ])
    cert.sign(k, 'sha1')

    fingerprint = cert.digest('sha1').replace(':', '').lower()
    cert_file = os.path.join(cert_dir, "%s.crt" % fingerprint)
    key_file = os.path.join(cert_dir, "%s.key" % fingerprint)
    pub_file = os.path.join(cert_dir, "%s.pub" % fingerprint)

    with open(cert_file, "wt") as f:
        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    with open(key_file, "wt") as f:
        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
    with open(pub_file, "wt") as f:
        output = subprocess.check_output(
            ["openssl", "x509", "-in", cert_file, "-pubkey", "-noout"])
        f.write(output)


def main():
    create_self_signed_cert("../certs", "sso.kagyu.net")


if __name__ == "__main__":
    main()
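An illustrative call of the helper above (Python 2 era pyOpenSSL API, matching the script); the target directory and common name are assumptions:

import tempfile

cert_dir = tempfile.mkdtemp()
create_self_signed_cert(cert_dir, "client.example.org")
# The directory now holds <sha1-fingerprint>.crt/.key/.pub files, which can be
# inspected with: openssl x509 -in <fingerprint>.crt -noout -text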
29.482143
100
0.656572
eec43671ab7edf56ade093b33091d6bf5dd51603
451
py
Python
misc/config.py
monill/timer
b03e85b30b74fc785ddc3e0e02eb1519fa3e3399
[ "MIT" ]
null
null
null
misc/config.py
monill/timer
b03e85b30b74fc785ddc3e0e02eb1519fa3e3399
[ "MIT" ]
null
null
null
misc/config.py
monill/timer
b03e85b30b74fc785ddc3e0e02eb1519fa3e3399
[ "MIT" ]
null
null
null
# Gui constants
TIMER_WIDTH = 200
TIMER_HEIGHT = 100

IDLE_TEXT = "--:--"

BOMB_TIME = 40
HURRY_UP_THRESHOLD = 15
WITHOUT_DEFUSER_THRESHOLD = 10
WITH_DEFUSER_THRESHOLD = 5

BG_COLOUR = "#191919"
FG_COLOUR = "#FFFFFF"
HURRY_UP_COLOUR = "#FFCC00"
WITHOUT_DEFUSER_COLOUR = "#FFCC00"
WITH_DEFUSER_COLOUR = "#FF4D4D"

FONT = ("Helvetica", 50, "bold")
TEXT_POSITION = (TIMER_WIDTH / 2, TIMER_HEIGHT / 2)

GUI_CHECK_MSG_INTERVAL = 100
DEBUG_ENABLED = False
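A hypothetical helper showing one plausible way the threshold constants drive the timer colour; the function name and the exact threshold semantics are assumptions, not part of the record:

def colour_for(seconds_left):
    if seconds_left <= WITH_DEFUSER_THRESHOLD:
        return WITH_DEFUSER_COLOUR       # critical: not even a kit defuse fits
    if seconds_left <= WITHOUT_DEFUSER_THRESHOLD:
        return WITHOUT_DEFUSER_COLOUR    # only a kit defuse still fits
    if seconds_left <= HURRY_UP_THRESHOLD:
        return HURRY_UP_COLOUR
    return FG_COLOUR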
18.791667
51
0.742794
7f0d0a58ca9ec8594f9f9f23f6e63e6ead22c642
154
py
Python
venv/bin/django-admin.py
OwinoLucas/Awwards-Clone
502bb47f08df4f01fecdc45debba0a4e6d3e51b4
[ "Unlicense" ]
null
null
null
venv/bin/django-admin.py
OwinoLucas/Awwards-Clone
502bb47f08df4f01fecdc45debba0a4e6d3e51b4
[ "Unlicense" ]
7
2021-03-19T04:35:59.000Z
2021-09-08T02:08:35.000Z
venv/bin/django-admin.py
OwinoLucas/Awwards-Clone
502bb47f08df4f01fecdc45debba0a4e6d3e51b4
[ "Unlicense" ]
null
null
null
#!/home/lucas/Documents/Awwards/venv/bin/python
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
25.666667
47
0.785714
dd34b1d6a8176f5094fae04cec61a6fe4a0aa46e
5,789
py
Python
mayan/apps/metadata/serializers.py
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
0e4e919fd2e1ded6711354a0330135283e87f8c7
[ "Apache-2.0" ]
2
2021-09-12T19:41:19.000Z
2021-09-12T19:41:20.000Z
mayan/apps/metadata/serializers.py
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
0e4e919fd2e1ded6711354a0330135283e87f8c7
[ "Apache-2.0" ]
37
2021-09-13T01:00:12.000Z
2021-10-02T03:54:30.000Z
mayan/apps/metadata/serializers.py
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
0e4e919fd2e1ded6711354a0330135283e87f8c7
[ "Apache-2.0" ]
1
2021-09-22T13:17:30.000Z
2021-09-22T13:17:30.000Z
from django.core.exceptions import ValidationError as DjangoValidationError
from django.utils.translation import ugettext_lazy as _

from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.reverse import reverse

from mayan.apps.documents.serializers.document_serializers import (
    DocumentSerializer
)
from mayan.apps.documents.serializers.document_type_serializers import (
    DocumentTypeSerializer
)
from mayan.apps.rest_api.serializer_mixins import CreateOnlyFieldSerializerMixin
from mayan.apps.rest_api.relations import FilteredPrimaryKeyRelatedField

from .models import DocumentMetadata, DocumentTypeMetadataType, MetadataType
from .permissions import permission_document_metadata_add


class MetadataTypeSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        extra_kwargs = {
            'url': {
                'lookup_field': 'pk',
                'lookup_url_kwarg': 'metadata_type_id',
                'view_name': 'rest_api:metadatatype-detail'
            },
        }
        fields = (
            'default', 'id', 'label', 'lookup', 'name', 'parser', 'url',
            'validation'
        )
        model = MetadataType


class DocumentTypeMetadataTypeSerializer(serializers.HyperlinkedModelSerializer):
    document_type = DocumentTypeSerializer(read_only=True)
    metadata_type = MetadataTypeSerializer(read_only=True)
    url = serializers.SerializerMethodField()

    class Meta:
        fields = ('document_type', 'id', 'metadata_type', 'required', 'url')
        model = DocumentTypeMetadataType

    def get_url(self, instance):
        return reverse(
            viewname='rest_api:documenttypemetadatatype-detail', kwargs={
                'document_type_id': instance.document_type.pk,
                'metadata_type_id': instance.pk
            }, request=self.context['request'], format=self.context['format']
        )


class NewDocumentTypeMetadataTypeSerializer(serializers.ModelSerializer):
    metadata_type_id = serializers.IntegerField(
        help_text=_('Primary key of the metadata type to be added.'),
        write_only=True
    )
    url = serializers.SerializerMethodField()

    class Meta:
        fields = (
            'id', 'metadata_type_id', 'required', 'url'
        )
        model = DocumentTypeMetadataType

    def get_url(self, instance):
        return reverse(
            viewname='rest_api:documenttypemetadatatype-detail', kwargs={
                'document_type_id': instance.document_type.pk,
                'metadata_type_id': instance.pk
            }, request=self.context['request'], format=self.context['format']
        )

    def validate(self, attrs):
        attrs['document_type'] = self.context['document_type']
        attrs['metadata_type'] = MetadataType.objects.get(
            pk=attrs.pop('metadata_type_id')
        )
        instance = DocumentTypeMetadataType(**attrs)
        try:
            instance.full_clean()
        except DjangoValidationError as exception:
            raise ValidationError(exception)

        return attrs


class WritableDocumentTypeMetadataTypeSerializer(serializers.ModelSerializer):
    url = serializers.SerializerMethodField()

    class Meta:
        fields = (
            'id', 'required', 'url'
        )
        model = DocumentTypeMetadataType

    def get_url(self, instance):
        return reverse(
            viewname='rest_api:documenttypemetadatatype-detail', kwargs={
                'document_type_id': instance.document_type.pk,
                'metadata_type_id': instance.pk
            }, request=self.context['request'], format=self.context['format']
        )


class DocumentMetadataSerializer(
    CreateOnlyFieldSerializerMixin, serializers.HyperlinkedModelSerializer
):
    metadata_type_id = FilteredPrimaryKeyRelatedField(
        help_text=_(
            'Primary key of the metadata type to be added to the document.'
        ), source_model=MetadataType,
        source_permission=permission_document_metadata_add
    )
    document = DocumentSerializer(read_only=True)
    metadata_type = MetadataTypeSerializer(read_only=True)
    url = serializers.SerializerMethodField()

    class Meta:
        create_only_fields = ('metadata_type_id',)
        fields = (
            'document', 'id', 'metadata_type', 'metadata_type_id', 'url',
            'value'
        )
        model = DocumentMetadata
        read_only_fields = ('document', 'metadata_type',)

    def get_url(self, instance):
        return reverse(
            viewname='rest_api:documentmetadata-detail', kwargs={
                'document_id': instance.document.pk,
                'metadata_id': instance.pk
            }, request=self.context['request'], format=self.context['format']
        )

    def validate(self, attrs):
        if self.instance:
            self.instance.value = attrs['value']
            try:
                self.instance.full_clean()
            except DjangoValidationError as exception:
                raise ValidationError(exception)

            attrs['value'] = self.instance.value
            return attrs
        else:
            attrs['document'] = self.context['external_object']
            attrs['metadata_type'] = MetadataType.objects.get(
                pk=attrs.pop('metadata_type_id').pk
            )
            instance = DocumentMetadata(**attrs)
            try:
                instance.full_clean()
            except DjangoValidationError as exception:
                raise ValidationError(exception)

            attrs['value'] = instance.value
            return attrs
35.084848
82
0.634479
5ef864aad937bf098827ccb045d6ca12a6f532b5
1,258
py
Python
ml/ml_logger.py
T-Sumida/sound-recognition
e043812bc6deb6d7f3fa7200472ce6aa58952095
[ "MIT" ]
1
2021-06-02T21:39:26.000Z
2021-06-02T21:39:26.000Z
ml/ml_logger.py
T-Sumida/sound-recognition
e043812bc6deb6d7f3fa7200472ce6aa58952095
[ "MIT" ]
null
null
null
ml/ml_logger.py
T-Sumida/sound-recognition
e043812bc6deb6d7f3fa7200472ce6aa58952095
[ "MIT" ]
null
null
null
# coding: utf-8
import mlflow
from typing import Dict


class MlLogger(object):
    def __init__(self, exp_name: str, tag: Dict):
        """Initialize mlflow.

        Args:
            exp_name (str): experiment name
            tag (Dict): tag information
        """
        mlflow.set_experiment(exp_name)
        mlflow.set_tags(tag)

    def logging_params(self, params: Dict) -> None:
        """Log parameters.

        Args:
            params (Dict): parameters
        """
        mlflow.log_params(params)

    def logging_step_metric(self, step: int, metrics: Dict) -> None:
        """Log metrics incrementally, step by step.

        Args:
            step (int): step number
            metrics (Dict): metrics
        """
        mlflow.log_metrics(metrics, step=step)

    def logging_metrics(self, metrics: Dict) -> None:
        """Log metrics.

        Args:
            metrics (Dict): metrics
        """
        mlflow.log_metrics(metrics)

    def logging_files(self, dir_path: str) -> None:
        """Log every file under the given directory.

        Args:
            dir_path (str): directory path
        """
        mlflow.log_artifacts(dir_path)

    def logging_file(self, file_path: str) -> None:
        """Log the specified file.

        Args:
            file_path (str): file path
        """
        mlflow.log_artifact(file_path)
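A usage sketch of the wrapper above inside a training loop; the experiment name, tags, and metric values are placeholders:

logger = MlLogger(exp_name="sound-recognition", tag={"model": "cnn"})
logger.logging_params({"lr": 1e-3, "batch_size": 32})
for epoch in range(3):
    logger.logging_step_metric(step=epoch, metrics={"val_loss": 1.0 / (epoch + 1)})
logger.logging_metrics({"best_val_loss": 0.33})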
21.689655
68
0.54531
d8cb92d2440e78854bccded907d171737d327f7d
882
py
Python
lib/apiclient/ext/django_orm.py
motord/Motorcycle-Diaries
bb5e5e2d4d79573b4231e760d7662db26c03a55e
[ "BSD-3-Clause" ]
1
2022-01-10T03:07:22.000Z
2022-01-10T03:07:22.000Z
lib/apiclient/ext/django_orm.py
motord/Motorcycle-Diaries
bb5e5e2d4d79573b4231e760d7662db26c03a55e
[ "BSD-3-Clause" ]
null
null
null
lib/apiclient/ext/django_orm.py
motord/Motorcycle-Diaries
bb5e5e2d4d79573b4231e760d7662db26c03a55e
[ "BSD-3-Clause" ]
null
null
null
import base64  # required by the (de)serialisation below; missing in the original
import pickle  # required by the (de)serialisation below; missing in the original

import apiclient.oauth  # required for the isinstance checks; missing in the original
from django.db import models


class OAuthCredentialsField(models.Field):

    __metaclass__ = models.SubfieldBase

    def db_type(self):
        return 'VARCHAR'

    def to_python(self, value):
        if value is None:
            return None
        if isinstance(value, apiclient.oauth.Credentials):
            return value
        return pickle.loads(base64.b64decode(value))

    def get_db_prep_value(self, value):
        return base64.b64encode(pickle.dumps(value))


class FlowThreeLeggedField(models.Field):

    __metaclass__ = models.SubfieldBase

    def db_type(self):
        return 'VARCHAR'

    def to_python(self, value):
        print "In to_python", value
        if value is None:
            return None
        if isinstance(value, apiclient.oauth.FlowThreeLegged):
            return value
        return pickle.loads(base64.b64decode(value))

    def get_db_prep_value(self, value):
        return base64.b64encode(pickle.dumps(value))
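A hypothetical Django model showing how these custom fields would be declared; the model name and the extra column are assumptions for illustration:

class StoredCredentials(models.Model):
    user_email = models.CharField(max_length=200)
    credentials = OAuthCredentialsField()  # stored pickled + base64-encoded in a VARCHAR
    flow = FlowThreeLeggedField()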
22.615385
58
0.723356
a2428c4314ebe89f10ec337c5717c6e27e0eb1d0
10,095
py
Python
parseridge/parser/model.py
jgontrum/parseridge
f87ef82a9468addeb09a91c3b5310db38840bf0f
[ "Apache-2.0" ]
6
2019-10-06T23:00:29.000Z
2020-04-08T02:04:24.000Z
parseridge/parser/model.py
jgontrum/parseridge
f87ef82a9468addeb09a91c3b5310db38840bf0f
[ "Apache-2.0" ]
2
2020-04-28T12:06:23.000Z
2020-04-28T13:30:03.000Z
parseridge/parser/model.py
jgontrum/parseridge
f87ef82a9468addeb09a91c3b5310db38840bf0f
[ "Apache-2.0" ]
1
2019-12-23T00:53:38.000Z
2019-12-23T00:53:38.000Z
from typing import List, Tuple, Optional

import torch
import torch.nn as nn

from parseridge.corpus.relations import Relations
from parseridge.corpus.vocabulary import Vocabulary
from parseridge.parser.modules.add_and_norm_layer import AddAndNormLayer
from parseridge.parser.modules.configuration_encoder import (
    CONFIGURATION_ENCODERS,
    EvalAttentionReporter,
)
from parseridge.parser.modules.data_parallel import Module
from parseridge.parser.modules.external_embeddings import ExternalEmbeddings
from parseridge.parser.modules.identity import Identity
from parseridge.parser.modules.input_encoder import InputEncoder
from parseridge.parser.modules.mlp import MultilayerPerceptron
from parseridge.parser.modules.utils import initialize_xavier_dynet_


class ParseridgeModel(Module):
    def __init__(
        self,
        relations: Relations,
        vocabulary: Vocabulary,
        num_stack: int = 3,
        num_buffer: int = 1,
        lstm_dropout: float = 0.33,
        mlp_dropout: float = 0.25,
        embedding_size: int = 100,
        lstm_hidden_size: int = 125,
        lstm_layers: int = 2,
        input_encoder_type: str = "lstm",
        transition_mlp_layers: List[int] = None,
        relation_mlp_layers: List[int] = None,
        transition_mlp_activation: nn.Module = nn.Tanh,
        relation_mlp_activation: nn.Module = nn.Tanh,
        mlp_input_transformation_layers: List[int] = None,
        encoder_output_transformation_layers: List[int] = None,
        embeddings: ExternalEmbeddings = None,
        self_attention_heads: int = 10,
        self_attention_layers: int = 2,
        configuration_encoder: str = "static",
        scale_query: int = None,
        scale_key: int = None,
        scale_value: int = None,
        scoring_function: str = "dot",
        normalization_function: str = "softmax",
        attention_reporter: Optional[EvalAttentionReporter] = None,
        reduce_dimensionality: Optional[int] = None,
        device: str = "cpu",
    ) -> None:
        super().__init__(device=device)

        """ Parameter definitions """
        self.relations = relations
        self.vocabulary = vocabulary

        self.input_encoder_type = input_encoder_type
        self.embedding_size = embedding_size
        self.lstm_hidden_size = lstm_hidden_size
        self.lstm_dropout = lstm_dropout
        self.lstm_layers = lstm_layers
        self.mlp_dropout = mlp_dropout

        if relation_mlp_layers is None:
            relation_mlp_layers = [100]

        if transition_mlp_layers is None:
            transition_mlp_layers = [100]

        self.num_transitions = 4  # LEFT_ARC, RIGHT_ARC, SHIFT and SWAP
        self.num_labels = len(self.relations)

        # Take the top n entries from buffer / stack as input into the MLP
        self.stack_size = num_stack
        self.buffer_size = num_buffer

        """ Module definitions """
        self.input_encoder = InputEncoder(
            token_vocabulary=self.vocabulary,
            token_embedding_size=self.embedding_size,
            hidden_size=self.lstm_hidden_size,
            layers=self.lstm_layers,
            dropout=self.lstm_dropout,
            sum_directions=False,
            reduce_dimensionality=reduce_dimensionality,
            mode=self.input_encoder_type,
            self_attention_heads=self_attention_heads,
            self_attention_layers=self_attention_layers,
            device=self.device,
        )

        self.encoder_output_transform = (
            MultilayerPerceptron(
                input_size=self.input_encoder.output_size,
                hidden_sizes=[512],
                output_size=self.input_encoder.output_size,
                activation=nn.Tanh,
            )
            if encoder_output_transformation_layers
            else Identity()
        )

        self.encoder_output_transform_norm = (
            AddAndNormLayer(model_size=self.input_encoder.output_size)
            if encoder_output_transformation_layers
            else Identity()
        )

        """Computes attention over the output of the input encoder given the state
        of the action encoder."""
        self.configuration_encoder = CONFIGURATION_ENCODERS[configuration_encoder](
            model_size=self.input_encoder.output_size,
            scale_query=scale_query,
            scale_key=scale_key,
            scale_value=scale_value,
            scoring_function=scoring_function,
            normalization_function=normalization_function,
            num_stack=self.stack_size,
            num_buffer=self.buffer_size,
            reporter=attention_reporter,
            device=self.device,
        )

        self.mlp_in_size = self.configuration_encoder.output_size

        self.mlp_input_transform = (
            MultilayerPerceptron(
                input_size=self.mlp_in_size,
                hidden_sizes=mlp_input_transformation_layers,
                output_size=self.mlp_in_size,
                activation=nn.ReLU,
            )
            if mlp_input_transformation_layers
            else Identity()
        )

        self.mlp_input_transform_norm = (
            AddAndNormLayer(model_size=self.mlp_in_size)
            if mlp_input_transformation_layers
            else Identity()
        )

        self.transition_mlp = MultilayerPerceptron(
            input_size=self.mlp_in_size,
            hidden_sizes=transition_mlp_layers,
            output_size=self.num_transitions,
            dropout=self.mlp_dropout,
            activation=transition_mlp_activation,
            device=self.device,
        )

        self.relation_mlp = MultilayerPerceptron(
            input_size=self.mlp_in_size,
            hidden_sizes=relation_mlp_layers,
            output_size=self.num_labels,
            dropout=self.mlp_dropout,
            activation=relation_mlp_activation,
            device=self.device,
        )

        self._mlp_padding_param = nn.Parameter(
            torch.zeros(
                self.input_encoder.output_size, dtype=torch.float, device=self.device
            )
        )
        self._mlp_padding = None

        initialize_xavier_dynet_(self)

        if embeddings:
            self.input_encoder.load_external_embeddings(embeddings)

    def reset_(self) -> None:
        self._mlp_padding = nn.Tanh()(self._mlp_padding_param)

    def get_contextualized_input(
        self, token_sequences: torch.Tensor, sentence_lengths: torch.Tensor
    ) -> torch.Tensor:
        outputs, _ = self.input_encoder(token_sequences, sentence_lengths)
        return outputs

    def compute_mlp_output(
        self,
        contextualized_input_batch: torch.Tensor,
        stacks: torch.Tensor,
        stack_lengths: torch.Tensor,
        buffers: torch.Tensor,
        buffer_lengths: torch.Tensor,
        finished_tokens: Optional[torch.Tensor] = None,
        finished_tokens_lengths: Optional[torch.Tensor] = None,
        sentence_lengths: Optional[torch.Tensor] = None,
        sentence_features: Optional[torch.Tensor] = None,
        sentence_ids: Optional[List[str]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        contextualized_input_batch = self.encoder_output_transform_norm(
            input=contextualized_input_batch,
            output=self.encoder_output_transform(contextualized_input_batch),
        )

        mlp_input = self.configuration_encoder(
            contextualized_input_batch=contextualized_input_batch,
            stacks=stacks,
            buffers=buffers,
            stack_lengths=stack_lengths,
            buffer_lengths=buffer_lengths,
            finished_tokens=finished_tokens,
            finished_tokens_lengths=finished_tokens_lengths,
            sentence_lengths=sentence_lengths,
            sentence_features=sentence_features,
            sentence_ids=sentence_ids,
            padding=self._mlp_padding,
        )

        mlp_input = self.mlp_input_transform_norm(
            input=mlp_input, output=self.mlp_input_transform(mlp_input)
        )

        # Use output and feed it into MLP
        transitions_output = self.transition_mlp(mlp_input)
        relations_output = self.relation_mlp(mlp_input)

        return transitions_output, relations_output

    def forward(
        self,
        stacks: torch.Tensor,
        stack_lengths: torch.Tensor,
        buffers: torch.Tensor,
        buffer_lengths: torch.Tensor,
        sentence_tokens: Optional[torch.Tensor] = None,
        sentence_lengths: Optional[torch.Tensor] = None,
        contextualized_input_batch: Optional[List[torch.Tensor]] = None,
        finished_tokens: Optional[torch.Tensor] = None,
        finished_tokens_lengths: Optional[torch.Tensor] = None,
        sentence_features: Optional[torch.Tensor] = None,
        sentence_ids: Optional[List[str]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        if contextualized_input_batch is None:
            assert sentence_tokens is not None
            assert sentence_lengths is not None
            # Pass all sentences through the input encoder to create contextualized
            # token tensors.
            contextualized_input_batch = self.get_contextualized_input(
                sentence_tokens, sentence_lengths
            )
        else:
            # If we already have the output of the input encoder as lists,
            # create a tensor out of them. This happens usually in prediction.
            contextualized_input_batch = torch.stack(contextualized_input_batch)

        transitions_output, relations_output = self.compute_mlp_output(
            contextualized_input_batch=contextualized_input_batch,
            stacks=stacks,
            stack_lengths=stack_lengths,
            buffers=buffers,
            buffer_lengths=buffer_lengths,
            sentence_lengths=sentence_lengths,
            finished_tokens=finished_tokens,
            finished_tokens_lengths=finished_tokens_lengths,
            sentence_features=sentence_features,
            sentence_ids=sentence_ids,
        )

        return transitions_output, relations_output
36.978022
89
0.658049
70d6ee22e8701cd028966d810ff94190757918ad
9,037
py
Python
venv/share/pyshared/pyx/pswriter.py
pengwu/scapy_env
3db9c5dea2e219048a2387649d6d89be342903d9
[ "MIT" ]
null
null
null
venv/share/pyshared/pyx/pswriter.py
pengwu/scapy_env
3db9c5dea2e219048a2387649d6d89be342903d9
[ "MIT" ]
null
null
null
venv/share/pyshared/pyx/pswriter.py
pengwu/scapy_env
3db9c5dea2e219048a2387649d6d89be342903d9
[ "MIT" ]
null
null
null
# -*- encoding: utf-8 -*-
#
#
# Copyright (C) 2005-2011 Jörg Lehmann <joergl@users.sourceforge.net>
# Copyright (C) 2005-2011 André Wobst <wobsta@users.sourceforge.net>
#
# This file is part of PyX (http://pyx.sourceforge.net/).
#
# PyX is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PyX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyX; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA

import cStringIO, copy, time, math
import bbox, config, style, version, unit, trafo


class PSregistry:

    def __init__(self):
        # in order to keep a consistent order of the registered resources we
        # not only store them in a hash but also keep an ordered list (up to a
        # possible merging of resources, in which case the first instance is
        # kept)
        self.resourceshash = {}
        self.resourceslist = []

    def add(self, resource):
        rkey = (resource.type, resource.id)
        if self.resourceshash.has_key(rkey):
            self.resourceshash[rkey].merge(resource)
        else:
            self.resourceshash[rkey] = resource
            self.resourceslist.append(resource)

    def mergeregistry(self, registry):
        for resource in registry.resources:
            self.add(resource)

    def output(self, file, writer):
        """ write all PostScript code of the prolog resources """
        for resource in self.resourceslist:
            resource.output(file, writer, self)

#
# Abstract base class
#

class PSresource:

    """ a PostScript resource """

    def __init__(self, type, id):
        # Every PSresource has to have a type and a unique id.
        # Resources with the same type and id will be merged
        # when they are registered in the PSregistry
        self.type = type
        self.id = id

    def merge(self, other):
        """ merge self with other, which has to be a resource of the same type
        and with the same id"""
        pass

    def output(self, file, writer, registry):
        raise NotImplementedError("output not implemented for %s" % repr(self))

class PSdefinition(PSresource):

    """ PostScript function definition included in the prolog """

    def __init__(self, id, body):
        self.type = "definition"
        self.id = id
        self.body = body

    def output(self, file, writer, registry):
        file.write("%%%%BeginResource: %s\n" % self.id)
        file.write("%(body)s /%(id)s exch def\n" % self.__dict__)
        file.write("%%EndResource\n")

#
# Writers
#

class _PSwriter:

    def __init__(self, title=None, strip_fonts=True, text_as_path=False,
                 mesh_as_bitmap=False, mesh_as_bitmap_resolution=300):
        self._fontmap = None
        self.title = title
        self.strip_fonts = strip_fonts
        self.text_as_path = text_as_path
        self.mesh_as_bitmap = mesh_as_bitmap
        self.mesh_as_bitmap_resolution = mesh_as_bitmap_resolution

        # dictionary mapping font names to dictionaries mapping encoding names to encodings
        # encodings themselves are mappings from glyphnames to codepoints
        self.encodings = {}

    def writeinfo(self, file):
        file.write("%%%%Creator: PyX %s\n" % version.version)
        if self.title is not None:
            file.write("%%%%Title: %s\n" % self.title)
        file.write("%%%%CreationDate: %s\n" %
                   time.asctime(time.localtime(time.time())))

    def getfontmap(self):
        if self._fontmap is None:
            # late import due to cyclic dependency
            from pyx.dvi import mapfile
            fontmapfiles = config.getlist("text", "psfontmaps", ["psfonts.map"])
            self._fontmap = mapfile.readfontmap(fontmapfiles)
        return self._fontmap


class EPSwriter(_PSwriter):

    def __init__(self, document, file, **kwargs):
        _PSwriter.__init__(self, **kwargs)

        if len(document.pages) != 1:
            raise ValueError("EPS file can be constructed out of a single page document only")
        page = document.pages[0]
        canvas = page.canvas

        pagefile = cStringIO.StringIO()
        registry = PSregistry()
        acontext = context()
        pagebbox = bbox.empty()

        page.processPS(pagefile, self, acontext, registry, pagebbox)

        file.write("%!PS-Adobe-3.0 EPSF-3.0\n")
        if pagebbox:
            file.write("%%%%BoundingBox: %d %d %d %d\n" % pagebbox.lowrestuple_pt())
            file.write("%%%%HiResBoundingBox: %g %g %g %g\n" % pagebbox.highrestuple_pt())
        self.writeinfo(file)
        file.write("%%EndComments\n")

        file.write("%%BeginProlog\n")
        registry.output(file, self)
        file.write("%%EndProlog\n")

        file.write(pagefile.getvalue())
        pagefile.close()

        file.write("showpage\n")
        file.write("%%Trailer\n")
        file.write("%%EOF\n")


class PSwriter(_PSwriter):

    def __init__(self, document, file, writebbox=False, **kwargs):
        _PSwriter.__init__(self, **kwargs)

        # We first have to process the content of the pages, writing them into the stream pagesfile
        # Doing so, we fill the registry and also calculate the page bounding boxes, which are
        # stored in page._bbox for every page
        pagesfile = cStringIO.StringIO()
        registry = PSregistry()

        # calculated bounding boxes of the whole document
        documentbbox = bbox.empty()

        for nr, page in enumerate(document.pages):
            # process contents of page
            pagefile = cStringIO.StringIO()
            acontext = context()
            pagebbox = bbox.empty()
            page.processPS(pagefile, self, acontext, registry, pagebbox)

            documentbbox += pagebbox

            pagesfile.write("%%%%Page: %s %d\n" % (page.pagename is None and str(nr+1) or page.pagename, nr+1))
            if page.paperformat:
                pagesfile.write("%%%%PageMedia: %s\n" % page.paperformat.name)
            pagesfile.write("%%%%PageOrientation: %s\n" % (page.rotated and "Landscape" or "Portrait"))
            if pagebbox and writebbox:
                pagesfile.write("%%%%PageBoundingBox: %d %d %d %d\n" % pagebbox.lowrestuple_pt())

            # page setup section
            pagesfile.write("%%BeginPageSetup\n")
            pagesfile.write("/pgsave save def\n")

            pagesfile.write("%%EndPageSetup\n")
            pagesfile.write(pagefile.getvalue())
            pagefile.close()
            pagesfile.write("pgsave restore\n")
            pagesfile.write("showpage\n")
            pagesfile.write("%%PageTrailer\n")

        file.write("%!PS-Adobe-3.0\n")
        if documentbbox and writebbox:
            file.write("%%%%BoundingBox: %d %d %d %d\n" % documentbbox.lowrestuple_pt())
            file.write("%%%%HiResBoundingBox: %g %g %g %g\n" % documentbbox.highrestuple_pt())
        self.writeinfo(file)

        # required paper formats
        paperformats = {}
        for page in document.pages:
            if page.paperformat:
                paperformats[page.paperformat] = page.paperformat
        first = 1
        for paperformat in paperformats.values():
            if first:
                file.write("%%DocumentMedia: ")
                first = 0
            else:
                file.write("%%+ ")
            file.write("%s %d %d 75 white ()\n" % (paperformat.name,
                                                   unit.topt(paperformat.width),
                                                   unit.topt(paperformat.height)))

        # file.write(%%DocumentNeededResources: ")
        # register not downloaded fonts here

        file.write("%%%%Pages: %d\n" % len(document.pages))
        file.write("%%PageOrder: Ascend\n")
        file.write("%%EndComments\n")

        # document defaults section
        #file.write("%%BeginDefaults\n")
        #file.write("%%EndDefaults\n")

        # document prolog section
        file.write("%%BeginProlog\n")
        registry.output(file, self)
        file.write("%%EndProlog\n")

        # document setup section
        #file.write("%%BeginSetup\n")
        #file.write("%%EndSetup\n")

        file.write(pagesfile.getvalue())
        pagesfile.close()

        file.write("%%Trailer\n")
        file.write("%%EOF\n")


class context:

    def __init__(self):
        self.linewidth_pt = None
        self.colorspace = None
        self.selectedfont = None

    def __call__(self, **kwargs):
        newcontext = copy.copy(self)
        for key, value in kwargs.items():
            setattr(newcontext, key, value)
        return newcontext
34.361217
126
0.610822
ec16df41406cbf057fa319d2d46e854377d05c0c
7,327
py
Python
linkml/generators/pydanticgen.py
rosdyana/linkml
dc2a082d59b32fadde709650f0f6581275db5658
[ "CC0-1.0" ]
null
null
null
linkml/generators/pydanticgen.py
rosdyana/linkml
dc2a082d59b32fadde709650f0f6581275db5658
[ "CC0-1.0" ]
null
null
null
linkml/generators/pydanticgen.py
rosdyana/linkml
dc2a082d59b32fadde709650f0f6581275db5658
[ "CC0-1.0" ]
null
null
null
import os
import logging
from typing import Optional, Tuple, List, Union, TextIO, Callable, Dict, Iterator, Set
from copy import deepcopy

import click
from jinja2 import Template
from linkml_runtime.utils.schemaview import SchemaView

#from linkml.generators import pydantic_GEN_VERSION
from linkml_runtime.linkml_model.meta import SchemaDefinition, TypeDefinition, ClassDefinition, Annotation
from linkml_runtime.utils.formatutils import camelcase, underscore
from linkml.generators.oocodegen import OOCodeGenerator
from linkml.utils.generator import shared_arguments

default_template = """
{#- Jinja2 Template for a pydantic classes -#}
from __future__ import annotations
from datetime import datetime, date
from enum import Enum
from typing import List, Dict, Optional
from pydantic import BaseModel, Field

{% for e in schema.enums.values() %}
class {{ e.name }}(str, Enum):
    {% if e.description -%}
    \"\"\"
    {{ e.description }}
    \"\"\"
    {%- endif %}
    {% for pv in e.permissible_values.values() -%}
    {{underscore(pv.text)}} = "{{pv.text}}"
    {% endfor %}
{% endfor %}

{% for c in schema.classes.values() %}
class {{ c.name }}(
    {%- if c.is_a %}{{c.is_a}}{% else %}BaseModel{% endif -%}
    {#- {%- for p in c.mixins %}, "{{p}}" {% endfor -%} -#}
    ):
    {% if c.description -%}
    \"\"\"
    {{ c.description }}
    \"\"\"
    {%- endif %}
    {% for attr in c.attributes.values() -%}
    {{attr.name}}: {{ attr.annotations['python_range'].value }} = Field(None
    {%- if attr.title != None %}, title="{{attr.title}}"{% endif -%}
    {%- if attr.description %}, description="{{attr.description}}"{% endif -%}
    {%- if attr.minimum_value != None %}, ge={{attr.minimum_value}}{% endif -%}
    {%- if attr.maximum_value != None %}, le={{attr.maximum_value}}{% endif -%}
    )
    {% endfor %}
    {% if not c.attributes %}
    None
    {% endif %}
{% endfor %}

{% for c in schema.classes.values() %}
{{ c.name }}.update_forward_refs()
{% endfor %}
"""


def _get_pyrange(t: TypeDefinition, sv: SchemaView) -> str:
    pyrange = t.repr
    if pyrange is None:
        pyrange = t.base
    if t.base == 'XSDDateTime':
        pyrange = 'datetime '
    if t.base == 'XSDDate':
        pyrange = 'date'
    if pyrange is None and t.typeof is not None:
        pyrange = _get_pyrange(sv.get_type(t.typeof), sv)
    if pyrange is None:
        raise Exception(f'No python type for range: {t.name} // {t}')
    return pyrange


class PydanticGenerator(OOCodeGenerator):
    generatorname = os.path.basename(__file__)
    generatorversion = '0.0.1'
    valid_formats = ['pydantic']
    visit_all_class_slots = False

    def __init__(self, schema: Union[str, TextIO, SchemaDefinition],
                 package: str = None,
                 template_file: str = None,
                 format: str = valid_formats[0],
                 genmeta: bool=False, gen_classvars: bool=True, gen_slots: bool=True, **kwargs) -> None:
        self.sourcefile = schema
        self.schemaview = SchemaView(schema)
        self.schema = self.schemaview.schema
        self.package = package
        self.template_file = template_file

    def map_type(self, t: TypeDefinition) -> str:
        return TYPEMAP.get(t.base, t.base)

    def serialize(self, directory: str = None) -> None:
        sv = self.schemaview

        if self.template_file is not None:
            with open(self.template_file) as template_file:
                template_obj = Template(template_file.read())
        else:
            template_obj = Template(default_template)

        sv: SchemaView
        sv = self.schemaview
        schema = sv.schema
        #print(f'# SV c={sv.all_classes().keys()}')
        pyschema = SchemaDefinition(id=schema.id, name=schema.name, description=schema.description)
        for en, e in sv.all_enums().items():
            e2: ClassDefinition
            e2 = deepcopy(e)
            e2.name = camelcase(e.name)
            pyschema.enums[e2.name] = e2
        for cn, c in sv.all_classes().items():
            c2: ClassDefinition
            c2 = deepcopy(c)
            c2.name = camelcase(c.name)
            if c2.is_a:
                c2.is_a = camelcase(c2.is_a)
            c2.mixins = [camelcase(p) for p in c2.mixins]
            pyschema.classes[c2.name] = c2
            for a in list(c2.attributes.keys()):
                del c2.attributes[a]
            for sn in sv.class_slots(cn):
                # TODO: fix runtime, copy should not be necessary
                s = deepcopy(sv.induced_slot(sn, cn))
                logging.error(f'Induced slot {cn}.{sn} == {s.name} {s.range}')
                s.name = underscore(s.name)
                c2.attributes[s.name] = s
                collection_key = None
                if s.range in sv.all_classes():
                    range_cls = sv.get_class(s.range)
                    #pyrange = f'"{camelcase(s.range)}"'
                    if s.inlined or sv.get_identifier_slot(range_cls.name) is None:
                        pyrange = f'{camelcase(s.range)}'
                        if sv.get_identifier_slot(range_cls.name) is not None and not s.inlined_as_list:
                            #collection_type = sv.get_identifier_slot(range_cls.name).range
                            collection_type = 'str'
                    else:
                        pyrange = 'str'
                elif s.range in sv.all_enums():
                    pyrange = f'{camelcase(s.range)}'
                elif s.range in sv.all_types():
                    t = sv.get_type(s.range)
                    pyrange = _get_pyrange(t, sv)
                elif s.range is None:
                    pyrange = 'str'
                else:
                    # TODO: default ranges in schemagen
                    #pyrange = 'str'
                    #logging.error(f'range: {s.range} is unknown')
                    raise Exception(f'range: {s.range}')
                if s.multivalued:
                    if collection_key is None:
                        pyrange = f'List[{pyrange}]'
                    else:
                        pyrange = f'Dict[{collection_key}, {pyrange}]'
                if not s.required:
                    pyrange = f'Optional[{pyrange}]'
                ann = Annotation('python_range', pyrange)
                s.annotations[ann.tag] = ann
        code = template_obj.render(schema=pyschema, underscore=underscore)
        return code


@shared_arguments(PydanticGenerator)
@click.option("--output_directory", default="output", help="Output directory for individually generated class files")
@click.option("--package", help="Package name where relevant for generated class files")
@click.option("--template_file", help="Optional jinja2 template to use for class generation")
@click.command()
def cli(yamlfile, output_directory=None, package=None, template_file=None, head=True,
        emit_metadata=False, genmeta=False, classvars=True, slots=True, **args):
    """Generate pydantic classes to represent a LinkML model"""
    gen = PydanticGenerator(yamlfile, package=package, template_file=template_file,
                            emit_metadata=head, genmeta=genmeta,
                            gen_classvars=classvars, gen_slots=slots, **args)
    print(gen.serialize(output_directory))


if __name__ == '__main__':
    cli()
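A hedged usage sketch for the generator above; the schema filename is a placeholder assumption:

# Programmatic use: render pydantic classes from a LinkML schema file.
gen = PydanticGenerator('personinfo.yaml')
print(gen.serialize())

# Or via the click CLI defined above, roughly:
#   python pydanticgen.py personinfo.yaml > personinfo_models.py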
38.973404
171
0.587143
cf5ec17a1cdce1b2a9a7f37ac1145139cc00978e
11,056
py
Python
udocker/tools.py
vsoch/udocker
783b339934b0208ce1501fbd2bccb8362271a321
[ "Apache-2.0" ]
null
null
null
udocker/tools.py
vsoch/udocker
783b339934b0208ce1501fbd2bccb8362271a321
[ "Apache-2.0" ]
null
null
null
udocker/tools.py
vsoch/udocker
783b339934b0208ce1501fbd2bccb8362271a321
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
"""Tools for udocker"""

import os
import sys
import tarfile
import random
import json
import stat

from udocker import is_genstr
from udocker.config import Config
from udocker.utils.curl import GetURL
from udocker.utils.fileutil import FileUtil
from udocker.msg import Msg
from udocker import __version__


def _str(data):
    """Safe str for Python 3 and Python 2"""
    if sys.version_info[0] >= 3:
        try:
            return data.decode()
        except (UnicodeDecodeError, AttributeError):
            pass
    return data


class UdockerTools(object):
    """Download and setup of the udocker supporting tools
    Includes: proot and alternative python modules, these
    are downloaded to facilitate the installation by the
    end-user.
    """

    def __init__(self, localrepo):
        self.localrepo = localrepo                        # LocalRepository object
        self._autoinstall = Config.conf['autoinstall']    # True / False
        self._tarball = Config.conf['tarball']            # URL or file
        self._installinfo = Config.conf['installinfo']    # URL or file
        self._tarball_release = Config.conf['tarball_release']
        self._installretry = Config.conf['installretry']
        self._install_json = dict()
        self.curl = GetURL()

    def _instructions(self):
        """
        Udocker installation instructions are available at:

        https://github.com/indigo-dc/udocker/tree/master/doc
        https://github.com/indigo-dc/udocker/tree/devel/doc

        Udocker requires additional tools to run. These are available
        in the udocker tarball. The tarballs are available at several
        locations. By default udocker will install from the locations
        defined in Config.tarball.

        To install from files or other URLs use these instructions:

        1) set UDOCKER_TARBALL to a remote URL or local filename:

          $ export UDOCKER_TARBALL=http://host/path
          or
          $ export UDOCKER_TARBALL=/tmp/filename

        2) run udocker with the install command or optionally using
           the option --force to overwrite the local installation:

          ./udocker install
          or
          ./udocker install --force

        3) once installed the binaries and containers will be placed
           by default under $HOME/.udocker
        """
        Msg().out(self._instructions.__doc__, l=Msg.ERR)
        Msg().out("udocker command line interface version:", __version__,
                  "\nrequires udockertools tarball release :",
                  self._tarball_release, l=Msg.ERR)

    def _version2int(self, version):
        """Convert version string to integer"""
        version_int = 0
        factor = 1000 * 1000
        for vitem in _str(version).strip().split('.'):
            try:
                version_int = version_int + (int(vitem) * factor)
            except (TypeError, ValueError):
                pass
            factor = factor / 1000
        return int(version_int)

    def _version_isok(self, version):
        """Is version >= than the minimum required tarball release"""
        if not (version and self._tarball_release):
            return False
        tarball_version_int = self._version2int(version)
        required_version_int = self._version2int(self._tarball_release)
        return tarball_version_int >= required_version_int

    def is_available(self):
        """Are the tools already installed"""
        version = \
            FileUtil(self.localrepo.libdir + "/VERSION").getdata('r').strip()
        return self._version_isok(version)

    def purge(self):
        """Remove existing files in bin, lib and doc"""
        for f_name in os.listdir(self.localrepo.bindir):
            f_path = self.localrepo.bindir + '/' + f_name
            FileUtil(f_path).register_prefix()
            FileUtil(f_path).remove(recursive=True)
        for f_name in os.listdir(self.localrepo.libdir):
            f_path = self.localrepo.libdir + '/' + f_name
            FileUtil(f_path).register_prefix()
            FileUtil(f_path).remove(recursive=True)
        for f_name in os.listdir(self.localrepo.docdir):
            f_path = self.localrepo.docdir + '/' + f_name
            FileUtil(f_path).register_prefix()
            FileUtil(f_path).remove(recursive=True)

    def _download(self, url):
        """Download a file"""
        download_file = FileUtil("udockertools").mktmp()
        if Msg.level <= Msg.DEF:
            Msg().setlevel(Msg.NIL)
        (hdr, dummy) = self.curl.get(url, ofile=download_file, follow=True)
        if Msg.level == Msg.NIL:
            Msg().setlevel()
        try:
            if "200" in hdr.data["X-ND-HTTPSTATUS"]:
                return download_file
        except (KeyError, TypeError, AttributeError):
            pass
        FileUtil(download_file).remove()
        return ""

    def _get_file(self, url):
        """Get file from list of possible locations file or internet"""
        filename = ""
        if "://" in url:
            filename = self._download(url)
        elif os.path.exists(url):
            filename = os.path.realpath(url)
        if filename and os.path.isfile(filename):
            return filename
        return ""

    def _verify_version(self, tarball_file):
        """verify the tarball version"""
        if not (tarball_file and os.path.isfile(tarball_file)):
            return (False, "")
        tmpdir = FileUtil("VERSION").mktmpdir()
        if not tmpdir:
            return (False, "")
        try:
            tfile = tarfile.open(tarball_file, "r:gz")
            for tar_in in tfile.getmembers():
                if tar_in.name.startswith("udocker_dir/lib/VERSION"):
                    tar_in.name = os.path.basename(tar_in.name)
                    tfile.extract(tar_in, path=tmpdir)
            tfile.close()
        except tarfile.TarError:
            FileUtil(tmpdir).remove(recursive=True)
            return (False, "")
        tarball_version = FileUtil(tmpdir + "/VERSION").getdata('r').strip()
        status = self._version_isok(tarball_version)
        FileUtil(tmpdir).remove(recursive=True)
        return (status, tarball_version)

    def _install(self, tarball_file):
        """Install the tarball"""
        if not (tarball_file and os.path.isfile(tarball_file)):
            return False
        FileUtil(self.localrepo.topdir).chmod()
        self.localrepo.create_repo()

        try:
            tfile = tarfile.open(tarball_file, "r:gz")
            FileUtil(self.localrepo.bindir).rchmod()
            for tar_in in tfile.getmembers():
                if tar_in.name.startswith("udocker_dir/bin/"):
                    tar_in.name = os.path.basename(tar_in.name)
                    Msg().out("Info: extracting", tar_in.name, l=Msg.DBG)
                    tfile.extract(tar_in, self.localrepo.bindir)
            FileUtil(self.localrepo.bindir).rchmod(stat.S_IRUSR | stat.S_IWUSR |
                                                   stat.S_IXUSR)

            FileUtil(self.localrepo.libdir).rchmod()
            for tar_in in tfile.getmembers():
                if tar_in.name.startswith("udocker_dir/lib/"):
                    tar_in.name = os.path.basename(tar_in.name)
                    Msg().out("Info: extracting", tar_in.name, l=Msg.DBG)
                    tfile.extract(tar_in, self.localrepo.libdir)
            FileUtil(self.localrepo.libdir).rchmod()

            FileUtil(self.localrepo.docdir).rchmod()
            for tar_in in tfile.getmembers():
                if tar_in.name.startswith("udocker_dir/doc/"):
                    tar_in.name = os.path.basename(tar_in.name)
                    Msg().out("Info: extracting", tar_in.name, l=Msg.DBG)
                    tfile.extract(tar_in, self.localrepo.docdir)
            FileUtil(self.localrepo.docdir).rchmod()
            tfile.close()
        except tarfile.TarError:
            return False
        return True

    def _get_mirrors(self, mirrors):
        """Get shuffled list of tarball mirrors"""
        if is_genstr(mirrors):
            mirrors = mirrors.split(' ')
        try:
            random.shuffle(mirrors)
        except NameError:
            pass
        return mirrors

    def get_installinfo(self):
        """Get json containing installation info"""
        Msg().out("Info: searching for messages:", l=Msg.VER)
        for url in self._get_mirrors(self._installinfo):
            infofile = self._get_file(url)
            try:
                with open(infofile, 'r') as filep:
                    self._install_json = json.load(filep)
                for msg in self._install_json["messages"]:
                    Msg().out("Info:", msg)
            except (KeyError, AttributeError, ValueError,
                    OSError, IOError):
                Msg().out("Info: no messages:", infofile, url, l=Msg.VER)
        return self._install_json

    def _install_logic(self, force=False):
        """Obtain random mirror, download, verify and install"""
        for url in self._get_mirrors(self._tarball):
            Msg().out("Info: install using:", url, l=Msg.VER)
            tarballfile = self._get_file(url)
            (status, version) = self._verify_version(tarballfile)
            if status:
                Msg().out("Info: installing udockertools", version)
                status = self._install(tarballfile)
            elif force:
                Msg().out("Info: forcing install of udockertools", version)
                status = self._install(tarballfile)
            else:
                Msg().err("Error: version is", version, "for", url, l=Msg.VER)
            if "://" in url and tarballfile:
                FileUtil(tarballfile).remove()
            if status:
                return True
        return False

    def install(self, force=False):
        """Get the udocker tools tarball and install the binaries"""
        if self.is_available() and not force:
            return True
        elif not self._autoinstall and not force:
            Msg().out("Warning: installation missing and autoinstall disabled",
                      l=Msg.WAR)
            return None
        elif not self._tarball:
            Msg().out("Info: UDOCKER_TARBALL not set, installation skipped",
                      l=Msg.VER)
            return True
        else:
            Msg().out("Info: udocker command line interface", __version__)
            Msg().out("Info: searching for udockertools",
                      self._tarball_release, l=Msg.INF)
            retry = self._installretry
            while retry:
                if self._install_logic(force):
                    self.get_installinfo()
                    Msg().out("Info: installation of udockertools successful")
                    return True
                retry = retry - 1
                Msg().err("Error: installation failure retrying ...", l=Msg.VER)
            self._instructions()
            self.get_installinfo()
            Msg().err("Error: installation of udockertools failed")
            return False
38.929577
80
0.587464
82572407eb58b6783dac49705f0a1b5bec3dbe54
13,809
py
Python
runtime/python/Lib/encodings/cp1254.py
hwaipy/InteractionFreeNode
88642b68430f57b028fd0f276a5709f89279e30d
[ "MIT" ]
5,926
2015-01-01T07:45:08.000Z
2022-03-31T12:34:38.000Z
lib/assets/Lib/encodings/cp1254.py
it56660024/cafe-grader-web
e9a1305fd62e79e54f6961f97ddc5cd57bafd73c
[ "MIT" ]
1,728
2015-01-01T01:09:12.000Z
2022-03-30T23:25:22.000Z
lib/assets/Lib/encodings/cp1254.py
it56660024/cafe-grader-web
e9a1305fd62e79e54f6961f97ddc5cd57bafd73c
[ "MIT" ]
574
2015-01-02T01:36:10.000Z
2022-03-26T10:18:48.000Z
""" Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp1254', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' 
# 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\u20ac' # 0x80 -> EURO SIGN '\ufffe' # 0x81 -> UNDEFINED '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS '\u2020' # 0x86 -> DAGGER '\u2021' # 0x87 -> DOUBLE DAGGER '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT '\u2030' # 0x89 -> PER MILLE SIGN '\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK '\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE '\ufffe' # 0x8D -> UNDEFINED '\ufffe' # 0x8E -> UNDEFINED '\ufffe' # 0x8F -> UNDEFINED '\ufffe' # 0x90 -> UNDEFINED '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK '\u2022' # 0x95 -> BULLET '\u2013' # 0x96 -> EN DASH '\u2014' # 0x97 -> EM DASH '\u02dc' # 0x98 -> SMALL TILDE '\u2122' # 0x99 -> TRADE MARK SIGN '\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK '\u0153' # 0x9C -> LATIN SMALL LIGATURE OE '\ufffe' # 0x9D -> UNDEFINED '\ufffe' # 0x9E -> UNDEFINED '\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS '\xa0' # 0xA0 -> NO-BREAK SPACE '\xa1' # 
0xA1 -> INVERTED EXCLAMATION MARK
    '\xa2'  # 0xA2 -> CENT SIGN
    '\xa3'  # 0xA3 -> POUND SIGN
    '\xa4'  # 0xA4 -> CURRENCY SIGN
    '\xa5'  # 0xA5 -> YEN SIGN
    '\xa6'  # 0xA6 -> BROKEN BAR
    '\xa7'  # 0xA7 -> SECTION SIGN
    '\xa8'  # 0xA8 -> DIAERESIS
    '\xa9'  # 0xA9 -> COPYRIGHT SIGN
    '\xaa'  # 0xAA -> FEMININE ORDINAL INDICATOR
    '\xab'  # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xac'  # 0xAC -> NOT SIGN
    '\xad'  # 0xAD -> SOFT HYPHEN
    '\xae'  # 0xAE -> REGISTERED SIGN
    '\xaf'  # 0xAF -> MACRON
    '\xb0'  # 0xB0 -> DEGREE SIGN
    '\xb1'  # 0xB1 -> PLUS-MINUS SIGN
    '\xb2'  # 0xB2 -> SUPERSCRIPT TWO
    '\xb3'  # 0xB3 -> SUPERSCRIPT THREE
    '\xb4'  # 0xB4 -> ACUTE ACCENT
    '\xb5'  # 0xB5 -> MICRO SIGN
    '\xb6'  # 0xB6 -> PILCROW SIGN
    '\xb7'  # 0xB7 -> MIDDLE DOT
    '\xb8'  # 0xB8 -> CEDILLA
    '\xb9'  # 0xB9 -> SUPERSCRIPT ONE
    '\xba'  # 0xBA -> MASCULINE ORDINAL INDICATOR
    '\xbb'  # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xbc'  # 0xBC -> VULGAR FRACTION ONE QUARTER
    '\xbd'  # 0xBD -> VULGAR FRACTION ONE HALF
    '\xbe'  # 0xBE -> VULGAR FRACTION THREE QUARTERS
    '\xbf'  # 0xBF -> INVERTED QUESTION MARK
    '\xc0'  # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
    '\xc1'  # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
    '\xc2'  # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    '\xc3'  # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
    '\xc4'  # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    '\xc5'  # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
    '\xc6'  # 0xC6 -> LATIN CAPITAL LETTER AE
    '\xc7'  # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
    '\xc8'  # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
    '\xc9'  # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
    '\xca'  # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    '\xcb'  # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
    '\xcc'  # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
    '\xcd'  # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
    '\xce'  # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    '\xcf'  # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
    '\u011e'  # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
    '\xd1'  # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
    '\xd2'  # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
    '\xd3'  # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
    '\xd4'  # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    '\xd5'  # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
    '\xd6'  # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    '\xd7'  # 0xD7 -> MULTIPLICATION SIGN
    '\xd8'  # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
    '\xd9'  # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
    '\xda'  # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
    '\xdb'  # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    '\xdc'  # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    '\u0130'  # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
    '\u015e'  # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
    '\xdf'  # 0xDF -> LATIN SMALL LETTER SHARP S
    '\xe0'  # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
    '\xe1'  # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
    '\xe2'  # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    '\xe3'  # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
    '\xe4'  # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
    '\xe5'  # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
    '\xe6'  # 0xE6 -> LATIN SMALL LETTER AE
    '\xe7'  # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
    '\xe8'  # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
    '\xe9'  # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
    '\xea'  # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    '\xeb'  # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
    '\xec'  # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
    '\xed'  # 0xED -> LATIN SMALL LETTER I WITH ACUTE
    '\xee'  # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    '\xef'  # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
    '\u011f'  # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
    '\xf1'  # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
    '\xf2'  # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
    '\xf3'  # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
    '\xf4'  # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    '\xf5'  # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
    '\xf6'  # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
    '\xf7'  # 0xF7 -> DIVISION SIGN
    '\xf8'  # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
    '\xf9'  # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
    '\xfa'  # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
    '\xfb'  # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    '\xfc'  # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
    '\u0131'  # 0xFD -> LATIN SMALL LETTER DOTLESS I
    '\u015f'  # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
    '\xff'  # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)

### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
44.834416
120
0.522775
81ee3124ddb9e0cf528d70831e82bea248bbef7c
1,168
py
Python
podcast-backend/src/app/pcasts/models/series.py
cuappdev/archives
061d0f9cccf278363ffaeb27fc655743b1052ae5
[ "MIT" ]
null
null
null
podcast-backend/src/app/pcasts/models/series.py
cuappdev/archives
061d0f9cccf278363ffaeb27fc655743b1052ae5
[ "MIT" ]
null
null
null
podcast-backend/src/app/pcasts/models/series.py
cuappdev/archives
061d0f9cccf278363ffaeb27fc655743b1052ae5
[ "MIT" ]
null
null
null
from . import *
from app.pcasts.utils import topic_utils

class Series(Base):
  __tablename__ = 'series'
  __bind_key__ = 'podcast_db'
  id = db.Column(db.Integer, primary_key=True)
  title = db.Column(db.Text)
  country = db.Column(db.String(190))
  author = db.Column(db.String(190))
  image_url_lg = db.Column(db.Text)
  image_url_sm = db.Column(db.Text)
  feed_url = db.Column(db.Text, nullable=False)
  genres = db.Column(db.Text)  # semicolon-separated
  subscribers_count = db.Column(db.Integer, nullable=False)
  topic_id = db.Column(db.Integer)
  subtopic_id = db.Column(db.BigInteger)

  def __init__(self, **kwargs):
    self.id = kwargs.get('id')
    self.title = kwargs.get('title', None)
    self.country = kwargs.get('country', None)
    self.author = kwargs.get('author', None)
    self.image_url_lg = kwargs.get('image_url_lg', None)
    self.image_url_sm = kwargs.get('image_url_sm', None)
    self.feed_url = kwargs.get('feed_url')
    self.genres = ';'.join(kwargs.get('genres', []))
    self.subscribers_count = kwargs.get('subscribers_count', 0)
    self.topic_id, self.subtopic_id = \
        topic_utils.get_topic_ids(kwargs.get('genres', []))
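# A minimal usage sketch (not part of the original file): constructing a
# Series from already-parsed feed data. It assumes an application context
# with a configured Flask-SQLAlchemy session (the db imported above); every
# field value here is made up for illustration.
def _demo_series():
  series = Series(title='Example Cast',
                  feed_url='https://example.com/rss',
                  genres=['News', 'Technology'],
                  subscribers_count=0)
  db.session.add(series)
  db.session.commit()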
36.5
63
0.695205
e3d6f299d1c840616368742aa77c420dd8f2b99e
2,622
py
Python
ios_image_clean.py
FisherEat/python_ios_projects
1d1bba34d31ff31b732b5ae573c0eca0a4a0fb7b
[ "MIT" ]
null
null
null
ios_image_clean.py
FisherEat/python_ios_projects
1d1bba34d31ff31b732b5ae573c0eca0a4a0fb7b
[ "MIT" ]
null
null
null
ios_image_clean.py
FisherEat/python_ios_projects
1d1bba34d31ff31b732b5ae573c0eca0a4a0fb7b
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

'''
Remove images that are not used in an iOS Xcode project.
'''

import csv
import codecs
import os
import re
import time
import openpyxl


def cur_file_dir():
    # Current directory of this file
    path = os.path.split(os.path.realpath(__file__))[0]
    return path


placeholdRe = re.compile(r'\%\d{1,2}\$s(\s|\?)?')

rootPath = cur_file_dir()
# Parent directory
rootPath = os.path.dirname(rootPath)
inputFile = os.path.join(rootPath, "001.xlsx")
# inputFile = rootPath + "v6Strings.csv"
# targetPath = "E:/WorkProject/iosV6/"
# targetPath = rootPath
targetPath = os.path.join(rootPath, "SmartHomeV6/")
outputFileData = [["en.lproj/", "Localizable.strings", 1],
                  ["zh-Hans.lproj/", "Localizable.strings", 2]]


def getLocalizedStringFile():
    for outputFile in outputFileData:
        targetFilePath = targetPath + outputFile[0]
        targetFile = targetFilePath + outputFile[1]
        column_index = outputFile[2]
        if not os.path.exists(targetFilePath):
            os.makedirs(targetFilePath)
        with codecs.open(targetFile, 'w', "utf-8") as out:
            out.write('// %s' % time.strftime('%Y-%m-%d %H:%M:%S'))
            s = 0
            # csv_reader = csv.reader(open(inputFile, encoding='utf-8'))
            xlxs = openpyxl.load_workbook(inputFile, read_only=True)
            wb = xlxs.active
            contentArr = []
            for row in wb.rows:
                # 0: column 1 is the group; 1: column 5 is English;
                # 2: column 4 is Chinese; 3: column 2 is the key
                s += 1
                if s == 1:
                    continue
                contentArr.append([row[1].value, row[5].value, row[4].value, row[2].value])
            contentArr.sort(key=lambda x: x[0].lower())
            groupName = ''
            for row in contentArr:
                if groupName != row[0]:
                    out.write('\n\n' + '/* %s */' % row[0] + '\n')
                    groupName = row[0]
                targetStr = row[column_index]
                if targetStr == None:
                    targetStr = ""
                if not isinstance(targetStr, str):
                    targetStr = str(targetStr)
                if targetStr.find('\"') > 0 or targetStr.startswith('\"'):
                    targetStr = targetStr.replace('\"', '\\\"')
                if targetStr.find('\n') > 0:
                    targetStr = targetStr.replace('\n', '\\n')
                targetStr = placeholdRe.sub('%@', targetStr)
                out.write("\"%s= \"%s\";" % ((row[3].replace('\n', '') + "\"").ljust(60), targetStr))
                out.write("\n")
            out.flush()

# if __name__ == '__main__':
#     getLocalizedStringFile()
31.97561
104
0.528986
c1fa97b3e4c562f18051278b9a800e1c2e4afb5a
10,774
py
Python
src/python/grpcio_tests/tests_aio/unit/metadata_test.py
emkornfield/grpc
34566361503541b7f080406d4b282088759071cf
[ "Apache-2.0" ]
1
2021-02-20T04:07:31.000Z
2021-02-20T04:07:31.000Z
src/python/grpcio_tests/tests_aio/unit/metadata_test.py
emkornfield/grpc
34566361503541b7f080406d4b282088759071cf
[ "Apache-2.0" ]
1
2019-11-12T15:41:31.000Z
2020-01-09T12:00:04.000Z
src/python/grpcio_tests/tests_aio/unit/metadata_test.py
emkornfield/grpc
34566361503541b7f080406d4b282088759071cf
[ "Apache-2.0" ]
2
2020-11-24T04:42:45.000Z
2020-11-24T05:22:12.000Z
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests behavior around the metadata mechanism."""

import asyncio
import logging
import platform
import random
import unittest

import grpc
from grpc.experimental import aio

from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit import _common

_TEST_CLIENT_TO_SERVER = '/test/TestClientToServer'
_TEST_SERVER_TO_CLIENT = '/test/TestServerToClient'
_TEST_TRAILING_METADATA = '/test/TestTrailingMetadata'
_TEST_ECHO_INITIAL_METADATA = '/test/TestEchoInitialMetadata'
_TEST_GENERIC_HANDLER = '/test/TestGenericHandler'
_TEST_UNARY_STREAM = '/test/TestUnaryStream'
_TEST_STREAM_UNARY = '/test/TestStreamUnary'
_TEST_STREAM_STREAM = '/test/TestStreamStream'

_REQUEST = b'\x00\x00\x00'
_RESPONSE = b'\x01\x01\x01'

_INITIAL_METADATA_FROM_CLIENT_TO_SERVER = aio.Metadata(
    ('client-to-server', 'question'),
    ('client-to-server-bin', b'\x07\x07\x07'),
)
_INITIAL_METADATA_FROM_SERVER_TO_CLIENT = aio.Metadata(
    ('server-to-client', 'answer'),
    ('server-to-client-bin', b'\x06\x06\x06'),
)
_TRAILING_METADATA = aio.Metadata(
    ('a-trailing-metadata', 'stack-trace'),
    ('a-trailing-metadata-bin', b'\x05\x05\x05'),
)
_INITIAL_METADATA_FOR_GENERIC_HANDLER = aio.Metadata(
    ('a-must-have-key', 'secret'),)

_INVALID_METADATA_TEST_CASES = (
    (
        TypeError,
        ((42, 42),),
    ),
    (
        TypeError,
        (({}, {}),),
    ),
    (
        TypeError,
        ((None, {}),),
    ),
    (
        TypeError,
        (({}, {}),),
    ),
    (
        TypeError,
        (('normal', object()),),
    ),
)


class _TestGenericHandlerForMethods(grpc.GenericRpcHandler):

    def __init__(self):
        self._routing_table = {
            _TEST_CLIENT_TO_SERVER:
                grpc.unary_unary_rpc_method_handler(self._test_client_to_server
                                                   ),
            _TEST_SERVER_TO_CLIENT:
                grpc.unary_unary_rpc_method_handler(self._test_server_to_client
                                                   ),
            _TEST_TRAILING_METADATA:
                grpc.unary_unary_rpc_method_handler(self._test_trailing_metadata
                                                   ),
            _TEST_UNARY_STREAM:
                grpc.unary_stream_rpc_method_handler(self._test_unary_stream),
            _TEST_STREAM_UNARY:
                grpc.stream_unary_rpc_method_handler(self._test_stream_unary),
            _TEST_STREAM_STREAM:
                grpc.stream_stream_rpc_method_handler(self._test_stream_stream),
        }

    @staticmethod
    async def _test_client_to_server(request, context):
        assert _REQUEST == request
        assert _common.seen_metadata(_INITIAL_METADATA_FROM_CLIENT_TO_SERVER,
                                     context.invocation_metadata())
        return _RESPONSE

    @staticmethod
    async def _test_server_to_client(request, context):
        assert _REQUEST == request
        await context.send_initial_metadata(
            _INITIAL_METADATA_FROM_SERVER_TO_CLIENT)
        return _RESPONSE

    @staticmethod
    async def _test_trailing_metadata(request, context):
        assert _REQUEST == request
        context.set_trailing_metadata(_TRAILING_METADATA)
        return _RESPONSE

    @staticmethod
    async def _test_unary_stream(request, context):
        assert _REQUEST == request
        assert _common.seen_metadata(_INITIAL_METADATA_FROM_CLIENT_TO_SERVER,
                                     context.invocation_metadata())
        await context.send_initial_metadata(
            _INITIAL_METADATA_FROM_SERVER_TO_CLIENT)
        yield _RESPONSE
        context.set_trailing_metadata(_TRAILING_METADATA)

    @staticmethod
    async def _test_stream_unary(request_iterator, context):
        assert _common.seen_metadata(_INITIAL_METADATA_FROM_CLIENT_TO_SERVER,
                                     context.invocation_metadata())
        await context.send_initial_metadata(
            _INITIAL_METADATA_FROM_SERVER_TO_CLIENT)
        async for request in request_iterator:
            assert _REQUEST == request
        context.set_trailing_metadata(_TRAILING_METADATA)
        return _RESPONSE

    @staticmethod
    async def _test_stream_stream(request_iterator, context):
        assert _common.seen_metadata(_INITIAL_METADATA_FROM_CLIENT_TO_SERVER,
                                     context.invocation_metadata())
        await context.send_initial_metadata(
            _INITIAL_METADATA_FROM_SERVER_TO_CLIENT)
        async for request in request_iterator:
            assert _REQUEST == request
        yield _RESPONSE
        context.set_trailing_metadata(_TRAILING_METADATA)

    def service(self, handler_call_details):
        return self._routing_table.get(handler_call_details.method)


class _TestGenericHandlerItself(grpc.GenericRpcHandler):

    @staticmethod
    async def _method(request, unused_context):
        assert _REQUEST == request
        return _RESPONSE

    def service(self, handler_call_details):
        assert _common.seen_metadata(_INITIAL_METADATA_FOR_GENERIC_HANDLER,
                                     handler_call_details.invocation_metadata)
        return grpc.unary_unary_rpc_method_handler(self._method)


async def _start_test_server():
    server = aio.server()
    port = server.add_insecure_port('[::]:0')
    server.add_generic_rpc_handlers((
        _TestGenericHandlerForMethods(),
        _TestGenericHandlerItself(),
    ))
    await server.start()
    return 'localhost:%d' % port, server


class TestMetadata(AioTestBase):

    async def setUp(self):
        address, self._server = await _start_test_server()
        self._client = aio.insecure_channel(address)

    async def tearDown(self):
        await self._client.close()
        await self._server.stop(None)

    async def test_from_client_to_server(self):
        multicallable = self._client.unary_unary(_TEST_CLIENT_TO_SERVER)
        call = multicallable(_REQUEST,
                             metadata=_INITIAL_METADATA_FROM_CLIENT_TO_SERVER)
        self.assertEqual(_RESPONSE, await call)
        self.assertEqual(grpc.StatusCode.OK, await call.code())

    async def test_from_server_to_client(self):
        multicallable = self._client.unary_unary(_TEST_SERVER_TO_CLIENT)
        call = multicallable(_REQUEST)

        self.assertEqual(_INITIAL_METADATA_FROM_SERVER_TO_CLIENT, await
                         call.initial_metadata())
        self.assertEqual(_RESPONSE, await call)
        self.assertEqual(grpc.StatusCode.OK, await call.code())

    async def test_trailing_metadata(self):
        multicallable = self._client.unary_unary(_TEST_TRAILING_METADATA)
        call = multicallable(_REQUEST)
        self.assertEqual(_TRAILING_METADATA, await call.trailing_metadata())
        self.assertEqual(_RESPONSE, await call)
        self.assertEqual(grpc.StatusCode.OK, await call.code())

    async def test_from_client_to_server_with_list(self):
        multicallable = self._client.unary_unary(_TEST_CLIENT_TO_SERVER)
        call = multicallable(
            _REQUEST,
            metadata=list(_INITIAL_METADATA_FROM_CLIENT_TO_SERVER))  # pytype: disable=wrong-arg-types
        self.assertEqual(_RESPONSE, await call)
        self.assertEqual(grpc.StatusCode.OK, await call.code())

    @unittest.skipIf(platform.system() == 'Windows',
                     'https://github.com/grpc/grpc/issues/21943')
    async def test_invalid_metadata(self):
        multicallable = self._client.unary_unary(_TEST_CLIENT_TO_SERVER)
        for exception_type, metadata in _INVALID_METADATA_TEST_CASES:
            with self.subTest(metadata=metadata):
                with self.assertRaises(exception_type):
                    call = multicallable(_REQUEST, metadata=metadata)
                    await call

    async def test_generic_handler(self):
        multicallable = self._client.unary_unary(_TEST_GENERIC_HANDLER)
        call = multicallable(_REQUEST,
                             metadata=_INITIAL_METADATA_FOR_GENERIC_HANDLER)
        self.assertEqual(_RESPONSE, await call)
        self.assertEqual(grpc.StatusCode.OK, await call.code())

    async def test_unary_stream(self):
        multicallable = self._client.unary_stream(_TEST_UNARY_STREAM)
        call = multicallable(_REQUEST,
                             metadata=_INITIAL_METADATA_FROM_CLIENT_TO_SERVER)

        self.assertTrue(
            _common.seen_metadata(_INITIAL_METADATA_FROM_SERVER_TO_CLIENT,
                                  await call.initial_metadata()))
        self.assertSequenceEqual([_RESPONSE],
                                 [request async for request in call])
        self.assertEqual(_TRAILING_METADATA, await call.trailing_metadata())
        self.assertEqual(grpc.StatusCode.OK, await call.code())

    async def test_stream_unary(self):
        multicallable = self._client.stream_unary(_TEST_STREAM_UNARY)
        call = multicallable(metadata=_INITIAL_METADATA_FROM_CLIENT_TO_SERVER)
        await call.write(_REQUEST)
        await call.done_writing()

        self.assertTrue(
            _common.seen_metadata(_INITIAL_METADATA_FROM_SERVER_TO_CLIENT,
                                  await call.initial_metadata()))
        self.assertEqual(_RESPONSE, await call)
        self.assertEqual(_TRAILING_METADATA, await call.trailing_metadata())
        self.assertEqual(grpc.StatusCode.OK, await call.code())

    async def test_stream_stream(self):
        multicallable = self._client.stream_stream(_TEST_STREAM_STREAM)
        call = multicallable(metadata=_INITIAL_METADATA_FROM_CLIENT_TO_SERVER)
        await call.write(_REQUEST)
        await call.done_writing()

        self.assertTrue(
            _common.seen_metadata(_INITIAL_METADATA_FROM_SERVER_TO_CLIENT,
                                  await call.initial_metadata()))
        self.assertSequenceEqual([_RESPONSE],
                                 [request async for request in call])
        self.assertEqual(_TRAILING_METADATA, await call.trailing_metadata())
        self.assertEqual(grpc.StatusCode.OK, await call.code())


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    unittest.main(verbosity=2)
37.409722
112
0.676536
b61444f361d1862c332036e75321a1a0ef16ebbd
584
py
Python
run.py
playgrdstar/almost_simplest_server
fe99a79afac1bfdff493a73b724ae5e608290070
[ "MIT" ]
1
2019-01-28T00:20:57.000Z
2019-01-28T00:20:57.000Z
run.py
playgrdstar/almost_simplest_server
fe99a79afac1bfdff493a73b724ae5e608290070
[ "MIT" ]
null
null
null
run.py
playgrdstar/almost_simplest_server
fe99a79afac1bfdff493a73b724ae5e608290070
[ "MIT" ]
null
null
null
from flask import Flask
from flask import render_template

app = Flask(__name__)

@app.route("/")
def main():
    return render_template('base.html',
                           title='This is the title',
                           bodytext='This is the body text')

@app.route("/extended")
def extended():
    return render_template('extension.html',
                           title_extended='This is the title of another page',
                           bodytext_extended='This is the body text of another page ')

if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True, port=8000)
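# A hedged smoke test for the two routes above, using Flask's built-in test
# client so no live server is needed; it assumes the referenced templates
# exist under templates/, otherwise render_template raises.
def _smoke_test():
    with app.test_client() as client:
        assert client.get('/').status_code == 200
        assert client.get('/extended').status_code == 200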
29.2
83
0.60274
80e90ccbc0081b6ad70785d6ac8ccaaf59380ff2
817
py
Python
game/pathutils.py
Kupoman/prototype-mercury
895bd8f62bbff66f6cbce3dd6b98efc5468395c2
[ "Apache-2.0" ]
2
2018-04-03T02:49:48.000Z
2019-03-11T00:27:57.000Z
game/pathutils.py
Moguri/prototype-mercury
895bd8f62bbff66f6cbce3dd6b98efc5468395c2
[ "Apache-2.0" ]
58
2020-04-13T05:45:45.000Z
2022-02-23T01:49:44.000Z
game/pathutils.py
Moguri/prototype-mercury
895bd8f62bbff66f6cbce3dd6b98efc5468395c2
[ "Apache-2.0" ]
null
null
null
import appdirs
import panda3d.core as p3d

_APPNAME = 'mercury'
_APPAUTHOR = False
_APPDIRS = appdirs.AppDirs(appname=_APPNAME, appauthor=_APPAUTHOR, roaming=True)

APP_ROOT_DIR = p3d.Filename.expand_from('$MAIN_DIR')
CONFIG_DIR = p3d.Filename(APP_ROOT_DIR, 'config')
USER_CONFIG_DIR = p3d.Filename.from_os_specific(_APPDIRS.user_config_dir)

_PATH_VARS = {
    '$USER_APPDATA': _APPDIRS.user_data_dir,
    '$MAIN_DIR': str(APP_ROOT_DIR),
}


def parse_path(path):
    path = str(path)
    path_parts = path.split('/')
    path_parts = [
        _PATH_VARS.get(i, i)
        for i in path_parts
    ]
    return p3d.Filename('/'.join(path_parts))


def get_saves_dir():
    savespath = p3d.ConfigVariableString('mercury-saves-dir', '$USER_APPDATA/saves')
    savesdir = parse_path(savespath)
    return savesdir
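# A short, hedged example of how parse_path expands the variables above;
# the printed paths depend on where appdirs resolves user data on the
# current platform, so the exact output varies per machine.
def _demo_paths():
    print(parse_path('$USER_APPDATA/saves'))  # expands to the user data dir
    print(get_saves_dir())                    # honors mercury-saves-dir if set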
22.081081
84
0.71481
4a6e98f234b3d04f907e9fd6f4901b14a10ba684
4,766
py
Python
cfgov/regulations3k/wagtail_hooks.py
adebisi-aden/consumerfinance.gov
8c0f5afac341823c59f73b0c6bd60592e0f5eaca
[ "CC0-1.0" ]
37
2020-08-18T19:52:39.000Z
2022-03-23T08:08:41.000Z
cfgov/regulations3k/wagtail_hooks.py
adebisi-aden/consumerfinance.gov
8c0f5afac341823c59f73b0c6bd60592e0f5eaca
[ "CC0-1.0" ]
338
2020-08-14T20:46:36.000Z
2022-03-31T20:49:32.000Z
cfgov/regulations3k/wagtail_hooks.py
adebisi-aden/consumerfinance.gov
8c0f5afac341823c59f73b0c6bd60592e0f5eaca
[ "CC0-1.0" ]
14
2020-10-21T15:27:03.000Z
2022-03-17T03:16:36.000Z
from datetime import date

from django.utils.encoding import force_text
from wagtail.contrib.modeladmin.options import modeladmin_register

from treemodeladmin.helpers import TreeAdminURLHelper
from treemodeladmin.options import TreeModelAdmin
from treemodeladmin.views import TreeIndexView

from regulations3k.copyable_modeladmin import CopyableModelAdmin
from regulations3k.models import EffectiveVersion, Part, Section, Subpart


class RegsURLHelper(TreeAdminURLHelper):

    def crumb(
        self, parent_field=None, parent_instance=None, specific_instance=None
    ):
        """ Override the URL helper's crumb method to shorten reg model crumbs.

        The regulation models include their "parent" model's string
        representation within their own string representation. This is useful
        when referencing a model without context, but not so useful in
        breadcrumbs. This method will remove the "[Parent], " from the string
        if the specific_instance and parent_instance are provided.
        """
        index_url, crumb_text = super().crumb(
            parent_field=parent_field,
            parent_instance=parent_instance,
            specific_instance=specific_instance
        )

        if specific_instance is not None and parent_instance is not None:
            crumb_text = force_text(specific_instance).replace(
                force_text(parent_instance) + ", ", ""
            )

        return (index_url, crumb_text)


class SectionPreviewIndexView(TreeIndexView):

    def get_buttons_for_obj(self, obj):
        btns = self.button_helper.get_buttons_for_obj(
            obj, classnames_add=['button-small', 'button-secondary'])

        effective_version = obj.subpart.version
        date_str = effective_version.effective_date.isoformat()
        part = obj.subpart.version.part
        page = part.page.first()

        if page is not None:
            if effective_version.draft:
                label = 'View draft'
            else:
                label = 'View live'

            preview_url = page.url + page.reverse_subpage(
                'section',
                kwargs={'date_str': date_str, 'section_label': obj.url_path}
            )
            preview_button = {
                'url': preview_url,
                'label': label,
                'classname': 'button button-small button-secondary',
                'title': 'Preview this {}'.format(self.verbose_name),
            }

            btns.insert(-1, preview_button)

        return btns


class SectionModelAdmin(TreeModelAdmin):
    model = Section
    menu_label = 'Regulation section content'
    menu_icon = 'list-ul'
    list_display = (
        'label',
        'title',
    )
    search_fields = (
        'label',
        'title')
    parent_field = 'subpart'
    index_view_class = SectionPreviewIndexView
    url_helper_class = RegsURLHelper


class SubpartModelAdmin(TreeModelAdmin):
    model = Subpart
    menu_label = 'Regulation subpart'
    menu_icon = 'list-ul'
    list_display = (
        'title',
        'section_range'
    )
    child_field = 'sections'
    child_model_admin = SectionModelAdmin
    parent_field = 'version'
    ordering = ['subpart_type', 'title']
    url_helper_class = RegsURLHelper


class EffectiveVersionModelAdmin(CopyableModelAdmin):
    model = EffectiveVersion
    menu_label = 'Regulation effective versions'
    menu_icon = 'list-ul'
    list_display = (
        'effective_date',
        'status',
        'created')
    child_field = 'subparts'
    child_model_admin = SubpartModelAdmin
    parent_field = 'part'
    url_helper_class = RegsURLHelper

    def copy(self, instance):
        subparts = instance.subparts.all()

        # Copy the instance
        new_version = instance
        new_version.pk = None
        new_version.created = date.today()
        new_version.draft = True
        new_version.save()

        for subpart in subparts:
            sections = subpart.sections.all()

            # Copy the subpart
            new_subpart = subpart
            new_subpart.pk = None
            new_subpart.version = new_version
            new_subpart.save()

            for section in sections:
                # Copy the section
                new_section = section
                new_section.pk = None
                new_section.subpart = new_subpart
                new_section.save()

        return new_version


@modeladmin_register
class PartModelAdmin(TreeModelAdmin):
    model = Part
    menu_label = 'Regulations'
    menu_icon = 'list-ul'
    list_display = (
        'part_number',
        'title',
        'short_name'
    )
    child_field = 'versions'
    child_model_admin = EffectiveVersionModelAdmin
    url_helper_class = RegsURLHelper
29.974843
79
0.63911
c1506ca6215e94c048d32881551be919e081901a
2,528
py
Python
app.py
carusooo/tapper-time
26eaf36b1ad2732c1de687ba52776e383a756339
[ "MIT" ]
null
null
null
app.py
carusooo/tapper-time
26eaf36b1ad2732c1de687ba52776e383a756339
[ "MIT" ]
null
null
null
app.py
carusooo/tapper-time
26eaf36b1ad2732c1de687ba52776e383a756339
[ "MIT" ]
null
null
null
import logging
import threading
import time

from lib.screens import core, task_list, task_timer, timer_done
from lib.service import todoist

logging.basicConfig(level=logging.DEBUG)


class Application(core.App):
    """ The core application """
    service: todoist.Todoist
    current_screen: core.Screen = None
    __thread: threading.Thread
    tasks_changed: bool
    pomodoro_duration_s: int

    def __init__(self, api, project_id, pomodoro_duration):
        self.service = todoist.Todoist(api_key=api, project_id=project_id)
        self.pomodoro_duration_s = pomodoro_duration * 60
        self.tasks_changed = False
        self.__redraw = False
        self.__task_list = task_list.TaskListScreen(self, self.service)
        self.__task_timer = task_timer.TaskTimerScreen(self, self.service)
        self.__timer_done = timer_done.TimerDoneScreen(self, self.service)
        self.__thread = None
        self.__timer_end_time = 0

    def start(self):
        self.current_screen = self.task_list
        return self.current_screen.draw()

    def handle_click(self, click):
        # The XY are reported for portrait orientation and with (0,0) in the lower left
        point = core.Point(y=(core.SCREEN_HEIGHT - click.X[0]), x=click.Y[0], s=click.S[0])
        self.current_screen = self.current_screen.handle_click(point)
        return self.current_screen.draw()

    def draw(self):
        return self.current_screen.draw()

    @property
    def task_list(self) -> core.Screen:
        if self.__thread and self.__thread.is_alive():
            self.__thread.cancel()
        if self.tasks_changed:
            self.__task_list.refresh()
            self.tasks_changed = False
        return self.__task_list

    @property
    def task_timer(self) -> core.Screen:
        self.__timer_end_time = time.time() + self.pomodoro_duration_s
        self.__task_timer.end_time = self.__timer_end_time
        self.update_timer()
        return self.__task_timer

    @property
    def timer_done(self) -> core.Screen:
        return self.__timer_done

    @property
    def redraw(self):
        redraw = self.__redraw
        self.__redraw = False
        return redraw

    def request_redraw(self):
        self.__redraw = True

    def update_timer(self):
        if time.time() >= self.__timer_end_time:
            self.current_screen = self.timer_done
            self.request_redraw()
        self.__thread = threading.Timer(60, self.update_timer)
        self.__thread.start()
        self.request_redraw()
32
89
0.672468
d86d20bf7ce4365ecff4e232e65ef6077b70dbad
3,899
py
Python
code/data/util.py
Steve-Tod/DeformSyncNet
c4e6628ae4fd80e6e6aa702f4cd5885368047b4f
[ "MIT" ]
19
2020-09-09T10:38:25.000Z
2021-09-08T03:24:02.000Z
code/data/util.py
Steve-Tod/DeformSyncNet
c4e6628ae4fd80e6e6aa702f4cd5885368047b4f
[ "MIT" ]
null
null
null
code/data/util.py
Steve-Tod/DeformSyncNet
c4e6628ae4fd80e6e6aa702f4cd5885368047b4f
[ "MIT" ]
4
2020-09-16T10:59:50.000Z
2022-01-07T00:10:18.000Z
import torch
import random
import numpy as np


def read_cat2id_dict(path):
    with open(path) as f:
        lines = f.read().splitlines()
    cat2id_dict = {}
    for l in lines:
        cat, identity = l.split('\t')
        cat2id_dict[cat] = identity
    return cat2id_dict


# Normalization
def center_bounding_box(points):
    # input : Numpy Tensor N_pts, D_dim
    # ouput : Numpy Tensor N_pts, D_dim
    # Center bounding box of first 3 dimensions
    if isinstance(points, torch.Tensor):
        min_vals = torch.min(points, 0)[0]
        max_vals = torch.max(points, 0)[0]
        points = points - (min_vals + max_vals) / 2
        return points, (min_vals + max_vals) / 2, (max_vals - min_vals) / 2
    elif isinstance(points, np.ndarray):
        min_vals = np.min(points, 0)
        max_vals = np.max(points, 0)
        points = points - (min_vals + max_vals) / 2
        return points, (min_vals + max_vals) / 2, (max_vals - min_vals) / 2
    else:
        print(type(points))
        print("Pierre-Alain was right.")


def BoundingBox(points):
    points, _, bounding_box = center_bounding_box(points)
    diameter = torch.max(bounding_box)
    points = points / diameter
    return points


############
# re-sample
############
class Walkerrandom:
    """ Walker's alias method for random objects with different probablities
    http://code.activestate.com/recipes/576564-walkers-alias-method-for-random-objects-with-diffe/
    """

    def __init__(self, weights, keys=None):
        """ builds the Walker tables prob and inx for calls to random().
        The weights (a list or tuple or iterable) can be in any order;
        they need not sum to 1.
        """
        n = self.n = len(weights)
        self.keys = keys
        sumw = sum(weights)
        prob = [w * n / sumw for w in weights]  # av 1
        inx = [-1] * n
        short = [j for j, p in enumerate(prob) if p < 1]
        long = [j for j, p in enumerate(prob) if p > 1]
        while short and long:
            j = short.pop()
            k = long[-1]
            # assert prob[j] <= 1 <= prob[k]
            inx[j] = k
            prob[k] -= (1 - prob[j])  # -= residual weight
            if prob[k] < 1:
                short.append(k)
                long.pop()
            #if Test:
            #    print "test Walkerrandom: j k pk: %d %d %.2g" % (j, k, prob[k])
        self.prob = prob
        self.inx = inx
        #if Test:
        #    print "test", self

    def __str__(self):
        """ e.g. "Walkerrandom prob: 0.4 0.8 1 0.8 inx: 3 3 -1 2" """
        probstr = " ".join(["%.2g" % x for x in self.prob])
        inxstr = " ".join(["%.2g" % x for x in self.inx])
        return "Walkerrandom prob: %s inx: %s" % (probstr, inxstr)

    def random(self):
        """ each call -> a random int or key with the given probability
        fast: 1 randint(), 1 random.uniform(), table lookup
        """
        u = random.uniform(0, 1)
        j = random.randint(0, self.n - 1)  # or low bits of u
        randint = j if u <= self.prob[j] else self.inx[j]
        return self.keys[randint] if self.keys else randint


def resample_points(points, scales, n_out_points=None):
    assert (points.ndim == 3)
    assert (scales.ndim == 1)
    assert (points.shape[0] == scales.size)
    n_sets = points.shape[0]
    n_in_points = points.shape[1]
    n_dim = points.shape[2]
    if n_out_points is None:
        n_out_points = points.shape[1]

    wrand = Walkerrandom(scales)
    set_idxs = np.empty(n_out_points, dtype='int')
    index_list = [[] for _ in range(points.shape[0])]
    for i in range(n_out_points):
        set_idxs[i] = wrand.random()
        index_list[set_idxs[i]].append(i)
    point_rand_idxs_in_sets = np.random.randint(n_in_points, size=n_out_points)
    sample_points = points[set_idxs, point_rand_idxs_in_sets, :]
    return sample_points, set_idxs, index_list
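# A small, hedged demonstration of the alias-method sampler feeding the
# weighted point re-sampler above; the set count, point counts, and weights
# are arbitrary illustration values.
def _demo_resample():
    pts = np.random.rand(3, 100, 3)      # 3 point sets of 100 3-D points each
    scales = np.array([0.2, 0.3, 0.5])   # per-set sampling weights
    sample, set_idxs, index_list = resample_points(pts, scales, n_out_points=64)
    assert sample.shape == (64, 3)       # one point per draw, weighted by scales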
32.764706
98
0.582457
5d9d6a3fc17125432a4567bf195ede8c0582038f
879
py
Python
tests/test_options_in_viewfunc.py
this-id/flask-openapi3
b587e4e1534b0b04f7200f64264ec9d9f70f2103
[ "MIT" ]
null
null
null
tests/test_options_in_viewfunc.py
this-id/flask-openapi3
b587e4e1534b0b04f7200f64264ec9d9f70f2103
[ "MIT" ]
2
2022-03-31T10:02:42.000Z
2022-03-31T13:44:32.000Z
tests/test_options_in_viewfunc.py
this-id/flask-openapi3
b587e4e1534b0b04f7200f64264ec9d9f70f2103
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# @Author  : llc
# @Time    : 2022/5/15 14:19
import pytest

from flask_openapi3 import APIBlueprint, OpenAPI

app = OpenAPI(__name__)
app.config["TESTING"] = True

api = APIBlueprint(
    '/book',
    __name__,
    url_prefix='/api',
)


@pytest.fixture
def client():
    client = app.test_client()
    return client


@app.get('/book', endpoint='endpoint_get_book')
def get_book():
    return 'app_book'


@api.post('/book', endpoint='endpoint_post_book')
def create_book():
    return 'api_book'


# register api
app.register_api(api)


def test_get(client):
    resp = client.get("/book")
    assert resp.text == 'app_book'
    assert 'endpoint_get_book' in app.view_functions.keys()


def test_post(client):
    resp = client.post("/api/book")
    assert resp.text == 'api_book'
    assert '/book.endpoint_post_book' in app.view_functions.keys()
16.903846
66
0.67008
845b7efe93a701d1f845fea3a18ec84c6dc93ac9
467
py
Python
gallery/migrations/0003_auto_20200309_1402.py
D0nmoses/dagram
4416c4eafda83adea4ebfc2946386dfdfc9ab520
[ "MIT" ]
null
null
null
gallery/migrations/0003_auto_20200309_1402.py
D0nmoses/dagram
4416c4eafda83adea4ebfc2946386dfdfc9ab520
[ "MIT" ]
6
2021-03-19T00:49:29.000Z
2021-09-08T01:50:57.000Z
gallery/migrations/0003_auto_20200309_1402.py
D0nmoses/dagram
4416c4eafda83adea4ebfc2946386dfdfc9ab520
[ "MIT" ]
null
null
null
# Generated by Django 3.0.4 on 2020-03-09 11:02

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('gallery', '0002_auto_20200309_1302'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='profile_pic',
            field=models.ImageField(blank=True, default='/profile-pic/default-no-profile-pic.jpg', upload_to='profile-pic/'),
        ),
    ]
24.578947
125
0.627409
a210e3ca67217c02d3dfdfa2dced73963e0c5237
4,711
py
Python
src/rightsizer/main.py
gregoryjordanm/awsrightsizer
6c9cb993437e9dd45356bf283d80eefbaf83e0d4
[ "Apache-2.0" ]
27
2018-07-10T18:21:52.000Z
2018-09-29T14:50:40.000Z
src/rightsizer/main.py
gregoryjordanm/awsrightsizer
6c9cb993437e9dd45356bf283d80eefbaf83e0d4
[ "Apache-2.0" ]
8
2018-07-11T01:49:25.000Z
2018-07-25T14:14:15.000Z
src/rightsizer/main.py
gregoryjordanm/awsrightsizer
6c9cb993437e9dd45356bf283d80eefbaf83e0d4
[ "Apache-2.0" ]
1
2018-07-07T15:55:26.000Z
2018-07-07T15:55:26.000Z
# Standard Library Imports
import csv
import sys
from logging import DEBUG, WARNING
from datetime import datetime

# Third-party Imports
import click

# Local Imports
from .AwsRightSizer import Main


def print_help(ctx, param, value):
    if value is False:
        return
    click.echo(ctx.get_help())
    ctx.exit()


@click.command()
@click.option('--profile', '-p', 'profile', default='default', required=False,
              help='Your AWS Credentials Profile.')
@click.option('--access-key', '-k', 'accessKey', default=None, required=False,
              help='Your AWS Access Key ID.')
@click.option('--secret-key', '-s', 'secretKey', default=None, required=False,
              help='Your AWS Secret Access Key ID.')
@click.option('--region', '-r', 'region', default=None, required=False,
              help='The AWS Region to query.')
@click.option('--threshold', '-t', 'threshold', nargs=2, type=int, default=[5, 30], required=False,
              help='The Cloudwatch [average, max] CPU usage threshold.')
@click.option('--query-config', '-q', 'query', nargs=2, type=int, default=[30, 1800], required=False,
              help='The amount of [days, period] to query Cloudwatch for.')
@click.option('--output', '-o', 'output', default='report_{}'.format(datetime.date(datetime.now())),
              help='The Name/Location of the file to output.')
@click.option('--ec2-only', '-e', 'ec2only', is_flag=True)
@click.option('--rds-only', '-d', 'rdsonly', is_flag=True)
@click.option('--verbose', '-v', 'verbose', is_flag=True)
@click.option('--help', '-h', is_flag=True, expose_value=False, is_eager=False,
              callback=print_help, help='Print help message')
@click.pass_context
def main(ctx, profile, accessKey, secretKey, region, threshold, query, output, ec2only, rdsonly, verbose):
    """
    rightsizer takes user input and provides an output CSV of suggestions.
    """
    if len(sys.argv) <= 1:
        print_help(ctx, None, value=True)

    if verbose:
        v = DEBUG
    else:
        v = WARNING

    x = Main(
        accessKey=accessKey,
        secretKey=secretKey,
        region=region,
        profile=profile,
        thresholdAvg=threshold[0],
        thresholdMax=threshold[1],
        queryDays=query[0],
        queryPeriod=query[1],
        output=output,
        verbose=v)

    if ec2only:
        if profile:
            pf = profile
        else:
            pf = region
        extra = {
            'Id': '* - AWS Suggested Upgrade'
        }
        with open('{}_ec2_{}'.format(pf, x.output), 'w', newline='') as f:
            fieldnames = [
                'Id',
                'Name',
                'AvgCpu',
                'CurrentType',
                'SuggestedType'
            ]
            w = csv.DictWriter(f, fieldnames)
            w.writeheader()
            for r in x.getec2suggestions():
                w.writerow(r)
            csv.writer(f).writerow('')
            w.writerow(extra)
    elif rdsonly:
        if profile:
            pf = profile
        else:
            pf = region
        extra = {
            'Id': '* - AWS Suggested Upgrade'
        }
        with open('{}_rds_{}'.format(pf, x.output), 'w', newline='') as f:
            fieldnames = [
                'Id',
                'Name',
                'Engine',
                'AvgCpu',
                'CurrentType',
                'SuggestedType'
            ]
            w = csv.DictWriter(f, fieldnames)
            w.writeheader()
            for r in x.getrdssuggestions():
                w.writerow(r)
            csv.writer(f).writerow('')
            w.writerow(extra)
    else:
        if profile:
            pf = profile
        else:
            pf = region
        extra = {
            'Id': '* - AWS Suggested Upgrade'
        }
        with open('{}_ec2_{}'.format(pf, x.output), 'w', newline='') as f:
            fieldnames = [
                'Id',
                'Name',
                'AvgCpu',
                'CurrentType',
                'SuggestedType'
            ]
            w = csv.DictWriter(f, fieldnames)
            w.writeheader()
            for r in x.getec2suggestions():
                w.writerow(r)
            csv.writer(f).writerow('')
            w.writerow(extra)
        with open('{}_rds_{}'.format(pf, x.output), 'w', newline='') as f:
            fieldnames = [
                'Id',
                'Name',
                'Engine',
                'AvgCpu',
                'CurrentType',
                'SuggestedType'
            ]
            w = csv.DictWriter(f, fieldnames)
            w.writeheader()
            for r in x.getrdssuggestions():
                w.writerow(r)
            csv.writer(f).writerow('')
            w.writerow(extra)
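# A hedged sketch of driving the CLI in-process with Click's test runner;
# the profile value is a placeholder and a real run needs valid AWS
# credentials plus network access to CloudWatch.
def _demo_cli():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(main, ['--profile', 'default', '--ec2-only'])
    print(result.output)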
25.464865
163
0.516239
225a2c587748c42a11ad080786aa506841e79f67
970
py
Python
config.py
zhongxinghong/pkuyouth
9262346b3c88109ca731387095d5678071a05d60
[ "Apache-2.0" ]
1
2018-06-19T04:00:35.000Z
2018-06-19T04:00:35.000Z
config.py
zhongxinghong/PKUyouth
9262346b3c88109ca731387095d5678071a05d60
[ "Apache-2.0" ]
null
null
null
config.py
zhongxinghong/PKUyouth
9262346b3c88109ca731387095d5678071a05d60
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: config.py

import os
basedir = os.path.abspath(os.path.dirname(__file__))

from app.lib.utilfuncs import get_secret


class Config(object):
    """Base configuration"""

    DEBUG = True
    APPLICATION_ROOT = basedir
    TRAP_BAD_REQUEST_ERRORS = False
    JSON_AS_ASCII = False

    SECRET_KEY = get_secret("flask_secret_key.pkl")
    SESSION_COOKIE_PATH = '/'

    @staticmethod
    def init_app(app):
        pass


class DevelopmentConfig(Config):

    UPLOAD_FOLDER = os.path.join(basedir, "static/upload")
    MAX_CONTENT_LENGTH = 12 * 1024 * 1024  # 12 MB max

    DEBUG = True


class PKUYouthMiniProgram(Config):
    pass


class PKUYouthMiniProgramDevelop(PKUYouthMiniProgram):
    DEBUG = True


class PKUYouthMiniProgramRelease(PKUYouthMiniProgram):
    DEBUG = False


config = {  # registry dict
    'default': DevelopmentConfig,
    'pkuyouth_miniprogram_release': PKUYouthMiniProgramRelease,
    'pkuyouth_miniprogram_develop': PKUYouthMiniProgramDevelop,
    'pkuyouth_server': Config,
}
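# A short, hedged example of how the registry above is typically consumed by
# an application factory; the environment-variable name is illustrative, not
# from the original project.
def _demo_pick_config():
    name = os.environ.get('APP_CONFIG', 'default')
    cfg = config.get(name, config['default'])
    print(cfg.__name__, cfg.DEBUG)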
19.795918
60
0.76701
0de3122d057ef28e8f842158f5e44445ed6e01bb
376
py
Python
utils.py
AsianZeus/Clinical-ICD10-Code-Prediction
cbfa9191641d8c832e724a1fe607f25ee28b29cd
[ "Apache-2.0" ]
null
null
null
utils.py
AsianZeus/Clinical-ICD10-Code-Prediction
cbfa9191641d8c832e724a1fe607f25ee28b29cd
[ "Apache-2.0" ]
null
null
null
utils.py
AsianZeus/Clinical-ICD10-Code-Prediction
cbfa9191641d8c832e724a1fe607f25ee28b29cd
[ "Apache-2.0" ]
null
null
null
from resources import config, tokenizer, model
from pydantic import BaseModel


class Description(BaseModel):
    description: str


def get_code(text):
    encoded_input = tokenizer(text, return_tensors='pt')
    output = model(**encoded_input)
    codes = [config.id2label[ids] for ids in output.logits.detach().cpu().numpy()[
        0].argsort()[::-1][:5]]
    return codes
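# A hedged usage sketch: top-5 ICD-10 code prediction for one clinical note.
# It assumes `resources` exposes an already-loaded tokenizer/model pair as
# imported above; the note text is made up for illustration.
def _demo_get_code():
    note = "Patient presents with acute chest pain and shortness of breath."
    print(get_code(note))  # five label strings, highest logit first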
28.923077
82
0.699468
7b78b63ab10e9c6dd58df99a8e6622cf35f61091
3,828
py
Python
jax/_src/source_info_util.py
jamestwebber/jax
9450b8f8f96cb4d57b3f6337dcc18d3d104ecf6b
[ "ECL-2.0", "Apache-2.0" ]
2
2021-06-12T07:03:42.000Z
2021-06-27T08:48:12.000Z
jax/_src/source_info_util.py
jamestwebber/jax
9450b8f8f96cb4d57b3f6337dcc18d3d104ecf6b
[ "ECL-2.0", "Apache-2.0" ]
2
2022-01-31T13:20:35.000Z
2022-02-14T13:20:49.000Z
jax/_src/source_info_util.py
jamestwebber/jax
9450b8f8f96cb4d57b3f6337dcc18d3d104ecf6b
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import itertools
import os.path
import threading
from typing import Any, Optional, Iterator

import jax.version
from jax.lib import xla_client

from jax._src import traceback_util
traceback_util.register_exclusion(__file__)


Traceback = Any  # xla_client.Traceback
Frame = Any  # xla_client.Traceback::Frame

_exclude_paths = [os.path.dirname(jax.version.__file__)]

def register_exclusion(path):
  _exclude_paths.append(path)

def user_frames(source_info: Optional[Traceback]) -> Iterator[Frame]:
  """Heuristic that guesses the identity of the user's code in a stack trace."""
  # Guess the user's frame is the innermost frame not in the jax source tree
  # We don't use traceback_util.path_starts_with because that incurs filesystem
  # access, which may be slow; we call this function when e.g. adding source
  # provenance annotations to XLA lowerings, so we don't want to incur the cost.
  # We consider files that end with _test.py as user frames, to allow testing
  # this mechanism from tests.
  return (x for x in (source_info.frames if source_info else [])
          if x.file_name.endswith("_test.py")
          or not any(x.file_name.startswith(p) for p in _exclude_paths))

def user_frame(source_info: Optional[Traceback]) -> Optional[Frame]:
  return next(user_frames(source_info), None)

def summarize(source_info: Optional[Traceback], num_frames=1) -> str:
  frames = itertools.islice(user_frames(source_info), num_frames)
  frame_strs = [f"{frame.file_name}:{frame.line_num} ({frame.function_name})"
                if frame else "unknown" for frame in frames]
  return '\n'.join(reversed(frame_strs))

class _SourceInfoContext(threading.local):
  context: Optional[Traceback]

  def __init__(self):
    self.context = None

_source_info_context = _SourceInfoContext()

def current() -> Optional[Traceback]:
  return _source_info_context.context or xla_client.Traceback.get_traceback()

class JaxStackTraceBeforeTransformation(Exception): pass

_message = (
    'The preceding stack trace is the source of the JAX operation that, once '
    'transformed by JAX, triggered the following exception.\n'
    '\n--------------------')

def has_user_context(e):
  while e is not None:
    if isinstance(e, JaxStackTraceBeforeTransformation):
      return True
    e = e.__cause__
  return False

@contextlib.contextmanager
def user_context(c):
  prev = _source_info_context.context
  _source_info_context.context = c or _source_info_context.context
  filtered_tb = None
  try:
    yield
  except Exception as e:
    if c is None or has_user_context(e):
      raise
    # TODO(phawkins): remove the following condition after Jaxlib 0.1.66 is the
    # minimum.
    if not hasattr(c, 'as_python_traceback'):
      raise
    filtered_tb = traceback_util.filter_traceback(c.as_python_traceback())
    if filtered_tb:
      msg = traceback_util.format_exception_only(e)
      msg = f'{msg}\n\n{_message}'
      c = JaxStackTraceBeforeTransformation(msg).with_traceback(filtered_tb)
      c.__context__ = e.__context__
      c.__cause__ = e.__cause__
      c.__suppress_context__ = e.__suppress_context__
      e.__context__ = None
      e.__cause__ = c
    raise
  finally:
    _source_info_context.context = prev
    del filtered_tb
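# A hedged illustration of the exclusion mechanism above: frames from a
# library's own source tree can be hidden from user-facing summaries. The
# exact summary string depends on the caller's stack, so output varies.
def _demo_summary():
  register_exclusion(os.path.dirname(__file__))  # hide this file's frames
  print(summarize(current(), num_frames=2))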
34.486486
108
0.742947
7bbf7d0b16035b7ccb9b6b02418d2d78ada49d45
4,681
py
Python
network/resnet.py
ustato/sber-swap
1140e085e165ed14e1098d81b7abd63feafedecf
[ "Apache-2.0" ]
210
2022-01-24T12:58:08.000Z
2022-03-31T07:54:22.000Z
network/resnet.py
ustato/sber-swap
1140e085e165ed14e1098d81b7abd63feafedecf
[ "Apache-2.0" ]
16
2022-01-24T23:45:04.000Z
2022-03-30T22:53:46.000Z
network/resnet.py
ustato/sber-swap
1140e085e165ed14e1098d81b7abd63feafedecf
[ "Apache-2.0" ]
35
2022-01-25T00:55:36.000Z
2022-03-30T22:45:27.000Z
import torch.nn as nn
import math


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):

    def __init__(self, block, layers, num_classes=1000, include_top=True):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.include_top = include_top

        self.conv0 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias=False)
        self.bn0 = nn.BatchNorm2d(64)
        self.relu0 = nn.ReLU(inplace=True)

        self.conv1 = nn.Conv2d(64, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)

        self.layer1 = self._make_layer(block, 32, layers[0], stride=2)
        self.layer2 = self._make_layer(block, 64, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 128, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 256, layers[3], stride=2)
        self.layer5 = self._make_layer(block, 512, layers[4], stride=2)
        self.layer6 = self._make_layer(block, 256, layers[5], stride=2)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x0 = self.conv0(x)
        x0 = self.bn0(x0)
        x0 = self.relu0(x0)

        x1 = self.conv1(x0)
        x1 = self.bn1(x1)
        x1 = self.relu(x1)

        x2 = self.layer1(x1)
        x3 = self.layer2(x2)
        x4 = self.layer3(x3)
        x5 = self.layer4(x4)
        x6 = self.layer5(x5)
        x7 = self.layer6(x6)

        return x7, x6, x5, x4, x3, x2, x1, x0


def MLAttrEncoderResnet(**kwargs):
    model = ResNet(Bottleneck, [2, 2, 2, 2, 2, 2], **kwargs)
    return model
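# A hedged shape check for the encoder above: the stride-2 stem conv plus six
# stride-2 stages divide the spatial size by 2**7, so a 256x256 input yields
# a 2x2 deepest feature map. The input size is an illustrative choice.
def _demo_shapes():
    import torch
    model = MLAttrEncoderResnet()
    feats = model(torch.randn(1, 3, 256, 256))
    print([f.shape for f in feats])  # x7 ... x0, coarsest to finest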
31.416107
94
0.582568
5f32f092c7cbe640bb3669e18634121cf47c8813
16,565
py
Python
python/pyspark/mllib/evaluation.py
shagunsodhani/spark
5b21139dbf3bd09cb3a590bd0ffb857ea92dc23c
[ "Apache-2.0" ]
null
null
null
python/pyspark/mllib/evaluation.py
shagunsodhani/spark
5b21139dbf3bd09cb3a590bd0ffb857ea92dc23c
[ "Apache-2.0" ]
null
null
null
python/pyspark/mllib/evaluation.py
shagunsodhani/spark
5b21139dbf3bd09cb3a590bd0ffb857ea92dc23c
[ "Apache-2.0" ]
null
null
null
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from pyspark import since
from pyspark.mllib.common import JavaModelWrapper, callMLlibFunc
from pyspark.sql import SQLContext
from pyspark.sql.types import StructField, StructType, DoubleType, IntegerType, ArrayType

__all__ = ['BinaryClassificationMetrics', 'RegressionMetrics',
           'MulticlassMetrics', 'RankingMetrics']


class BinaryClassificationMetrics(JavaModelWrapper):
    """
    Evaluator for binary classification.

    :param scoreAndLabels: an RDD of (score, label) pairs

    >>> scoreAndLabels = sc.parallelize([
    ...     (0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)], 2)
    >>> metrics = BinaryClassificationMetrics(scoreAndLabels)
    >>> metrics.areaUnderROC
    0.70...
    >>> metrics.areaUnderPR
    0.83...
    >>> metrics.unpersist()

    .. versionadded:: 1.4.0
    """

    def __init__(self, scoreAndLabels):
        sc = scoreAndLabels.ctx
        sql_ctx = SQLContext.getOrCreate(sc)
        df = sql_ctx.createDataFrame(scoreAndLabels, schema=StructType([
            StructField("score", DoubleType(), nullable=False),
            StructField("label", DoubleType(), nullable=False)]))
        java_class = sc._jvm.org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
        java_model = java_class(df._jdf)
        super(BinaryClassificationMetrics, self).__init__(java_model)

    @property
    @since('1.4.0')
    def areaUnderROC(self):
        """
        Computes the area under the receiver operating characteristic
        (ROC) curve.
        """
        return self.call("areaUnderROC")

    @property
    @since('1.4.0')
    def areaUnderPR(self):
        """
        Computes the area under the precision-recall curve.
        """
        return self.call("areaUnderPR")

    @since('1.4.0')
    def unpersist(self):
        """
        Unpersists intermediate RDDs used in the computation.
        """
        self.call("unpersist")


class RegressionMetrics(JavaModelWrapper):
    """
    Evaluator for regression.

    :param predictionAndObservations: an RDD of (prediction, observation) pairs.

    >>> predictionAndObservations = sc.parallelize([
    ...     (2.5, 3.0), (0.0, -0.5), (2.0, 2.0), (8.0, 7.0)])
    >>> metrics = RegressionMetrics(predictionAndObservations)
    >>> metrics.explainedVariance
    8.859...
    >>> metrics.meanAbsoluteError
    0.5...
    >>> metrics.meanSquaredError
    0.37...
    >>> metrics.rootMeanSquaredError
    0.61...
    >>> metrics.r2
    0.94...

    .. versionadded:: 1.4.0
    """

    def __init__(self, predictionAndObservations):
        sc = predictionAndObservations.ctx
        sql_ctx = SQLContext.getOrCreate(sc)
        df = sql_ctx.createDataFrame(predictionAndObservations, schema=StructType([
            StructField("prediction", DoubleType(), nullable=False),
            StructField("observation", DoubleType(), nullable=False)]))
        java_class = sc._jvm.org.apache.spark.mllib.evaluation.RegressionMetrics
        java_model = java_class(df._jdf)
        super(RegressionMetrics, self).__init__(java_model)

    @property
    @since('1.4.0')
    def explainedVariance(self):
        """
        Returns the explained variance regression score.
        explainedVariance = 1 - variance(y - \hat{y}) / variance(y)
        """
        return self.call("explainedVariance")

    @property
    @since('1.4.0')
    def meanAbsoluteError(self):
        """
        Returns the mean absolute error, which is a risk function corresponding to the
        expected value of the absolute error loss or l1-norm loss.
        """
        return self.call("meanAbsoluteError")

    @property
    @since('1.4.0')
    def meanSquaredError(self):
        """
        Returns the mean squared error, which is a risk function corresponding to the
        expected value of the squared error loss or quadratic loss.
        """
        return self.call("meanSquaredError")

    @property
    @since('1.4.0')
    def rootMeanSquaredError(self):
        """
        Returns the root mean squared error, which is defined as the square root of
        the mean squared error.
        """
        return self.call("rootMeanSquaredError")

    @property
    @since('1.4.0')
    def r2(self):
        """
        Returns R^2^, the coefficient of determination.
        """
        return self.call("r2")


class MulticlassMetrics(JavaModelWrapper):
    """
    Evaluator for multiclass classification.

    :param predictionAndLabels: an RDD of (prediction, label) pairs.

    >>> predictionAndLabels = sc.parallelize([(0.0, 0.0), (0.0, 1.0), (0.0, 0.0),
    ...     (1.0, 0.0), (1.0, 1.0), (1.0, 1.0), (1.0, 1.0), (2.0, 2.0), (2.0, 0.0)])
    >>> metrics = MulticlassMetrics(predictionAndLabels)
    >>> metrics.confusionMatrix().toArray()
    array([[ 2.,  1.,  1.],
           [ 1.,  3.,  0.],
           [ 0.,  0.,  1.]])
    >>> metrics.falsePositiveRate(0.0)
    0.2...
    >>> metrics.precision(1.0)
    0.75...
    >>> metrics.recall(2.0)
    1.0...
    >>> metrics.fMeasure(0.0, 2.0)
    0.52...
    >>> metrics.precision()
    0.66...
    >>> metrics.recall()
    0.66...
    >>> metrics.weightedFalsePositiveRate
    0.19...
    >>> metrics.weightedPrecision
    0.68...
    >>> metrics.weightedRecall
    0.66...
    >>> metrics.weightedFMeasure()
    0.66...
    >>> metrics.weightedFMeasure(2.0)
    0.65...

    .. versionadded:: 1.4.0
    """

    def __init__(self, predictionAndLabels):
        sc = predictionAndLabels.ctx
        sql_ctx = SQLContext.getOrCreate(sc)
        df = sql_ctx.createDataFrame(predictionAndLabels, schema=StructType([
            StructField("prediction", DoubleType(), nullable=False),
            StructField("label", DoubleType(), nullable=False)]))
        java_class = sc._jvm.org.apache.spark.mllib.evaluation.MulticlassMetrics
        java_model = java_class(df._jdf)
        super(MulticlassMetrics, self).__init__(java_model)

    @since('1.4.0')
    def confusionMatrix(self):
        """
        Returns confusion matrix: predicted classes are in columns,
        they are ordered by class label ascending, as in "labels".
        """
        return self.call("confusionMatrix")

    @since('1.4.0')
    def truePositiveRate(self, label):
        """
        Returns true positive rate for a given label (category).
        """
        return self.call("truePositiveRate", label)

    @since('1.4.0')
    def falsePositiveRate(self, label):
        """
        Returns false positive rate for a given label (category).
        """
        return self.call("falsePositiveRate", label)

    @since('1.4.0')
    def precision(self, label=None):
        """
        Returns precision or precision for a given label (category) if specified.
        """
        if label is None:
            return self.call("precision")
        else:
            return self.call("precision", float(label))

    @since('1.4.0')
    def recall(self, label=None):
        """
        Returns recall or recall for a given label (category) if specified.
        """
        if label is None:
            return self.call("recall")
        else:
            return self.call("recall", float(label))

    @since('1.4.0')
    def fMeasure(self, label=None, beta=None):
        """
        Returns f-measure or f-measure for a given label (category) if specified.
        """
        if beta is None:
            if label is None:
                return self.call("fMeasure")
            else:
                return self.call("fMeasure", label)
        else:
            if label is None:
                raise Exception("If the beta parameter is specified, label can not be none")
            else:
                return self.call("fMeasure", label, beta)

    @property
    @since('1.4.0')
    def weightedTruePositiveRate(self):
        """
        Returns weighted true positive rate.
        (equals to precision, recall and f-measure)
        """
        return self.call("weightedTruePositiveRate")

    @property
    @since('1.4.0')
    def weightedFalsePositiveRate(self):
        """
        Returns weighted false positive rate.
        """
        return self.call("weightedFalsePositiveRate")

    @property
    @since('1.4.0')
    def weightedRecall(self):
        """
        Returns weighted averaged recall.
        (equals to precision, recall and f-measure)
        """
        return self.call("weightedRecall")

    @property
    @since('1.4.0')
    def weightedPrecision(self):
        """
        Returns weighted averaged precision.
        """
        return self.call("weightedPrecision")

    @since('1.4.0')
    def weightedFMeasure(self, beta=None):
        """
        Returns weighted averaged f-measure.
        """
        if beta is None:
            return self.call("weightedFMeasure")
        else:
            return self.call("weightedFMeasure", beta)


class RankingMetrics(JavaModelWrapper):
    """
    Evaluator for ranking algorithms.

    :param predictionAndLabels: an RDD of (predicted ranking, ground truth set) pairs.

    >>> predictionAndLabels = sc.parallelize([
    ...     ([1, 6, 2, 7, 8, 3, 9, 10, 4, 5], [1, 2, 3, 4, 5]),
    ...     ([4, 1, 5, 6, 2, 7, 3, 8, 9, 10], [1, 2, 3]),
    ...     ([1, 2, 3, 4, 5], [])])
    >>> metrics = RankingMetrics(predictionAndLabels)
    >>> metrics.precisionAt(1)
    0.33...
    >>> metrics.precisionAt(5)
    0.26...
    >>> metrics.precisionAt(15)
    0.17...
    >>> metrics.meanAveragePrecision
    0.35...
    >>> metrics.ndcgAt(3)
    0.33...
    >>> metrics.ndcgAt(10)
    0.48...

    .. versionadded:: 1.4.0
    """

    def __init__(self, predictionAndLabels):
        sc = predictionAndLabels.ctx
        sql_ctx = SQLContext.getOrCreate(sc)
        df = sql_ctx.createDataFrame(predictionAndLabels,
                                     schema=sql_ctx._inferSchema(predictionAndLabels))
        java_model = callMLlibFunc("newRankingMetrics", df._jdf)
        super(RankingMetrics, self).__init__(java_model)

    @since('1.4.0')
    def precisionAt(self, k):
        """
        Compute the average precision of all the queries, truncated at ranking position k.

        If for a query, the ranking algorithm returns n (n < k) results, the precision value
        will be computed as #(relevant items retrieved) / k. This formula also applies when
        the size of the ground truth set is less than k.

        If a query has an empty ground truth set, zero will be used as precision together
        with a log warning.
        """
        return self.call("precisionAt", int(k))

    @property
    @since('1.4.0')
    def meanAveragePrecision(self):
        """
        Returns the mean average precision (MAP) of all the queries.
        If a query has an empty ground truth set, the average precision will be zero and
        a log warining is generated.
        """
        return self.call("meanAveragePrecision")

    @since('1.4.0')
    def ndcgAt(self, k):
        """
        Compute the average NDCG value of all the queries, truncated at ranking position k.
        The discounted cumulative gain at position k is computed as:
        sum,,i=1,,^k^ (2^{relevance of ''i''th item}^ - 1) / log(i + 1),
        and the NDCG is obtained by dividing the DCG value on the ground truth set.
        In the current implementation, the relevance value is binary.
        If a query has an empty ground truth set, zero will be used as NDCG together with
        a log warning.
        """
        return self.call("ndcgAt", int(k))


class MultilabelMetrics(JavaModelWrapper):
    """
    Evaluator for multilabel classification.

    :param predictionAndLabels: an RDD of (predictions, labels) pairs,
                                both are non-null Arrays, each with unique elements.

    >>> predictionAndLabels = sc.parallelize([([0.0, 1.0], [0.0, 2.0]), ([0.0, 2.0], [0.0, 1.0]),
    ...     ([], [0.0]), ([2.0], [2.0]), ([2.0, 0.0], [2.0, 0.0]),
    ...     ([0.0, 1.0, 2.0], [0.0, 1.0]), ([1.0], [1.0, 2.0])])
    >>> metrics = MultilabelMetrics(predictionAndLabels)
    >>> metrics.precision(0.0)
    1.0
    >>> metrics.recall(1.0)
    0.66...
    >>> metrics.f1Measure(2.0)
    0.5
    >>> metrics.precision()
    0.66...
    >>> metrics.recall()
    0.64...
    >>> metrics.f1Measure()
    0.63...
    >>> metrics.microPrecision
    0.72...
    >>> metrics.microRecall
    0.66...
    >>> metrics.microF1Measure
    0.69...
    >>> metrics.hammingLoss
    0.33...
    >>> metrics.subsetAccuracy
    0.28...
    >>> metrics.accuracy
    0.54...

    .. versionadded:: 1.4.0
    """

    def __init__(self, predictionAndLabels):
        sc = predictionAndLabels.ctx
        sql_ctx = SQLContext.getOrCreate(sc)
        df = sql_ctx.createDataFrame(predictionAndLabels,
                                     schema=sql_ctx._inferSchema(predictionAndLabels))
        java_class = sc._jvm.org.apache.spark.mllib.evaluation.MultilabelMetrics
        java_model = java_class(df._jdf)
        super(MultilabelMetrics, self).__init__(java_model)

    @since('1.4.0')
    def precision(self, label=None):
        """
        Returns precision or precision for a given label (category) if specified.
        """
        if label is None:
            return self.call("precision")
        else:
            return self.call("precision", float(label))

    @since('1.4.0')
    def recall(self, label=None):
        """
        Returns recall or recall for a given label (category) if specified.
        """
        if label is None:
            return self.call("recall")
        else:
            return self.call("recall", float(label))

    @since('1.4.0')
    def f1Measure(self, label=None):
        """
        Returns f1Measure or f1Measure for a given label (category) if specified.
        """
        if label is None:
            return self.call("f1Measure")
        else:
            return self.call("f1Measure", float(label))

    @property
    @since('1.4.0')
    def microPrecision(self):
        """
        Returns micro-averaged label-based precision.
        (equals to micro-averaged document-based precision)
        """
        return self.call("microPrecision")

    @property
    @since('1.4.0')
    def microRecall(self):
        """
        Returns micro-averaged label-based recall.
        (equals to micro-averaged document-based recall)
        """
        return self.call("microRecall")

    @property
    @since('1.4.0')
    def microF1Measure(self):
        """
        Returns micro-averaged label-based f1-measure.
        (equals to micro-averaged document-based f1-measure)
        """
        return self.call("microF1Measure")

    @property
    @since('1.4.0')
    def hammingLoss(self):
        """
        Returns Hamming-loss.
        """
        return self.call("hammingLoss")

    @property
    @since('1.4.0')
    def subsetAccuracy(self):
        """
        Returns subset accuracy.
        (for equal sets of labels)
        """
        return self.call("subsetAccuracy")

    @property
    @since('1.4.0')
    def accuracy(self):
        """
        Returns accuracy.
        """
        return self.call("accuracy")


def _test():
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.mllib.evaluation
    globs = pyspark.mllib.evaluation.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.evaluation tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        exit(-1)


if __name__ == "__main__":
    _test()
30.962617
99
0.59668
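Everything in the evaluation module above delegates through self.call(...) into the JVM, so a quick sanity check is to drive one of the metric classes against a tiny RDD. A minimal sketch, assuming a live SparkContext named sc (as in the module's doctests):

from pyspark.mllib.evaluation import RankingMetrics

pred_and_labels = sc.parallelize([
    ([1, 6, 2], [1, 2, 3]),   # (predicted ranking, ground truth set)
    ([4, 1, 5], [1, 2, 3]),
])
metrics = RankingMetrics(pred_and_labels)
print(metrics.precisionAt(3))          # fraction of top-3 hits, averaged over queries
print(metrics.meanAveragePrecision)    # MAP over both queries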
fecc9796c919a2e1c331528fd459e181670d5036
220
py
Python
tor-pysock.py
hanguangchao/scrapy_awesome
48a64fb3fdaeb00159b3b51e7c53f4d358e83743
[ "MIT" ]
null
null
null
tor-pysock.py
hanguangchao/scrapy_awesome
48a64fb3fdaeb00159b3b51e7c53f4d358e83743
[ "MIT" ]
null
null
null
tor-pysock.py
hanguangchao/scrapy_awesome
48a64fb3fdaeb00159b3b51e7c53f4d358e83743
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

import socket
import socks
import requests

socks.set_default_proxy(socks.SOCKS5, "127.0.0.1", 9050)
socket.socket = socks.socksocket

print(requests.get('http://api.ipify.org?format=json').text)
22
60
0.736364
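The script above monkeypatches socket.socket globally, so every library in the process is tunneled through Tor's SOCKS port. An alternative sketch, assuming the requests[socks] extra is installed, scopes the proxy to a single request instead; the socks5h scheme also resolves DNS through the proxy:

import requests

proxies = {
    'http': 'socks5h://127.0.0.1:9050',   # socks5h: DNS resolution goes via the proxy too
    'https': 'socks5h://127.0.0.1:9050',
}
# Same IP-echo check as the script above, without global monkeypatching.
print(requests.get('http://api.ipify.org?format=json', proxies=proxies).text)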
41a766ac529d4197cfa4b35fa9da56dc7af837eb
655
py
Python
tests/unit/systems/nfs/client_test.py
gamechanger/dusty
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
[ "MIT" ]
421
2015-06-02T16:29:59.000Z
2021-06-03T18:44:42.000Z
tests/unit/systems/nfs/client_test.py
gamechanger/dusty
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
[ "MIT" ]
404
2015-06-02T20:23:42.000Z
2019-08-21T16:59:41.000Z
tests/unit/systems/nfs/client_test.py
gamechanger/dusty
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
[ "MIT" ]
16
2015-06-16T17:21:02.000Z
2020-03-27T02:27:09.000Z
from mock import Mock, patch

from dusty.systems.nfs import client

from ....testcases import DustyTestCase


class TestNFSClient(DustyTestCase):
    @patch('dusty.systems.nfs.client.get_host_ip')
    def test_mount_args_string(self, fake_get_host_ip):
        fake_get_host_ip.return_value = '192.168.59.3'
        fake_repo = Mock()
        fake_repo.local_path = '/repo/local/path'
        fake_repo.vm_path = '/persist/repos/remote/path'
        expected_mount_args = '-t nfs -o async,udp,noatime,nfsvers=3 192.168.59.3:/repo/local/path /persist/repos/remote/path'
        self.assertEqual(expected_mount_args, client._nfs_mount_args_string(fake_repo))
43.666667
126
0.734351
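The test patches get_host_ip and asserts on the exact mount-argument string, which pins down the helper's contract: NFSv3 options, then host-exported source, then VM target. A hedged sketch of an implementation that would satisfy the assertion; the real dusty helper is not shown in this record and may differ:

def _nfs_mount_args_string(repo):
    # Hypothetical reconstruction inferred from the expected string above.
    # get_host_ip is assumed to return the host's IP as seen from the VM.
    return '-t nfs -o async,udp,noatime,nfsvers=3 {}:{} {}'.format(
        get_host_ip(), repo.local_path, repo.vm_path)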
73fe5b99e61507a10dcc46583ac3453bc15943c5
11,108
py
Python
test/test_rosdep_source.py
haraisao/rosdep
3ee197d8b36e706ab6f8abe5db54f80231cad00a
[ "BSD-3-Clause" ]
null
null
null
test/test_rosdep_source.py
haraisao/rosdep
3ee197d8b36e706ab6f8abe5db54f80231cad00a
[ "BSD-3-Clause" ]
1
2018-11-14T11:46:12.000Z
2018-11-15T00:14:04.000Z
test/test_rosdep_source.py
at-wat/rosdep
6d41913e47ef3ca2f4960d93a61cde3445a3c5fb
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Willow Garage, Inc. nor the names of its
#       contributors may be used to endorse or promote products derived from
#       this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

# Author Ken Conley/kwc@willowgarage.com

import os
import yaml

rep122_install_command = """#!/bin/bash
set -o errexit
mkdir -p build
cd build
cmake ..
make
echo "About to run checkinstall make install"
sudo checkinstall -y --nodoc --pkgname=yaml-cpp-sourcedep make install
"""

rep122_check_presence_command = """#!/bin/bash
dpkg-query -W -f='${Package} ${Status}\\n' yaml-cpp-sourcedep | awk '{\\
if ($4 =="installed") exit 0
else print "yaml-cpp-sourcedep not installed"
exit 1}'
"""

REP112_MD5SUM = '57cb9faf930e9c4f0822be8b27798248'


def get_test_dir():
    return os.path.abspath(os.path.join(os.path.dirname(__file__), 'source'))


def _subtest_rep112_rdmanifest(resolved):
    test_dir = get_test_dir()
    path = os.path.join(test_dir, 'rep112-example.rdmanifest')
    manifest = yaml.load(open(path))
    assert resolved.manifest == manifest
    assert resolved.manifest_url == path
    assert resolved.install_command == rep122_install_command
    assert resolved.check_presence_command == rep122_check_presence_command
    assert len(resolved.check_presence_command) == len(rep122_check_presence_command), '%s %s' % (len(resolved.check_presence_command), len(rep122_check_presence_command))
    assert resolved.exec_path == 'yaml-cpp-0.2.5'
    assert resolved.tarball == 'http://download.ros.org/downloads/yaml-cpp-0.2.5.tar.gz'
    assert resolved.alternate_tarball is None
    assert resolved.tarball_md5sum == 'b17dc36055cd2259c88b2602601415d9'


def test_SourceInstall():
    from rosdep2.platforms.source import InvalidRdmanifest, SourceInstall

    # tripwire
    SourceInstall()

    # test unpacking of dict
    manifest = {
        'md5sum': 'fake-md5',
        'exec-path': '/path',
        'install-script': 'echo hello',
        'check-presence-script': 'hello there',
        'uri': 'http://ros.org/',
        'alternate-uri': 'http://turtlebot.com/',
        'depends': ['foo', 'bar'],
    }
    resolved = SourceInstall.from_manifest(manifest, 'fake-url')
    assert resolved.manifest == manifest
    assert resolved.manifest_url == 'fake-url'
    assert resolved.install_command == 'echo hello'
    assert resolved.check_presence_command == 'hello there'
    assert resolved.exec_path == '/path'
    assert resolved.tarball == 'http://ros.org/'
    assert resolved.alternate_tarball == 'http://turtlebot.com/'
    assert resolved.tarball_md5sum == 'fake-md5'

    test_dir = get_test_dir()
    path = os.path.join(test_dir, 'rep112-example.rdmanifest')
    manifest = yaml.load(open(path))
    resolved = SourceInstall.from_manifest(manifest, path)
    _subtest_rep112_rdmanifest(resolved)

    # TODO: test depends

    # test with bad dicts
    manifest = {
        'md5sum': 'fake-md5',
        'exec-path': '/path',
        'install-script': 'echo hello',
        'check-presence-script': 'hello there',
        'alternate-uri': 'http://turtlebot.com/',
        'depends': ['foo', 'bar'],
    }
    # uri is required
    try:
        SourceInstall.from_manifest(manifest, 'foo')
        assert False, 'should have raised'
    except InvalidRdmanifest as e:
        pass

    # test defaults
    manifest = dict(uri='http://ros.org/')
    resolved = SourceInstall.from_manifest(manifest, 'foo')
    assert resolved.exec_path == '.'
    assert resolved.install_command == ''
    assert resolved.check_presence_command == ''
    assert resolved.alternate_tarball is None
    assert resolved.tarball_md5sum is None


def test_is_installed():
    from rosdep2.platforms.source import SourceInstaller, SourceInstall
    resolved = SourceInstall()
    resolved.check_presence_command = """#!/bin/bash
exit 0
"""
    installer = SourceInstaller()
    assert installer.is_installed(resolved)


def test_source_detect():
    from rosdep2.platforms.source import source_detect, SourceInstall
    resolved = SourceInstall()
    resolved.check_presence_command = """#!/bin/bash
exit 0
"""
    assert [] == source_detect([])
    assert [resolved] == source_detect([resolved])

    def yes(*args, **kwds):
        return 0

    def no(*args, **kwds):
        return 1

    resolved = [SourceInstall(), SourceInstall(), SourceInstall(), SourceInstall()]
    for r in resolved:
        r.check_presence_command = ''
    retval = source_detect(resolved, exec_fn=yes)
    assert resolved == retval, retval
    assert [] == source_detect(resolved, exec_fn=no)


def test_SourceInstaller_get_install_command():
    from rosdep2.platforms.source import SourceInstaller, SourceInstall
    installer = SourceInstaller()
    resolved = SourceInstall()
    resolved.manifest_url = 'http://fake/foo'
    resolved.check_presence_command = """#!/bin/bash
exit 1
"""
    commands = installer.get_install_command([resolved])
    assert len(commands) == 1
    assert commands[0] == ['rosdep-source', 'install', 'http://fake/foo']
    resolved = SourceInstall()
    resolved.manifest_url = 'http://fake/foo'
    resolved.check_presence_command = """#!/bin/bash
exit 0
"""
    commands = installer.get_install_command([resolved])
    assert not(commands)


def test_SourceInstaller_resolve():
    from rosdep2.platforms.source import SourceInstaller, InvalidData
    test_dir = get_test_dir()
    url = 'file://%s' % os.path.join(test_dir, 'rep112-example.rdmanifest')
    md5sum_good = REP112_MD5SUM
    md5sum_bad = 'fake'
    installer = SourceInstaller()
    try:
        installer.resolve({})
        assert False, 'should have raised'
    except InvalidData:
        pass
    try:
        installer.resolve(dict(uri=url, md5sum=md5sum_bad))
        assert False, 'should have raised'
    except InvalidData:
        pass
    resolved = installer.resolve(dict(uri=url, md5sum=md5sum_good))
    assert type(resolved) == list
    assert len(resolved) == 1
    # test for reinstall (to check the depends in rdmanifest)
    dependencies = installer.get_depends(dict(uri=url, md5sum=md5sum_good))
    assert dependencies == ['checkinstall'], 'Dependencies should resolve to checkinstall listed in the rdmanifest.'
    resolved = resolved[0]
    assert resolved.install_command == rep122_install_command
    assert resolved.check_presence_command == rep122_check_presence_command
    # test again to activate caching
    resolved = installer.resolve(dict(uri=url, md5sum=md5sum_good))
    assert type(resolved) == list, 'Cache should also return a list'
    assert len(resolved) == 1
    resolved = resolved[0]
    assert resolved.install_command == rep122_install_command
    assert resolved.check_presence_command == rep122_check_presence_command


def test_load_rdmanifest():
    from rosdep2.platforms.source import load_rdmanifest, InvalidRdmanifest
    # load_rdmanifest is just a YAML unmarshaller with an exception change
    assert 'str' == load_rdmanifest('str')
    assert {'a': 'b'} == load_rdmanifest('{a: b}')
    try:
        load_rdmanifest(';lkajsdf;klj ;l: a;kljdf;: asdf\n ;asdfl;kj')
        assert False, 'should have raised'
    except InvalidRdmanifest as e:
        pass


def test_get_file_hash():
    from rosdep2.platforms.source import get_file_hash
    path = os.path.join(get_test_dir(), 'rep112-example.rdmanifest')
    assert REP112_MD5SUM == get_file_hash(path)


def test_fetch_file():
    test_dir = get_test_dir()
    with open(os.path.join(test_dir, 'rep112-example.rdmanifest')) as f:
        expected = f.read()
    from rosdep2.platforms.source import fetch_file
    url = 'file://%s' % os.path.join(test_dir, 'rep112-example.rdmanifest')
    contents, error = fetch_file(url, REP112_MD5SUM)
    assert not error
    assert contents == expected
    contents, error = fetch_file(url, 'badmd5')
    assert bool(error), 'should have errored'
    assert not contents
    contents, error = fetch_file('http://badhostname.willowgarage.com', 'md5sum')
    assert not contents
    assert bool(error), 'should have errored'


def test_download_rdmanifest():
    test_dir = get_test_dir()
    with open(os.path.join(test_dir, 'rep112-example.rdmanifest')) as f:
        expected = yaml.load(f)
    from rosdep2.platforms.source import download_rdmanifest, DownloadFailed
    url = 'file://%s' % os.path.join(test_dir, 'rep112-example.rdmanifest')
    contents, download_url = download_rdmanifest(url, REP112_MD5SUM)
    assert contents == expected
    assert download_url == url
    # test alt_url
    contents, download_url = download_rdmanifest('http://badhostname.willowgarage.com/', REP112_MD5SUM, alt_url=url)
    assert contents == expected
    assert download_url == url
    # test md5sum validate
    try:
        contents, error = download_rdmanifest(url, 'badmd5')
        assert False, 'should have errored'
    except DownloadFailed:
        pass
    # test download verify
    try:
        contents, error = download_rdmanifest('http://badhostname.willowgarage.com', 'fakemd5')
        assert False, 'should have errored'
    except DownloadFailed:
        pass


def test_install_from_file():
    from rosdep2.platforms.source import install_from_file
    f = os.path.join(get_test_dir(), 'noop-not-installed.rdmanifest')
    install_from_file(f)


def test_install_source():
    from rosdep2.platforms.source import install_source, SourceInstall
    resolved = SourceInstall()
    resolved.tarball = 'https://github.com/ros-infrastructure/rosdep/raw/master/test/source/foo.tar.gz'
    resolved.tarball_md5sum = 'fd34dc39f8f192b97fcc191fe0a6befc'
    resolved.install_command = """#!/bin/sh
exit 0
"""
    resolved.exec_path = ''
    install_source(resolved)
34.821317
171
0.708048
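The tests above fix the rdmanifest contract: 'uri' is mandatory, 'exec-path' defaults to '.', and both script fields default to ''. A minimal sketch of a manifest dict that SourceInstall.from_manifest would accept, with illustrative values that are not from the rosdep docs:

from rosdep2.platforms.source import SourceInstall

manifest = {
    'uri': 'http://example.com/pkg-0.1.tar.gz',  # required, per test_SourceInstall
    'md5sum': 'deadbeef',                        # optional tarball checksum
    'exec-path': 'pkg-0.1',                      # defaults to '.'
    'install-script': 'make install',            # defaults to ''
    'check-presence-script': 'which pkg',        # defaults to ''
}
resolved = SourceInstall.from_manifest(manifest, 'http://example.com/pkg.rdmanifest')
assert resolved.exec_path == 'pkg-0.1'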
7137af9d387bd00113f7bd8f1c724e9a740311cc
4,524
py
Python
day-8/decoder.py
PeterPrice06/advent-of-code-2021
d1af22ee2e4778372e626debca1ae9dc7f2ad47c
[ "MIT" ]
null
null
null
day-8/decoder.py
PeterPrice06/advent-of-code-2021
d1af22ee2e4778372e626debca1ae9dc7f2ad47c
[ "MIT" ]
null
null
null
day-8/decoder.py
PeterPrice06/advent-of-code-2021
d1af22ee2e4778372e626debca1ae9dc7f2ad47c
[ "MIT" ]
null
null
null
from typing import List, Tuple


class Decoder():
    def __init__(self, line_strs: List[str]) -> None:
        self.signal_patterns = []
        self.output_values = []
        for line in line_strs:
            line_halves = line.split(' | ')
            self.signal_patterns.append(line_halves[0])
            self.output_values.append(line_halves[1])

        self.words = {}
        for signal_pattern_line in self.signal_patterns:
            for word in signal_pattern_line.split():
                if word not in self.words:
                    self.words[word] = 1
                else:
                    self.words[word] += 1

        self.words_with_unique_lengths, self.unique_lengths = self.get_unique_lengths()

    def __str__(self) -> str:
        string = f'words: {self.words}\n'
        for i in range(len(self.signal_patterns)):
            string += self.signal_patterns[i] + ' | ' + self.output_values[i] + '\n'
        return string

    def get_unique_lengths(self) -> Tuple[List[str], List[int]]:
        words_with_unique_lengths = []
        unique_lengths = []
        word_length_counts = {}
        for word in self.words.keys():
            if len(word) in word_length_counts:
                word_length_counts[len(word)] += 1
            else:
                word_length_counts[len(word)] = 1
        # word_length_counts maps a word length to how many words have that
        # length, so items() yields (length, count) pairs. The original body
        # named the key `word` and called len() on it, which raises TypeError
        # on an int; fixed to collect the words that have each unique length.
        for length, count in word_length_counts.items():
            if count == 1:
                unique_lengths.append(length)
                words_with_unique_lengths.extend(
                    word for word in self.words if len(word) == length)
        return words_with_unique_lengths, unique_lengths

    def count_unique_in_output_value(self) -> int:
        # only count unique in output_value
        count = 0
        for output_value in self.output_values:
            for output_word in output_value.split():
                if self.is_unique(output_word):
                    count += 1
        return count

    def is_unique(self, word: str) -> bool:
        # check if word is 1, 4, 7, or 8 (segment counts 2, 3, 4, 7 are unique)
        length = len(word)
        return length in [2, 3, 4, 7]

    def sum_decoded_output(self) -> int:
        total = 0  # renamed from `sum`, which shadowed the builtin
        for i, output_value in enumerate(self.output_values):
            output = 0
            for output_word in output_value.split():
                output *= 10
                digit = self.decode(output_word, self.signal_patterns[i])
                output += digit
            print(output)
            total += output
        return total

    def decode(self, word: str, signal_patterns: str) -> int:
        if len(word) == 2:
            return 1
        elif len(word) == 3:
            return 7
        elif len(word) == 4:
            return 4
        elif len(word) == 5:
            # five-segment digits: 2, 3, 5
            if self.same_lines_as_one(word, signal_patterns):
                return 3
            elif self.all_but_one_line_of_four(word, signal_patterns):
                return 5
            else:
                return 2
        elif len(word) == 6:
            # six-segment digits: 0, 6, 9
            if self.same_lines_as_one(word, signal_patterns):
                if self.same_lines_as_four(word, signal_patterns):
                    return 9
                else:
                    return 0
            return 6
        elif len(word) == 7:
            return 8

    def same_lines_as_one(self, word: str, signal_patterns: str) -> bool:
        for signal_pattern_word in signal_patterns.split():
            if len(signal_pattern_word) == 2:
                return signal_pattern_word[0] in word and signal_pattern_word[1] in word
        print('WARNING: signal pattern does not contain digit 1')
        return False

    def same_lines_as_four(self, word: str, signal_patterns: str) -> bool:
        for signal_pattern_word in signal_patterns.split():
            if len(signal_pattern_word) == 4:
                return signal_pattern_word[0] in word and signal_pattern_word[1] in word and \
                    signal_pattern_word[2] in word and signal_pattern_word[3] in word
        print('WARNING: signal pattern does not contain digit 4')
        return False

    def all_but_one_line_of_four(self, word: str, signal_patterns: str) -> bool:
        for signal_pattern_word in signal_patterns.split():
            if len(signal_pattern_word) == 4:
                return int(signal_pattern_word[0] in word) + int(signal_pattern_word[1] in word) + \
                    int(signal_pattern_word[2] in word) + int(signal_pattern_word[3] in word) == 3
        print('WARNING: signal pattern does not contain digit 4')
        return False
38.666667
100
0.573386
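A short sketch of how the class is driven, run against the worked example from the Advent of Code 2021 day 8 puzzle statement (that line and its decoded value 5353 come from the puzzle text):

line = ('acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab'
        ' | cdfeb fcadb cdfeb cdbaf')
decoder = Decoder([line])
print(decoder.count_unique_in_output_value())  # 0 -- no 1/4/7/8 digits in this output
print(decoder.sum_decoded_output())            # prints each line's value, then 5353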
878c1d235e696b4e047ddfb184e2b3656af6ecf8
20,701
py
Python
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_04_01/operations/_activity_log_alerts_operations.py
iscai-msft/azure-sdk-for-python
83715b95c41e519d5be7f1180195e2fba136fc0f
[ "MIT" ]
1
2021-06-02T08:01:35.000Z
2021-06-02T08:01:35.000Z
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_04_01/operations/_activity_log_alerts_operations.py
iscai-msft/azure-sdk-for-python
83715b95c41e519d5be7f1180195e2fba136fc0f
[ "MIT" ]
226
2019-07-24T07:57:21.000Z
2019-10-15T01:07:24.000Z
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_04_01/operations/_activity_log_alerts_operations.py
iscai-msft/azure-sdk-for-python
83715b95c41e519d5be7f1180195e2fba136fc0f
[ "MIT" ]
1
2019-06-17T22:18:23.000Z
2019-06-17T22:18:23.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

import uuid
from msrest.pipeline import ClientRawResponse

from .. import models


class ActivityLogAlertsOperations(object):
    """ActivityLogAlertsOperations operations.

    You should not instantiate this class directly; create a Client instance
    that will create it for you and attach it as an attribute.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2017-04-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):

        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2017-04-01"

        self.config = config

    def create_or_update(
            self, resource_group_name, activity_log_alert_name, activity_log_alert, custom_headers=None, raw=False, **operation_config):
        """Create a new activity log alert or update an existing one.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param activity_log_alert_name: The name of the activity log alert.
        :type activity_log_alert_name: str
        :param activity_log_alert: The activity log alert to create or use
         for the update.
        :type activity_log_alert:
         ~azure.mgmt.monitor.v2017_04_01.models.ActivityLogAlertResource
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ActivityLogAlertResource or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.monitor.v2017_04_01.models.ActivityLogAlertResource or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.monitor.v2017_04_01.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'activityLogAlertName': self._serialize.url("activity_log_alert_name", activity_log_alert_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(activity_log_alert, 'ActivityLogAlertResource')

        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200, 201]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ActivityLogAlertResource', response)
        if response.status_code == 201:
            deserialized = self._deserialize('ActivityLogAlertResource', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}'}

    def get(
            self, resource_group_name, activity_log_alert_name, custom_headers=None, raw=False, **operation_config):
        """Get an activity log alert.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param activity_log_alert_name: The name of the activity log alert.
        :type activity_log_alert_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ActivityLogAlertResource or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.monitor.v2017_04_01.models.ActivityLogAlertResource or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.monitor.v2017_04_01.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'activityLogAlertName': self._serialize.url("activity_log_alert_name", activity_log_alert_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ActivityLogAlertResource', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}'}

    def delete(
            self, resource_group_name, activity_log_alert_name, custom_headers=None, raw=False, **operation_config):
        """Delete an activity log alert.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param activity_log_alert_name: The name of the activity log alert.
        :type activity_log_alert_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.monitor.v2017_04_01.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'activityLogAlertName': self._serialize.url("activity_log_alert_name", activity_log_alert_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200, 204]:
            raise models.ErrorResponseException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}'}

    def update(
            self, resource_group_name, activity_log_alert_name, tags=None, enabled=True, custom_headers=None, raw=False, **operation_config):
        """Updates an existing ActivityLogAlertResource's tags. To update
        other fields use the CreateOrUpdate method.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param activity_log_alert_name: The name of the activity log alert.
        :type activity_log_alert_name: str
        :param tags: Resource tags
        :type tags: dict[str, str]
        :param enabled: Indicates whether this activity log alert is
         enabled. If an activity log alert is not enabled, then none of its
         actions will be activated.
        :type enabled: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ActivityLogAlertResource or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.monitor.v2017_04_01.models.ActivityLogAlertResource or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.monitor.v2017_04_01.models.ErrorResponseException>`
        """
        activity_log_alert_patch = models.ActivityLogAlertPatchBody(tags=tags, enabled=enabled)

        # Construct URL
        url = self.update.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'activityLogAlertName': self._serialize.url("activity_log_alert_name", activity_log_alert_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(activity_log_alert_patch, 'ActivityLogAlertPatchBody')

        # Construct and send request
        request = self._client.patch(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ActivityLogAlertResource', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}'}

    def list_by_subscription_id(
            self, custom_headers=None, raw=False, **operation_config):
        """Get a list of all activity log alerts in a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ActivityLogAlertResource
        :rtype:
         ~azure.mgmt.monitor.v2017_04_01.models.ActivityLogAlertResourcePaged[~azure.mgmt.monitor.v2017_04_01.models.ActivityLogAlertResource]
        :raises:
         :class:`ErrorResponseException<azure.mgmt.monitor.v2017_04_01.models.ErrorResponseException>`
        """
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list_by_subscription_id.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)

            return response

        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.ActivityLogAlertResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_by_subscription_id.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/activityLogAlerts'}

    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Get a list of all activity log alerts in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ActivityLogAlertResource
        :rtype:
         ~azure.mgmt.monitor.v2017_04_01.models.ActivityLogAlertResourcePaged[~azure.mgmt.monitor.v2017_04_01.models.ActivityLogAlertResource]
        :raises:
         :class:`ErrorResponseException<azure.mgmt.monitor.v2017_04_01.models.ErrorResponseException>`
        """
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)

            return response

        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.ActivityLogAlertResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts'}
48.030162
179
0.678421
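For orientation, this operations class is normally reached through the generated service client rather than constructed by hand. A rough sketch against the msrest-era (pre-track-2) SDK; the client attribute name and credential type are assumptions based on typical azure-mgmt-monitor 0.x usage, and the placeholder values are illustrative:

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.monitor import MonitorManagementClient

credentials = ServicePrincipalCredentials(
    client_id='<app-id>', secret='<app-secret>', tenant='<tenant-id>')
client = MonitorManagementClient(credentials, '<subscription-id>')

# Pages transparently via the internal_paging closures shown above.
for alert in client.activity_log_alerts.list_by_resource_group('my-resource-group'):
    print(alert.name, alert.enabled)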