| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
checkio/Scientific Expedition/Morse Clock/test_morse_clock.py
|
KenMercusLai/checkio
| 39
|
12781451
|
<gh_stars>10-100
import unittest
from morse_clock import checkio, to_binary
def test_to_binary():
assert to_binary('0') == '0000'
assert to_binary('1') == '0001'
assert to_binary('2') == '0010'
assert to_binary('3') == '0011'
assert to_binary('4') == '0100'
assert to_binary('5') == '0101'
assert to_binary('6') == '0110'
assert to_binary('7') == '0111'
assert to_binary('8') == '1000'
assert to_binary('9') == '1001'
assert to_binary('0', 1) == '0'
assert to_binary('1', 1) == '1'
assert to_binary('2', 2) == '10'
assert to_binary('3', 2) == '11'
assert to_binary('4', 3) == '100'
assert to_binary('5', 3) == '101'
assert to_binary('6', 3) == '110'
assert to_binary('7', 3) == '111'
assert to_binary('8', 4) == '1000'
assert to_binary('9', 4) == '1001'
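# A hedged reading of the expected encoding, inferred from the cases below: in the
# Morse-clock answers "." stands for binary 0 and "-" for binary 1; each time is
# zero-padded to "hhmmss" (the "explanation" fields) and the six digits are rendered
# with bit widths 2,4 : 3,4 : 3,4, e.g. "10:37:49" -> "103749" -> ".- .... : .-- .--- : -.. -..-".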
class Tests(unittest.TestCase):
TESTS = {
"Basics": [
{
"input": "10:37:49",
"answer": ".- .... : .-- .--- : -.. -..-",
"explanation": "103749",
},
{
"input": "21:34:56",
"answer": "-. ...- : .-- .-.. : -.- .--.",
"explanation": "213456",
},
{
"input": "00:1:02",
"answer": ".. .... : ... ...- : ... ..-.",
"explanation": "000102",
},
{
"input": "23:59:59",
"answer": "-. ..-- : -.- -..- : -.- -..-",
"explanation": "235959",
},
{
"input": "0:10:2",
"answer": ".. .... : ..- .... : ... ..-.",
"explanation": "001002",
},
],
"Extra": [
{
"input": "17:0:28",
"answer": ".- .--- : ... .... : .-. -...",
"explanation": "170028",
},
{
"input": "7:41:37",
"answer": ".. .--- : -.. ...- : .-- .---",
"explanation": "074137",
},
{
"input": "4:25:13",
"answer": ".. .-.. : .-. .-.- : ..- ..--",
"explanation": "042513",
},
{
"input": "15:18:8",
"answer": ".- .-.- : ..- -... : ... -...",
"explanation": "151808",
},
{
"input": "2:32:41",
"answer": ".. ..-. : .-- ..-. : -.. ...-",
"explanation": "023241",
},
{
"input": "9:44:31",
"answer": ".. -..- : -.. .-.. : .-- ...-",
"explanation": "094431",
},
{
"input": "3:8:2",
"answer": ".. ..-- : ... -... : ... ..-.",
"explanation": "030802",
},
{
"input": "5:1:9",
"answer": ".. .-.- : ... ...- : ... -..-",
"explanation": "050109",
},
{
"input": "09:02:08",
"answer": ".. -..- : ... ..-. : ... -...",
"explanation": "090208",
},
{
"input": "13:5:3",
"answer": ".- ..-- : ... .-.- : ... ..--",
"explanation": "130503",
},
],
}
def test_Basics(self):
for i in self.TESTS['Basics']:
assert checkio(i['input']) == i['answer'], i['input']
def test_Extra(self):
for i in self.TESTS['Extra']:
assert checkio(i['input']) == i['answer'], i['input']
if __name__ == "__main__": # pragma: no cover
unittest.main()
| 2.921875
| 3
|
Ejercicios/Diccionarios y sus metodos.py
|
dannieldev/Fundamentos-de-Python
| 0
|
12781452
|
dicionario = {
"redes_sociales":['Twitter','Facebook','LinkedIn'],
3:"Tres",
"Hola":"Mundo",
"eli":"elininame"
}
print (dicionario)
print (dicionario.pop(3))
del(dicionario["eli"])
print (dicionario)
dicionario.clear()
print (dicionario)
dicionario["clave_nueva"] = "Valor"
print (dicionario)
copidic2 = dicionario.copy()
print(copidic2)
| 3.390625
| 3
|
EX.py/EX0010.py
|
Allen-Lee-Junior/ex-em-python
| 1
|
12781453
|
<filename>EX.py/EX0010.py
real = float(input('Quanto dinheiro voce tem na carteira?:'))
Dolar = real / 5.34
Renminbi = real / 0.81
Eurofrançes = real / 6.35
EuroPortugal = real / 6.35
Libras = real / 7.13
peso = real / 15.17
print('com {} RS$ voce compra US${:.2f}'.format(real, Dolar, ))
print('com {} RS$ voce compra 人民币{:.2f}'.format(real, Renminbi, ))
print('com {} RS$ voce compra €{:.2f}'.format(real, Eurofrançes))
print('com {} RS$ voce compra €{:.2f}'.format(real, EuroPortugal, ))
print('com {} RS$ voce compra £{:.2f}'.format(real, Libras, ))
print('com {} RS$ voce compra ${:.2f}'.format(real, peso, ))
| 3.875
| 4
|
security/contrib/debug_toolbar_log/middleware.py
|
jsilhan/django-security
| 0
|
12781454
|
<reponame>jsilhan/django-security
from django.conf import settings as django_settings
from debug_toolbar.toolbar import DebugToolbar
from security.config import settings
from .models import DebugToolbarData
class DebugToolbarLogMiddleware:
def __init__(self, get_response=None):
self.get_response = get_response
super().__init__()
def __call__(self, request):
if settings.DEBUG_TOOLBAR and django_settings.DEBUG:
toolbar = DebugToolbar(request, self.get_response)
# Activate instrumentation ie. monkey-patch.
for panel in toolbar.enabled_panels:
panel.enable_instrumentation()
try:
# Run panels like Django middleware.
response = toolbar.process_request(request)
finally:
# Deactivate instrumentation ie. monkey-unpatch. This must run
# regardless of the response. Keep 'return' clauses below.
for panel in reversed(toolbar.enabled_panels):
panel.disable_instrumentation()
for panel in reversed(toolbar.enabled_panels):
panel.generate_stats(request, response)
panel.generate_server_timing(request, response)
if getattr(request, 'input_logged_request', False):
DebugToolbarData.objects.create(
logged_request=request.input_logged_request, toolbar=toolbar.render_toolbar()
)
return response
else:
return self.get_response(request)
| 2
| 2
|
hummingbot/connector/exchange/huobi/huobi_api_order_book_data_source.py
|
BGTCapital/hummingbot
| 3,027
|
12781455
|
#!/usr/bin/env python
import asyncio
import logging
import hummingbot.connector.exchange.huobi.huobi_constants as CONSTANTS
from collections import defaultdict
from typing import (
Any,
Dict,
List,
Optional,
)
from hummingbot.connector.exchange.huobi.huobi_order_book import HuobiOrderBook
from hummingbot.connector.exchange.huobi.huobi_utils import (
convert_from_exchange_trading_pair,
convert_to_exchange_trading_pair,
build_api_factory,
)
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, RESTResponse, WSRequest
from hummingbot.core.web_assistant.rest_assistant import RESTAssistant
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
from hummingbot.core.web_assistant.ws_assistant import WSAssistant
from hummingbot.logger import HummingbotLogger
class HuobiAPIOrderBookDataSource(OrderBookTrackerDataSource):
MESSAGE_TIMEOUT = 30.0
PING_TIMEOUT = 10.0
HEARTBEAT_INTERVAL = 30.0 # seconds
ORDER_BOOK_SNAPSHOT_DELAY = 60 * 60 # expressed in seconds
TRADE_CHANNEL_SUFFIX = "trade.detail"
ORDERBOOK_CHANNEL_SUFFIX = "depth.step0"
_haobds_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._haobds_logger is None:
cls._haobds_logger = logging.getLogger(__name__)
return cls._haobds_logger
def __init__(self,
trading_pairs: List[str],
api_factory: Optional[WebAssistantsFactory] = None,
):
super().__init__(trading_pairs)
self._api_factory = api_factory or build_api_factory()
self._rest_assistant: Optional[RESTAssistant] = None
self._ws_assistant: Optional[WSAssistant] = None
self._message_queue: Dict[str, asyncio.Queue] = defaultdict(asyncio.Queue)
async def _get_rest_assistant(self) -> RESTAssistant:
if self._rest_assistant is None:
self._rest_assistant = await self._api_factory.get_rest_assistant()
return self._rest_assistant
async def _get_ws_assistant(self) -> WSAssistant:
if self._ws_assistant is None:
self._ws_assistant = await self._api_factory.get_ws_assistant()
return self._ws_assistant
@classmethod
async def get_last_traded_prices(cls, trading_pairs: List[str]) -> Dict[str, float]:
api_factory = build_api_factory()
rest_assistant = await api_factory.get_rest_assistant()
url = CONSTANTS.REST_URL + CONSTANTS.TICKER_URL
request = RESTRequest(method=RESTMethod.GET,
url=url)
response: RESTResponse = await rest_assistant.call(request=request)
results = dict()
resp_json = await response.json()
for trading_pair in trading_pairs:
resp_record = [o for o in resp_json["data"] if o["symbol"] == convert_to_exchange_trading_pair(trading_pair)][0]
results[trading_pair] = float(resp_record["close"])
return results
@staticmethod
async def fetch_trading_pairs() -> List[str]:
try:
api_factory = build_api_factory()
rest_assistant = await api_factory.get_rest_assistant()
url = CONSTANTS.REST_URL + CONSTANTS.API_VERSION + CONSTANTS.SYMBOLS_URL
request = RESTRequest(method=RESTMethod.GET,
url=url)
response: RESTResponse = await rest_assistant.call(request=request)
if response.status == 200:
all_symbol_infos: Dict[str, Any] = await response.json()
return [f"{symbol_info['base-currency']}-{symbol_info['quote-currency']}".upper()
for symbol_info in all_symbol_infos["data"]
if symbol_info["state"] == "online"]
except Exception:
# Do nothing if the request fails -- there will be no autocomplete for huobi trading pairs
pass
return []
async def get_snapshot(self, trading_pair: str) -> Dict[str, Any]:
rest_assistant = await self._get_rest_assistant()
url = CONSTANTS.REST_URL + CONSTANTS.DEPTH_URL
# when type is set to "step0", the default value of "depth" is 150
params: Dict = {"symbol": convert_to_exchange_trading_pair(trading_pair), "type": "step0"}
request = RESTRequest(method=RESTMethod.GET,
url=url,
params=params)
response: RESTResponse = await rest_assistant.call(request=request)
if response.status != 200:
raise IOError(f"Error fetching Huobi market snapshot for {trading_pair}. "
f"HTTP status is {response.status}.")
snapshot_data: Dict[str, Any] = await response.json()
return snapshot_data
async def get_new_order_book(self, trading_pair: str) -> OrderBook:
snapshot: Dict[str, Any] = await self.get_snapshot(trading_pair)
timestamp = snapshot["tick"]["ts"]
snapshot_msg: OrderBookMessage = HuobiOrderBook.snapshot_message_from_exchange(
msg=snapshot,
timestamp=timestamp,
metadata={"trading_pair": trading_pair},
)
order_book: OrderBook = self.order_book_create_function()
order_book.apply_snapshot(snapshot_msg.bids, snapshot_msg.asks, snapshot_msg.update_id)
return order_book
async def _subscribe_channels(self, ws: WSAssistant):
try:
for trading_pair in self._trading_pairs:
subscribe_orderbook_request: WSRequest = WSRequest({
"sub": f"market.{convert_to_exchange_trading_pair(trading_pair)}.depth.step0",
"id": convert_to_exchange_trading_pair(trading_pair)
})
subscribe_trade_request: WSRequest = WSRequest({
"sub": f"market.{convert_to_exchange_trading_pair(trading_pair)}.trade.detail",
"id": convert_to_exchange_trading_pair(trading_pair)
})
await ws.send(subscribe_orderbook_request)
await ws.send(subscribe_trade_request)
self.logger().info("Subscribed to public orderbook and trade channels...")
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error occurred subscribing to order book trading and delta streams...", exc_info=True
)
raise
async def listen_for_subscriptions(self):
ws = None
while True:
try:
ws: WSAssistant = await self._get_ws_assistant()
await ws.connect(ws_url=CONSTANTS.WS_PUBLIC_URL, ping_timeout=self.HEARTBEAT_INTERVAL)
await self._subscribe_channels(ws)
async for ws_response in ws.iter_messages():
data = ws_response.data
if "subbed" in data:
continue
if "ping" in data:
ping_request = WSRequest(payload={
"pong": data["ping"]
})
await ws.send(request=ping_request)
channel = data.get("ch", "")
if channel.endswith(self.TRADE_CHANNEL_SUFFIX):
self._message_queue[self.TRADE_CHANNEL_SUFFIX].put_nowait(data)
if channel.endswith(self.ORDERBOOK_CHANNEL_SUFFIX):
self._message_queue[self.ORDERBOOK_CHANNEL_SUFFIX].put_nowait(data)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error occurred when listening to order book streams. Retrying in 5 seconds...",
exc_info=True,
)
await self._sleep(5.0)
finally:
ws and await ws.disconnect()
async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
message_queue = self._message_queue[self.TRADE_CHANNEL_SUFFIX]
while True:
try:
msg: Dict[str, Any] = await message_queue.get()
trading_pair = msg["ch"].split(".")[1]
timestamp = msg["tick"]["ts"]
for data in msg["tick"]["data"]:
trade_message: OrderBookMessage = HuobiOrderBook.trade_message_from_exchange(
msg=data,
timestamp=timestamp,
metadata={"trading_pair": convert_from_exchange_trading_pair(trading_pair)}
)
output.put_nowait(trade_message)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error with WebSocket connection. Retrying after 30 seconds...",
exc_info=True)
await self._sleep(30.0)
async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
message_queue = self._message_queue[self.ORDERBOOK_CHANNEL_SUFFIX]
while True:
try:
msg: Dict[str, Any] = await message_queue.get()
timestamp = msg["tick"]["ts"]
order_book_message: OrderBookMessage = HuobiOrderBook.diff_message_from_exchange(
msg=msg,
timestamp=timestamp
)
output.put_nowait(order_book_message)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error with WebSocket connection. Retrying after 30 seconds...",
exc_info=True)
await self._sleep(30.0)
async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
while True:
await self._sleep(self.ORDER_BOOK_SNAPSHOT_DELAY)
try:
for trading_pair in self._trading_pairs:
snapshot: Dict[str, Any] = await self.get_snapshot(trading_pair)
snapshot_message: OrderBookMessage = HuobiOrderBook.snapshot_message_from_exchange(
snapshot,
timestamp=snapshot["tick"]["ts"],
metadata={"trading_pair": trading_pair},
)
output.put_nowait(snapshot_message)
self.logger().debug(f"Saved order book snapshot for {trading_pair}")
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error listening for orderbook snapshots. Retrying in 5 secs...", exc_info=True)
await self._sleep(5.0)
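# A hedged usage sketch (the trading pair and the direct event-loop call are assumptions,
# not how hummingbot itself wires this data source):
#
#   data_source = HuobiAPIOrderBookDataSource(trading_pairs=["BTC-USDT"])
#   order_book = asyncio.get_event_loop().run_until_complete(
#       data_source.get_new_order_book("BTC-USDT")
#   )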
| 1.671875
| 2
|
varifier/__main__.py
|
leoisl/varifier
| 0
|
12781456
|
#!/usr/bin/env python3
import argparse
import logging
import varifier
def main(args=None):
parser = argparse.ArgumentParser(
prog="varifier",
usage="varifier <command> <options>",
description="varifier: variant call adjudication",
)
parser.add_argument("--version", action="version", version=varifier.__version__)
parser.add_argument(
"--debug",
help="More verbose logging, and less file cleaning",
action="store_true",
)
subparsers = parser.add_subparsers(title="Available commands", help="", metavar="")
# ---------------------- make_truth_vcf ------------------------------------
subparser_make_truth_vcf = subparsers.add_parser(
"make_truth_vcf",
help="Make truth VCF file",
usage="varifier make_truth_vcf [options] <truth_fasta> <ref_fasta> <outdir>",
description="Make truth VCF file",
)
subparser_make_truth_vcf.add_argument(
"truth_fasta", help="FASTA file of truth genome"
)
subparser_make_truth_vcf.add_argument(
"ref_fasta", help="FASTA file of reference genome"
)
subparser_make_truth_vcf.add_argument(
"--snps_only", help="Output SNPs only",
action="store_true",
)
subparser_make_truth_vcf.add_argument(
"--output_probes_in_VCF", help="If REF and ALT probes should be output in VCF",
action="store_true",
)
subparser_make_truth_vcf.add_argument(
"--detailed_VCF", help="Outputs all fields computed by varifier in the final VCF, instead of only GT",
action="store_true",
)
subparser_make_truth_vcf.add_argument(
"--max_recall_ref_len",
help="Do not include variants where REF length is more than this number. Default is no limit",
type=int,
metavar="INT",
)
subparser_make_truth_vcf.add_argument(
"--flank_length",
help="Length of sequence to add either side of variant when making probe sequences [%(default)s]",
type=int,
default=100,
metavar="INT",
)
subparser_make_truth_vcf.add_argument(
"--truth_mask",
help="BED file of truth genome regions to mask. Any variants in the VCF matching to the mask are flagged and will not count towards precision or recall if the output VCF is used with vcf_eval",
metavar="FILENAME",
)
subparser_make_truth_vcf.add_argument("outdir", help="Name of output directory")
subparser_make_truth_vcf.set_defaults(func=varifier.tasks.make_truth_vcf.run)
# ------------------------ vcf_eval ----------------------------------------
subparser_vcf_eval = subparsers.add_parser(
"vcf_eval",
help="Evaluate VCF file",
usage="varifier vcf_eval [options] <truth_fasta> <vcf_fasta> <vcf_file> <outdir>",
description="Evaluate VCF file",
)
subparser_vcf_eval.add_argument(
"--flank_length",
help="Length of sequence to add either side of variant when making probe sequences [%(default)s]",
type=int,
default=100,
metavar="INT",
)
subparser_vcf_eval.add_argument(
"--force", help="Replace outdir if it already exists", action="store_true"
)
subparser_vcf_eval.add_argument(
"--ref_mask",
help="BED file of ref regions to mask. Any variants in the VCF overlapping the mask are removed at the start of the pipeline",
metavar="FILENAME",
)
subparser_vcf_eval.add_argument(
"--truth_mask",
help="BED file of truth genome regions to mask. Any variants in the VCF matching to the mask are flagged and do not count towards precision or recall",
metavar="FILENAME",
)
subparser_vcf_eval.add_argument(
"--truth_vcf",
help="VCF file of variant calls between vcf_fasta and truth_fasta, where reference of this VCF file is truth_fasta. If provided, used to calculate recall",
metavar="FILENAME",
)
subparser_vcf_eval.add_argument(
"--max_recall_ref_len",
help="For recall, do not look for expected variants where REF length is more than this number. Default is no limit. This option will not work if you use --truth_vcf",
type=int,
metavar="INT",
)
subparser_vcf_eval.add_argument(
"--use_ref_calls",
help="Include 0/0 genotype calls when calculating TPs and precision. By default they are ignored",
action="store_true",
)
subparser_vcf_eval.add_argument("truth_fasta", help="FASTA file of truth genome")
subparser_vcf_eval.add_argument(
"vcf_fasta", help="FASTA file corresponding to vcf_file"
)
subparser_vcf_eval.add_argument("vcf_in", help="VCF file to evaluate")
subparser_vcf_eval.add_argument("outdir", help="Name of output directory")
subparser_vcf_eval.set_defaults(func=varifier.tasks.vcf_eval.run)
args = parser.parse_args()
log = logging.getLogger()
if args.debug:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
if hasattr(args, "func"):
args.func(args)
else:
parser.print_help()
if __name__ == "__main__":
main()
| 2.515625
| 3
|
random_tweet.py
|
michaelbutler/random-tweet
| 6
|
12781457
|
<reponame>michaelbutler/random-tweet<gh_stars>1-10
import sys
from random_tweet.random_tweet_lib import get_random_tweet, format_tweet
def load_credentials(filepath: str) -> dict:
"""
Load a file containing key and secret credentials, separated by a line break (\n)
Returns a dict with the corresponding credentials
"""
with open(filepath, 'r') as file_resource:
data = file_resource.read().strip().split('\n')
return {
'consumer_key': data[0],
'consumer_secret': data[1]
}
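# A hedged sketch of the expected "./credentials" file, assuming the two-line format
# described in load_credentials (values below are placeholders, not real keys):
#
#   YOUR_CONSUMER_KEY
#   YOUR_CONSUMER_SECRET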
if __name__ == "__main__":
"""
Main entry point for the command line application
"""
term = ""
try:
term = sys.argv[1]
except IndexError:
print("Error: No search term entered.")
print("Usage: python random_tweet.py <search_term>")
exit(1)
credentials = {}
try:
credentials = load_credentials('./credentials')
except IOError:
print('"Credentials" file not found')
exit(1)
tweet = get_random_tweet(term, credentials)
if not tweet:
print("No tweets were found with that search term. You can try a different term or try again later.")
exit(1)
print(format_tweet(tweet))
| 3.640625
| 4
|
python/pyabc/__init__.py
|
MarbleHE/ABC
| 6
|
12781458
|
<reponame>MarbleHE/ABC<gh_stars>1-10
from .ABCContext import ABCContext
from .ABCProgram import ABCProgram
from .ABCTypes import *
| 1.21875
| 1
|
account/migrations/0002_account_statut.py
|
Rizvane95140/Educollab
| 0
|
12781459
|
# Generated by Django 3.0.2 on 2020-01-13 19:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='account',
name='statut',
field=models.CharField(choices=[('PROFESSOR', 'PROFESSOR'), ('STUDENT', 'STUDENT')], default='STUDENT', max_length=10),
),
]
| 1.9375
| 2
|
tests/core/endpoints/test_eth_get_transaction_by_blockhash_and_index.py
|
cducrest/eth-tester-rpc
| 3
|
12781460
|
<filename>tests/core/endpoints/test_eth_get_transaction_by_blockhash_and_index.py
def test_eth_getTransactionByBlockHashAndIndex_for_unknown_hash(rpc_client):
result = rpc_client(
method="eth_getTransactionByBlockHashAndIndex",
params=["0x0000000000000000000000000000000000000000000000000000000000000000", 0],
)
assert result is None
def test_eth_getTransactionByBlockHashAndIndex(rpc_client, accounts):
tx_hash = rpc_client(
method="eth_sendTransaction",
params=[{
"from": accounts[0],
"to": accounts[1],
"value": 1234,
"data": "0x1234",
"gas": 100000,
}],
)
block_number = rpc_client('eth_blockNumber')
block = rpc_client(
'eth_getBlockByNumber',
params=[block_number]
)
txn = rpc_client(
method="eth_getTransactionByBlockHashAndIndex",
params=[block['hash'], 0]
)
assert txn
assert txn['hash'] == tx_hash
| 2.0625
| 2
|
decharges/parametre/tests/test_models.py
|
Brndan/decharges-sudeducation
| 0
|
12781461
|
import pytest
from decharges.parametre.models import ParametresDApplication
pytestmark = pytest.mark.django_db
def test_instanciate_parameters():
params = ParametresDApplication.objects.create()
assert f"{params}" == "Paramètres de l'application"
| 2.140625
| 2
|
src/my_project/easy_problems/from51to100/binary_tree_preorder_traversal.py
|
ivan1016017/LeetCodeAlgorithmProblems
| 0
|
12781462
|
<reponame>ivan1016017/LeetCodeAlgorithmProblems
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def __init__(self):
self.temp = []
def preorderTraversal(self, root: TreeNode) -> List[int]:
self.listTraversal(root)
return self.temp
def listTraversal(self, root: TreeNode):
if root is not None:
self.temp.append(root.val)
self.preorderTraversal(root.left)
self.preorderTraversal(root.right)
solution = Solution()
print(solution.preorderTraversal(TreeNode(1,TreeNode(2))))
| 3.890625
| 4
|
adminmgr/media/code/python/red2/BD_204_219_1354_reducer.py
|
IamMayankThakur/test-bigdata
| 9
|
12781463
|
<reponame>IamMayankThakur/test-bigdata
#!/usr/bin/python3
import csv
from operator import itemgetter
import sys
runvery={}
final={}
for line in sys.stdin:
line = line.strip()
line_val = line.split("\t")
key, val = line_val[0], line_val[1]
keysplit=key.split(",")
bbpair=keysplit[0]+","+keysplit[1]
runs=int(keysplit[2])
if bbpair in runvery.keys():
runvery[bbpair][0]=1+runvery[bbpair][0]
runvery[bbpair][1]=runs+runvery[bbpair][1]
else:
runvery[bbpair]=[1,0]
runvery[bbpair][1]=runs+runvery[bbpair][1]
for x, y in runvery.items():
if y[0]>5:
final[x]=y
listkeys = sorted(final.items())
listdel = sorted(listkeys, key = lambda x: x[1][0])
listrun = sorted(listdel, key = lambda x: x[1][1], reverse = True)
for i in listrun:
print (i[0]+","+str(i[1][1])+","+str(i[1][0]))
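# A hedged sketch of the streaming I/O (the exact field meanings are assumptions):
# each stdin line from the mapper looks like "<key1>,<key2>,<runs>\t<count>", where the
# value after the tab is read but not used; pairs that appear on more than five lines
# are printed as "<key1>,<key2>,<total_runs>,<lines>", sorted by total runs descending.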
| 2.90625
| 3
|
chem/ftlib/finetune/gtot.py
|
youjibiying/-GTOT-Tuning
| 1
|
12781464
|
import numpy as np
import torch
import torch.nn as nn
# Adapted from https://github.com/gpeyre/SinkhornAutoDiff
# Adapted from https://github.com/gpeyre/SinkhornAutoDiff/blob/master/sinkhorn_pointcloud.py
class GTOT(nn.Module):
r"""
GTOT implementation.
"""
def __init__(self, eps=0.1, thresh=0.1, max_iter=100, reduction='none'):
super(GTOT, self).__init__()
self.eps = eps
self.max_iter = max_iter
self.reduction = reduction
self.thresh = thresh
self.mask_matrix = None
def marginal_prob_unform(self, N_s=None, N_t=None, mask=None, ):
if mask is not None:
mask = mask.float()
# uniform distribution
mask_mean = (1 / mask.sum(1)).unsqueeze(1)
mu = mask * mask_mean # 1/n
# mu = mu.unsqueeze(2)
else:
mu = torch.ones(self.bs, N_s) / N_s
nu = mu.clone().detach()
return mu, nu
def forward(self, x, y, C=None, A=None, mask=None):
# The Sinkhorn algorithm takes as input three variables :
if C is None:
C = self._cost_matrix(x, y) # Wasserstein cost function
C = C / C.max()
if A is not None:
if A.type().startswith('torch.cuda.sparse'):
self.sparse = True
C = A.to_dense() * C
else:
self.sparse = False
C = A * C
N_s = x.shape[-2]
N_t = y.shape[-2]
if x.dim() == 2:
self.bs = 1
else:
self.bs = x.shape[0]
# both marginals are fixed with equal weights
if mask is None:
mu = torch.empty(self.bs, N_s, dtype=torch.float, device=C.device,
requires_grad=False).fill_(1.0 / N_s).squeeze()
nu = torch.empty(self.bs, N_t, dtype=torch.float, device=C.device,
requires_grad=False).fill_(1.0 / N_t).squeeze()
else:
mu, nu = self.marginal_prob_unform(N_s=N_s, N_t=N_t, mask=mask)
u = torch.zeros_like(mu)
v = torch.zeros_like(nu)
# To check if algorithm terminates because of threshold
# or max iterations reached
actual_nits = 0
# Stopping criterion
thresh = self.thresh
# Sinkhorn iterations
for i in range(self.max_iter):
u1 = u # useful to check the update
if mask is None:
u = self.eps * (torch.log(mu + 1e-8) - self.log_sum(self.exp_M(C, u, v, A=A), dim=-1)) + u
v = self.eps * (
torch.log(nu + 1e-8) - self.log_sum(self.exp_M(C, u, v, A=A).transpose(-2, -1), dim=-1)) + v
else:
u = self.eps * (torch.log(mu + 1e-8) - self.log_sum(self.exp_M(C, u, v, A=A), dim=-1)) + u
u = mask * u
v = self.eps * (
torch.log(nu + 1e-8) - self.log_sum(self.exp_M(C, u, v, A=A).transpose(-2, -1), dim=-1)) + v
v = mask * v
# err = (u - u1).abs().sum(-1).mean()
err = (u - u1).abs().sum(-1).max()
actual_nits += 1
if err.item() < thresh:
break
U, V = u, v
pi = self.exp_M(C, U, V, A=A)
cost = torch.sum(pi * C, dim=(-2, -1))
if self.reduction == 'mean':
cost = cost.mean()
elif self.reduction == 'sum':
cost = cost.sum()
        if torch.isnan(cost.sum()):
            print(pi)
            raise ValueError("NaN encountered in the GTOT transport cost")
return cost, pi, C
def M(self, C, u, v, A=None):
"Modified cost for logarithmic updates"
"$M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$"
S = (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / self.eps
return S
def exp_M(self, C, u, v, A=None):
if A is not None:
if self.sparse:
a = A.to_dense()
S = torch.exp(self.M(C, u, v)).masked_fill(mask = (1-a).to(torch.bool),value=0)
else:
S = torch.exp(self.M(C, u, v)).masked_fill(mask = (1-A).to(torch.bool),value=0)
return S
elif self.mask_matrix is not None:
return self.mask_matrix * torch.exp(self.M(C, u, v))
else:
return torch.exp(self.M(C, u, v))
def log_sum(self, input_tensor, dim=-1, mask=None):
s = torch.sum(input_tensor, dim=dim)
out = torch.log(1e-8 + s)
        if torch.isnan(out.sum()):
            raise ValueError("NaN encountered in log_sum")
if mask is not None:
out = mask * out
return out
def cost_matrix_batch_torch(self, x, y, mask=None):
"Returns the cosine distance batchwise"
# x is the source feature: bs * d * m
# y is the target feature: bs * d * m
# return: bs * n * m
# print(x.size())
bs = list(x.size())[0]
D = x.size(1)
assert (x.size(1) == y.size(1))
x = x.contiguous().view(bs, D, -1) # bs * d * m
x = x.div(torch.norm(x, p=2, dim=1, keepdim=True) + 1e-12)
y = y.div(torch.norm(y, p=2, dim=1, keepdim=True) + 1e-12)
cos_dis = torch.bmm(torch.transpose(x, 1, 2), y) # .transpose(1,2)
cos_dis = 1 - cos_dis # to minimize this value
# cos_dis = - cos_dis
if mask is not None:
mask0 = mask.unsqueeze(2).clone().float()
self.mask_matrix = torch.bmm(mask0, (mask0.transpose(2, 1))) # torch.ones_like(C)
cos_dis = cos_dis * self.mask_matrix
        if torch.isnan(cos_dis.sum()):
            raise ValueError("NaN encountered in the cosine cost matrix")
return cos_dis.transpose(2, 1)
def cost_matrix_torch(self, x, y):
"Returns the cosine distance"
# x is the image embedding
# y is the text embedding
D = x.size(0)
x = x.view(D, -1)
assert (x.size(0) == y.size(0))
x = x.div(torch.norm(x, p=2, dim=0, keepdim=True) + 1e-12)
y = y.div(torch.norm(y, p=2, dim=0, keepdim=True) + 1e-12)
cos_dis = torch.mm(torch.transpose(y, 0, 1), x) # .t()
cos_dis = 1 - cos_dis # to minimize this value
return cos_dis
@staticmethod
def _cost_matrix(x, y, p=2):
"Returns the matrix of $|x_i-y_j|^p$."
x_col = x.unsqueeze(-2)
y_lin = y.unsqueeze(-3)
C = torch.sum((torch.abs(x_col - y_lin)) ** p, -1)
return C
@staticmethod
def ave(u, u1, tau):
"Barycenter subroutine, used by kinetic acceleration through extrapolation."
return tau * u + (1 - tau) * u1
if __name__ == '__main__':
def random_A(n, dense_rate=0.5):
d = n
rand_mat = torch.rand(n, d)
k = round(dense_rate * d) # For the general case change 0.25 to the percentage you need
k_th_quant = torch.topk(rand_mat, k, largest=False)[0][:, -1:]
bool_tensor = rand_mat <= k_th_quant
desired_tensor = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0))
return desired_tensor
n = 5
batch_size = 2
a = np.array([[[i, 0] for i in range(n)] for b in range(batch_size)])
b = np.array([[[i, b + 1] for i in range(n)] for b in range(batch_size)])
# Wrap with torch tensors
x = torch.tensor(a, dtype=torch.float)
y = torch.tensor(b, dtype=torch.float)
x = x.cuda()
y = y.cuda()
for i in np.array(range(2, 11)) * 0.1:
dense_rate = i
print('Adjacent matrix dense_rate', dense_rate, end=' ')
A = random_A(n, dense_rate=dense_rate)
A[range(A.shape[0]), range(A.shape[0])] = 1
# A = torch.eye(n)
print(A)
A = A.repeat(batch_size, 1, 1)
A = A.cuda().to_sparse()
# A=None
sinkhorn = GTOT(eps=0.1, max_iter=100, reduction=None)
dist, P, C = sinkhorn(x, y, A=A)
print("Sinkhorn distances: ", dist)
| 2.234375
| 2
|
smlogging.py
|
nooneisperfect/ReadYourMAPFile
| 0
|
12781465
|
<gh_stars>0
# -*- coding: utf-8 -*-
INFO = 1
ERROR = 2
DEBUG = 4
#loglevel = DEBUG | INFO | ERROR
loglevel = ERROR
def log(level, *args):
if (loglevel & level) != 0:
print (" ".join(map(str, args)))
| 2.90625
| 3
|
services/storage/src/simcore_service_storage/s3.py
|
colinRawlings/osparc-simcore
| 25
|
12781466
|
<reponame>colinRawlings/osparc-simcore
""" Module to access s3 service
"""
import logging
from typing import Dict
from aiohttp import web
from tenacity import before_sleep_log, retry, stop_after_attempt, wait_fixed
from .constants import APP_CONFIG_KEY, APP_S3_KEY
from .s3wrapper.s3_client import MinioClientWrapper
from .settings import Settings
from .utils import RETRY_COUNT, RETRY_WAIT_SECS
log = logging.getLogger(__name__)
async def _setup_s3_bucket(app):
log.debug("setup %s.setup.cleanup_ctx", __name__)
# setup
s3_client = app[APP_S3_KEY]
cfg: Settings = app[APP_CONFIG_KEY]
@retry(
wait=wait_fixed(RETRY_WAIT_SECS),
stop=stop_after_attempt(RETRY_COUNT),
before_sleep=before_sleep_log(log, logging.WARNING),
reraise=True,
)
async def do_create_bucket():
log.debug("Creating bucket: %s", cfg.STORAGE_S3.json(indent=2))
s3_client.create_bucket(cfg.STORAGE_S3.S3_BUCKET_NAME)
try:
await do_create_bucket()
except Exception: # pylint: disable=broad-except
log.exception("Impossible to create s3 bucket. Stoping")
# ok, failures_count = False, 0
# while not ok:
# try:
# s3_client.create_bucket(s3_bucket)
# ok = True
# except Exception: # pylint: disable=W0703
# failures_count +=1
# if failures_count>RETRY_COUNT:
# log.exception("")
# raise
# await asyncio.sleep(RETRY_WAIT_SECS)
yield
# tear-down
log.debug("tear-down %s.setup.cleanup_ctx", __name__)
def setup_s3(app: web.Application):
"""minio/s3 service setup"""
log.debug("Setting up %s ...", __name__)
STORAGE_DISABLE_SERVICES = app[APP_CONFIG_KEY].STORAGE_DISABLE_SERVICES
if "s3" in STORAGE_DISABLE_SERVICES:
log.warning("Service '%s' explicitly disabled in config", "s3")
return
cfg = app[APP_CONFIG_KEY]
s3_client = MinioClientWrapper(
cfg.STORAGE_S3.S3_ENDPOINT,
cfg.STORAGE_S3.S3_ACCESS_KEY,
cfg.STORAGE_S3.S3_SECRET_KEY,
secure=cfg.STORAGE_S3.S3_SECURE,
)
app[APP_S3_KEY] = s3_client
app.cleanup_ctx.append(_setup_s3_bucket)
def get_config_s3(app: web.Application) -> Dict:
cfg = app[APP_CONFIG_KEY].STORAGE_S3
return cfg
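# A hedged wiring sketch (constructing Settings with no arguments and the exact config
# attributes beyond those referenced above are assumptions):
#
#   app = web.Application()
#   app[APP_CONFIG_KEY] = Settings()   # must expose STORAGE_DISABLE_SERVICES and STORAGE_S3
#   setup_s3(app)                      # registers the Minio client and the bucket-creation cleanup_ctx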
| 1.921875
| 2
|
zeroae/goblet/chalice/__init__.py
|
andrew-mcgrath/zeroae-goblet
| 1
|
12781467
|
"""The Chalice Blueprint and Configuration for the Goblin framework."""
from .blueprint import bp
from ..config import configure
__all__ = ["bp", "configure"]
| 1.0625
| 1
|
release/stubs.min/System/Windows/Forms/__init___parts/AutoCompleteMode.py
|
tranconbv/ironpython-stubs
| 0
|
12781468
|
class AutoCompleteMode(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the mode for the automatic completion feature used in the System.Windows.Forms.ComboBox and System.Windows.Forms.TextBox controls.
enum AutoCompleteMode,values: Append (2),None (0),Suggest (1),SuggestAppend (3)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return AutoCompleteMode()
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Append=None
None_ =None
Suggest=None
SuggestAppend=None
value__=None
| 2.03125
| 2
|
code/run/remote-server/admin_views.py
|
xianggenb/Android-Studio-Health-Monitor-App
| 0
|
12781469
|
from admin_app_config import db
from models import (User, HazardSummary, HazardLocation)
from views.home_view import HomeView
from views.login_view import LoginView
from views.logout_view import LogoutView
from views.user_view import UserView
from views.mobile_view import (MobileLoginView, MobileView)
from views.user_dash_view import UserDashView
from views.business_dash_view import BusinessDashView
from views.hazard_summary_view import HazardSummaryView
from views.hazard_location_view import HazardLocationView
def add_admin_views(admin, app):
# Home View
admin.add_view(HomeView(name='Home', endpoint='home'))
# Mobile view handling
admin.add_view(MobileLoginView(
name='Mobile Login', endpoint='mobilelogin'))
admin.add_view(MobileView(name='Mobile', endpoint='mobile'))
# User dash view handling
admin.add_view(UserDashView(name='User Portal', endpoint='userdash',
app=app))
admin.add_view(BusinessDashView(name='Business Portal',
endpoint='businessdash', app=app))
# Admin portal views
admin.add_view(UserView(User, db.session, name='Users'))
admin.add_view(HazardSummaryView(
HazardSummary, db.session, name='Hazard Summary'))
admin.add_view(HazardLocationView(
HazardLocation, db.session, name='Hazard Locations'))
# Login and Logout views
admin.add_view(LoginView(name='Login', endpoint='login'))
admin.add_view(LogoutView(name='Logout', endpoint='logout'))
| 1.851563
| 2
|
BlogNews/BlogNews/pipelines.py
|
c727657851/ScrapyPratice
| 0
|
12781470
|
<reponame>c727657851/ScrapyPratice
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from twisted.enterprise import adbapi
import pymysql
import copy
class BlognewsPipeline:
def __init__(self,dbpool):
self.dbpool = dbpool
    @classmethod  # fixed method name: Scrapy calls it and passes in the settings values directly
def from_settings(cls,settings):
adbparams = dict(
host=settings['MYSQL_HOST'],
db=settings['MYSQL_DBNAME'],
user=settings['MYSQL_USER'],
password=settings['<PASSWORD>'],
port=settings['MYSQL_PORT'],
            cursorclass=pymysql.cursors.DictCursor  # use a dict-style cursor
        )
        # Build the ConnectionPool; connect with pymysql (or MySQLdb)
        dbpool = adbapi.ConnectionPool('pymysql', **adbparams)
        # Return an instance of the pipeline constructed with the pool
return cls(dbpool)
def do_insert(self,cursor,item):
        # Run the insert; no explicit commit is needed, Twisted commits automatically
insert_sql = """
insert into blog_news(title, summary, new_url, pub_time) VALUES (%s,%s,%s,%s)
"""
cursor.execute(insert_sql,(item['title'],item['new_summary'],item['new_url'],item['pub_time']))
def handle_error(self,failure):
print(failure)
def process_item(self, item, spider):
"""
        Use Twisted to run the MySQL insert asynchronously. The connection pool executes
        the concrete SQL operation and returns a Deferred.
        """
        # Deep-copy the item so the asynchronous insert sees a stable snapshot
        asynItem = copy.deepcopy(item)  # requires `import copy`
        query = self.dbpool.runInteraction(self.do_insert, asynItem)  # specify the operation and its data
        # Attach error handling
        query.addErrback(self.handle_error)  # handle insert failures
return item
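# A hedged configuration sketch for the project's settings.py (the priority value and the
# MySQL values are assumptions; the password key is redacted above and is not repeated here):
#
#   ITEM_PIPELINES = {
#       "BlogNews.pipelines.BlognewsPipeline": 300,
#   }
#   MYSQL_HOST = "localhost"
#   MYSQL_DBNAME = "blog_news"
#   MYSQL_USER = "scrapy"
#   MYSQL_PORT = 3306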
| 2.5
| 2
|
multi_channel_RI.py
|
salesforce/RIRL
| 0
|
12781471
|
<filename>multi_channel_RI.py
#
# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import Categorical, Normal
class MutualInformationClassifier(nn.Module):
"""
Classifier used to estimate I(X,Y).
Classification is trained to discriminate samples from p(x, y) versus p(x)p(y).
The `log_odds_ratio` method can be used for estimating MI cost.
The `forward` method provides a training loss.
Args:
x_dim (int): Dimension size of the x input.
y_dim (int): Dimension size of the y input.
h_dims (int, float, tuple, list): Defaults to (64,). Sets the hidden size(s)
of the encoder layer(s). If an int or float, interpreted as the hidden
size of a single hidden layer net.
"""
def __init__(self, x_dim, y_dim, h_dims=(64,)):
super().__init__()
self.x_dim = int(x_dim)
self.y_dim = int(y_dim)
if isinstance(h_dims, (int, float)):
self.h_dims = [int(h_dims)]
elif isinstance(h_dims, (list, tuple)):
self.h_dims = list(h_dims)
else:
raise TypeError
layer_list = []
last_h_dim = self.x_dim + self.y_dim
for h_dim in self.h_dims:
layer_list.append(nn.Linear(last_h_dim, h_dim))
layer_list.append(nn.ReLU())
last_h_dim = int(h_dim)
layer_list.append(nn.Linear(last_h_dim, 2))
self.logits = nn.Sequential(*layer_list)
self.softmax = nn.Softmax(dim=1)
self.loss = nn.CrossEntropyLoss()
def log_odds_ratio(self, xs, ys):
"""This is the basis of the MI cost!"""
xy = torch.cat([xs.view(-1, self.x_dim), ys.view(-1, self.y_dim)], 1)
log_probs = torch.log(self.softmax(self.logits(xy)))
# Clipping prevents nan-causing values at OOD x,y pairs.
clipped_log_odds_ratio = torch.clamp(
log_probs[:, 1] - log_probs[:, 0],
min=-50.0,
max=50.0
).detach().numpy()
return clipped_log_odds_ratio
def predict(self, xs, ys):
"""Gives probability that x,y pairs come from p(x,y) versus p(x)p(y)"""
xy = torch.cat([xs.view(-1, self.x_dim), ys.view(-1, self.y_dim)], 1)
probs = self.softmax(self.logits(xy))
return probs[:, 1]
def forward(self, xs, ys):
"""Given a batch of xs and corresponding ys, returns training loss."""
xs = xs.view(-1, self.x_dim).detach()
ys = ys.view(-1, self.y_dim).detach()
xy_from_policy = torch.cat([xs, ys], dim=1)
xy_from_marginals = torch.cat([xs, ys[torch.randperm(ys.shape[0])]], dim=1)
xy = torch.cat([xy_from_policy, xy_from_marginals], dim=0)
targets = torch.cat(
[torch.ones(ys.shape[0], dtype=torch.long),
torch.zeros(ys.shape[0], dtype=torch.long)],
dim=0
)
return self.loss(self.logits(xy), targets).mean()
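# A hedged usage sketch (shapes, learning rate, and the synthetic data are assumptions):
# train the discriminator on correlated (x, y) pairs, then read out the per-sample
# log-odds, which serve as the pointwise MI estimate used as a cost elsewhere in this file.
#
#   mi = MutualInformationClassifier(x_dim=4, y_dim=4)
#   opt = torch.optim.Adam(mi.parameters(), lr=1e-3)
#   xs = torch.randn(256, 4)
#   ys = xs + 0.1 * torch.randn(256, 4)        # y is strongly dependent on x
#   for _ in range(200):
#       opt.zero_grad()
#       mi(xs, ys).backward()                  # forward() returns the training loss
#       opt.step()
#   pointwise_mi = mi.log_odds_ratio(xs, ys)   # numpy array, one estimate per sample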
class PerceptualEncoder(nn.Module):
"""
Encodes input X into stochastic Y.
Learns p(y | x, aux), where aux is an optional auxiliary input.
The auxiliary input is separate from x in case we care specifically about I(X,Y).
The `encode` method samples from p(y | x, aux) and gives the log_prob of the sample.
The `forward` method provides a policy-gradient training loss.
Args:
x_dim (int): The dimension size of the x input.
aux_dim (optional, int): If supplied, the dimension size of the auxiliary input.
The encoding "y" will be a function of the concatenation of "x" and "aux"
inputs, if aux is not None.
residual_style (bool): Defaults to True. Whether to add the stochastic
encoding to the "x" input. If chosen, the encoding dimension ("y_dim") is
automatically set to match "x_dim".
y_dim (optional, int): Only used if residual_style=False, in which case you
must supply this argument, as it determines the dimension size of the
output encoding "y".
h_dims (int, float, list, tuple): Defaults to (64,). Sets the hidden size(s)
of the encoder layer(s). If an int or float, interpreted as the hidden
size of a single hidden layer net.
log_std_bias (float): Defaults to -3.0. A bias term to manually add to the
STD of the output, before applying the softplus nonlinearity. A negative
value helps to ensure that sampled encodings "y" are not overly noisy
during the early phases of training. This is empirically helpful when "y" is
used as an input to a decision policy.
"""
def __init__(self,
x_dim,
aux_dim=None,
residual_style=True,
y_dim=None,
h_dims=(64,),
log_std_bias=-3.0
):
super().__init__()
self.x_dim = int(x_dim)
assert self.x_dim > 0
if isinstance(aux_dim, (int, float)):
self.aux_dim = int(aux_dim)
elif aux_dim is None:
self.aux_dim = 0
else:
raise TypeError
assert self.aux_dim >= 0
self.residual_style = bool(residual_style)
if self.residual_style:
self.y_dim = self.x_dim
else:
self.y_dim = int(y_dim)
assert self.y_dim > 0
if isinstance(h_dims, (int, float)):
self.h_dims = [int(h_dims)]
elif isinstance(h_dims, (list, tuple)):
self.h_dims = list(h_dims)
else:
raise TypeError
self.log_std_bias = float(log_std_bias)
assert -5.0 <= self.log_std_bias <= 5.0
layer_list = []
last_h_dim = self.x_dim + self.aux_dim
for h_dim in self.h_dims:
layer_list.append(nn.Linear(last_h_dim, h_dim))
layer_list.append(nn.ReLU())
last_h_dim = int(h_dim)
layer_list.append(
nn.Linear(last_h_dim, self.y_dim * 2)
# Output mean and pre-softplus-STD for each y (output) dimension
)
self.encoded_params = nn.Sequential(*layer_list)
self.softplus = nn.Softplus()
def get_encoding_mean_and_std(self, xs, auxs=None):
xs = xs.view(-1, self.x_dim)
if auxs is None:
inp = xs
else:
assert self.aux_dim > 0
aux_inp = auxs.view(-1, self.aux_dim)
inp = torch.cat([xs, aux_inp], 1)
mean_and_log_std = self.encoded_params(inp)
y_mean = mean_and_log_std[:, :self.y_dim]
y_std = self.softplus(mean_and_log_std[:, self.y_dim:] + self.log_std_bias)
if self.residual_style:
y_mean = y_mean + xs
y_std = torch.clamp(y_std, 0.05, 100)
y_mean = torch.clamp(y_mean, 0.0, 1000) #TODO: Remove this, I think the clamp of y_std to greater than 0.05 is all that is necessary
return y_mean, y_std
def encode(self, xs, auxs=None):
y_mean, y_std = self.get_encoding_mean_and_std(xs=xs, auxs=auxs)
        try:
            m = Normal(y_mean, y_std)
        except ValueError:
            # Log the offending parameters, then re-raise so `m` is never used while undefined.
            print(y_mean)
            print(y_std)
            raise
# IMPORTANT!!! Gradients can flow through the sampled ys (because `rsample`)
ys = m.rsample()
# Necessary to detach ys here to get proper gradient flow.
enc_log_probs = m.log_prob(ys.detach()).sum(dim=1)
return ys, enc_log_probs
def forward(self, enc_log_probs, advantages):
losses = -enc_log_probs * advantages.detach()
return losses.mean()
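# A hedged usage sketch (batch size, dimensions, and the random advantages are assumptions):
# with residual_style=True the encoding y has the same dimension as x and is sampled
# around it; forward() turns (log_prob, advantage) pairs into a REINFORCE-style loss.
#
#   enc = PerceptualEncoder(x_dim=6)
#   xs = torch.randn(32, 6)
#   ys, log_probs = enc.encode(xs)       # ys: (32, 6); log_probs: (32,)
#   advantages = torch.randn(32)         # stand-in for task advantages
#   enc(log_probs, advantages).backward()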
class CostlyPerceptualEncoder(nn.Module):
"""
Combines PerceptualEncoder instance with dedicated MutualInformationClassifier
instance to (1) generate perceptual encodings of inputs and (2) maintain an
estimate of the MI of the encodings, all of which is trainable.
Args:
x_dim (int): See "x_dim" of PerceptionEncoder
mi_h_dims (int, float, list, tuple): Passed as the "h_dims" argument to the
MutualInformationClassifier.
mi_cost (float): Defaults to 0.0. The scale applied to the MI estimate when
outputing MI cost of the sampled encoding "y" given "x".
aux_to_mi (bool): Defaults to True. If True, the auxiliary inputs (if any)
are treated as part of the "x" input to the MI classifier.
This changes the interpretation of MI cost!!!
**perceptual_encoder_kwargs: Keyword arguments of PerceptualEncoder that will be
passed to it when constructing the PerceptualEncoder instance.
"""
def __init__(self, x_dim, mi_h_dims=(64,), mi_cost=0.0, aux_to_mi=True,
**perceptual_encoder_kwargs):
super().__init__()
self.x_dim = int(x_dim)
assert self.x_dim > 0
self.mi_cost = float(mi_cost)
assert self.mi_cost >= 0.0
self.aux_to_mi = bool(aux_to_mi)
self.perceptual_encoder = PerceptualEncoder(
self.x_dim,
**perceptual_encoder_kwargs
)
self.aux_dim = self.perceptual_encoder.aux_dim
self.y_dim = int(self.perceptual_encoder.y_dim)
assert self.y_dim > 0
self.mi_classifier = MutualInformationClassifier(
self.x_dim + (self.aux_dim if self.aux_to_mi else 0),
self.y_dim,
mi_h_dims,
)
def get_encoder_params(self):
return self.perceptual_encoder.parameters()
def get_mi_classifier_params(self):
return self.mi_classifier.parameters()
def get_encoding_mean_and_std(self, xs, auxs=None):
return self.perceptual_encoder.get_encoding_mean_and_std(xs=xs, auxs=auxs)
def encode(self, xs, auxs=None):
# Sample a (stochastic) encoding of the input x (can be conditioned on
# auxiliary input)
ys, enc_log_probs = self.perceptual_encoder.encode(xs, auxs)
# Estimate the MI cost associated with each encoding.
if self.aux_to_mi and self.aux_dim > 0:
assert auxs is not None
mi_xs = torch.cat([xs.view(-1, self.x_dim), auxs.view(-1, self.aux_dim)], 1)
else:
mi_xs = xs
mi_cost = self.mi_classifier.log_odds_ratio(mi_xs, ys) * self.mi_cost
return ys, enc_log_probs, mi_cost
def forward(self, mi_xs, ys, log_probs, advantages):
# Note, assumption here is that auxiliary inputs have been concatenated with
# xs, if appropriate
mi_classifier_loss = self.mi_classifier(mi_xs, ys)
encoder_loss = self.perceptual_encoder(log_probs, advantages)
return mi_classifier_loss + encoder_loss
class MultiChannelCostlyPerceptualEncoder(nn.Module):
"""
Generates a single stochastic percept from multiple perceptual channels.
Note: If an auxiliary input is used, it will be used by each perceptual encoder.
Args:
aux_dim (optional, int): Dimension size of any auxiliary input that will be
fed as an input to each channel-specific costly encoder.
**channel_dicts (dictionary): Each key is the name of a channel, and each
value is the kwargs dictionary for the CostlyPerceptualEncoder instance
associated with that channel.
NOTE: The names of the channels will need to match the names of the xs,
as shown in the example.
Example:
# Make an instance with 2 channels, no auxilairy input
mccpe = MultiChannelCostlyPerceptualEncoder(
channel_dicts={
'a': dict(x_dim=5, log_std_bias=0.0, mi_cost=1.0),
'b': dict(x_dim=3, residual_style=False, y_dim=10, mi_cost=2.0)
}
)
# Encode some channel-separated input
obs_a, obs_b = ...
ys, mi_costs, training_dict, avg_mi_cost_channel = mccpe.encode({'a': obs_a, 'b': obs_b})
"""
def __init__(self, aux_dim=None, channel_dicts={}):
super().__init__()
if isinstance(aux_dim, (int, float)):
self.aux_dim = int(aux_dim)
elif aux_dim is None:
self.aux_dim = 0
else:
raise TypeError
assert self.aux_dim >= 0
assert isinstance(channel_dicts, dict)
self.encoders = nn.ModuleDict()
for channel_name, channel_kwargs in channel_dicts.items():
if 'aux_dim' in channel_kwargs:
if self.aux_dim == 0:
assert channel_kwargs['aux_dim'] in [0, None]
else:
assert channel_kwargs['aux_dim'] == self.aux_dim
channel_kwargs['aux_dim'] = self.aux_dim
self.encoders[channel_name] = CostlyPerceptualEncoder(**channel_kwargs)
assert len(self.encoders) >= 1
self._channels = [k for k in self.encoders.keys()]
self.y_dim = sum(encoder.y_dim for encoder in self.encoders.values())
@property
def channels(self):
return self._channels
def get_encoder_params(self):
p = []
for k in self.channels:
p += self.encoders[k].get_encoder_params()
return p
def get_mi_classifier_params(self):
p = []
for k in self.channels:
p += self.encoders[k].get_mi_classifier_params()
return p
def encode(self, xs_dict, auxs=None):
ys = []
mi_cost_list = []
avg_mi_cost_channel = []
training_dict = {}
for channel in self.channels:
encoder = self.encoders[channel]
encoder_xs = xs_dict[channel]
encoder_ys, encoder_log_probs, encoder_mi_cost = encoder.encode(
encoder_xs, auxs
)
avg_mi_cost_channel.append((channel, np.mean(encoder_mi_cost)))
ys.append(encoder_ys)
mi_cost_list.append(encoder_mi_cost)
if encoder.aux_to_mi:
if self.aux_dim == 0:
encoder_mi_xs = encoder_xs
else:
assert auxs is not None
encoder_mi_xs = torch.cat([encoder_xs.view(-1, encoder.x_dim),
auxs.view(-1, self.aux_dim)], 1)
else:
encoder_mi_xs = encoder_xs
training_dict[channel] = {
'mi_xs': encoder_mi_xs,
'ys': encoder_ys,
'log_probs': encoder_log_probs
}
# Concatenate the channel-wise encodings
ys = torch.cat(ys, dim=1)
# Add together the channel-wise MI costs
mi_costs = np.stack(mi_cost_list).sum(axis=0)
return ys, mi_costs, training_dict, avg_mi_cost_channel
def forward(self, advantages, training_dict):
loss = 0
for channel in self.channels:
loss += self.encoders[channel](
advantages=advantages, **training_dict[channel]
)
return loss
class MultiChannelCostlyPerceptualEncoderDiscretePolicy(nn.Module):
"""
Adds a discrete action head on top of MultiChannelCostlyPerceptualEncoder
Args:
action_dim (int): The number of actions that can be taken.
n_actions (int): Defaults to 1, The number of actions to output.
action_mi_cost (float): Defaults to 0.0. The scale applied to the MI estimate
when outputing MI cost of the sampled action "a" given "y".
entropy_coeff (float): Defaults to 0.0. Coefficient placed on the action
entropy for regularizing the policy. You may get weird effects if using
BOTH an MI and entropy cost on the policy.
h_dims (int, float, list, tuple): Defaults to (64, 64). Sets the hidden size(s)
of the action head layer(s). If an int or float, interpreted as the hidden
size of a single hidden layer net.
channel_dicts (dictionary): see MultiChannelCostlyPerceptualEncoder
Example:
# Make an instance with 2 channels
mccpe_pol = MultiChannelCostlyPerceptualEncoderDiscretePolicy(
action_dim=2,
action_mi_cost=0.1,
**{
'a': dict(x_dim=5, log_std_bias=0.0, mi_cost=1.0),
'b': dict(x_dim=3, residual_style=False, y_dim=10, mi_cost=2.0)
}
)
# Sample actions, and total MI costs of internal encodings and the actions
obs_a, obs_b = ...
actions, mi_costs, a_log_probs, a_ents, training_dict = mccpe_pol.sample(
{'a': obs_a, 'b': obs_b}
)
"""
def __init__(self,
action_dim,
n_actions = 1,
action_mi_cost=0.0,
entropy_coeff=0.0,
h_dims=(64, 64),
channel_dicts={}
):
super().__init__()
self.n_actions = int(n_actions)
assert self.n_actions >= 1
self.action_dim = int(action_dim)
assert self.action_dim >= 2
if self.n_actions > 1:
self.action_dim = self.action_dim * self.n_actions
self.action_mi_cost = float(action_mi_cost)
assert self.action_mi_cost >= 0.0
self.entropy_coeff = float(entropy_coeff)
assert self.entropy_coeff >= 0.0
if isinstance(h_dims, (int, float)):
self.h_dims = [int(h_dims)]
elif isinstance(h_dims, (list, tuple)):
self.h_dims = list(h_dims)
else:
raise TypeError
self.mccpe = MultiChannelCostlyPerceptualEncoder(
aux_dim=self.aux_dim,
channel_dicts=channel_dicts
)
layer_list = []
last_h_dim = self.action_head_input_dim
for h_dim in self.h_dims:
layer_list.append(nn.Linear(last_h_dim, h_dim))
layer_list.append(nn.ReLU())
last_h_dim = int(h_dim)
layer_list.append(
nn.Linear(last_h_dim, self.action_dim)
# Output logits based on LSTM state + last encoded outputs
)
self.logits = nn.Sequential(*layer_list)
self.action_mi_classifier = MutualInformationClassifier(
x_dim=self.action_head_input_dim, y_dim=self.n_actions
)
self._optimizer = None
@property
def aux_dim(self):
return 0
@property
def action_head_input_dim(self):
return self.mccpe.y_dim + self.aux_dim
@property
def optimizer(self):
if self._optimizer is None:
self._optimizer = torch.optim.Adam([
{'params': self.get_policy_params(), 'lr': 0.0001},
{'params': self.get_mi_classifier_params(), 'lr': 0.001}
])
return self._optimizer
@property
def channels(self):
return self.mccpe.channels
def get_policy_params(self):
p = []
p += self.mccpe.get_encoder_params()
p += self.logits.parameters()
return p
def get_mi_classifier_params(self):
p = []
p += self.mccpe.get_mi_classifier_params()
p += self.action_mi_classifier.parameters()
return p
def sample(self, xs_dict):
# Sample encodings of the inputs, encoding MI costs, and data for training
ys, encoding_mi_costs, training_dict, avg_mi_cost_channel = self.mccpe.encode(xs_dict)
# Run ys through logits net to get action dist, then sample
logits = self.logits(ys)
m = Categorical(logits=logits)
actions = m.sample()
action_log_probs = m.log_prob(actions)
action_entropies = m.entropy()
# Add in the MI cost for the actions.
action_mis = self.action_mi_classifier.log_odds_ratio(ys, actions)
action_mi_costs = self.action_mi_cost * action_mis
total_mi_costs = encoding_mi_costs + action_mi_costs
# Incorporate action head training stuffs
training_dict['_action_head'] = {
'mi_xs': ys,
'ys': actions,
'log_probs': action_log_probs,
'entropies': action_entropies,
}
actions = actions.detach().numpy()
return actions, total_mi_costs, training_dict
def forward(self, advantages, training_dict):
pg_losses = -training_dict['_action_head']['log_probs'] * advantages
ent_losses = -training_dict['_action_head']['entropies'] * self.entropy_coeff
policy_loss = pg_losses.mean() + ent_losses.mean()
action_mi_classifier_loss = self.action_mi_classifier(
training_dict['_action_head']['mi_xs'],
training_dict['_action_head']['ys'],
)
mcppe_loss = self.mccpe(advantages, training_dict)
return policy_loss + action_mi_classifier_loss + mcppe_loss
class MultiChannelCostlyPerceptualEncoderRecurrentDiscretePolicy(
MultiChannelCostlyPerceptualEncoderDiscretePolicy
):
"""
Extends MultiChannelCostlyPerceptualEncoderDiscretePolicy to use an LSTM to
maintain a learnable memory of encodings. LSTM state becomes an auxiliary input
to encoders and an additional input the the action head.
Args:
*args: See MultiChannelCostlyPerceptualEncoderDiscretePolicy
lstm_dim (int): Dimension size of LSTM hidden state
**kwargs: See MultiChannelCostlyPerceptualEncoderDiscretePolicy
"""
def __init__(self, *args, lstm_dim=32, **kwargs):
self.lstm_dim = int(lstm_dim)
assert self.lstm_dim > 0
super().__init__(*args, **kwargs)
self.lstm_initial_state_h = nn.Parameter(torch.zeros(1, self.lstm_dim))
self.lstm_initial_state_c = nn.Parameter(torch.zeros(1, self.lstm_dim))
self.lstm_cell = nn.LSTMCell(
input_size=self.mccpe.y_dim,
hidden_size=self.lstm_dim
)
@property
def aux_dim(self):
return self.lstm_dim
def get_policy_params(self):
p = []
p += self.mccpe.get_encoder_params()
p += self.logits.parameters()
p += self.lstm_cell.parameters()
p += [self.lstm_initial_state_h, self.lstm_initial_state_c]
return p
def get_initial_state_tuple(self, batch_size):
lstm_state_h = torch.tile(self.lstm_initial_state_h, (batch_size, 1))
lstm_state_c = torch.tile(self.lstm_initial_state_c, (batch_size, 1))
initial_lstm_state = (lstm_state_h, lstm_state_c)
return initial_lstm_state
def sample(self, xs_dict, lstm_state=None):
# Use the (learnable) initial state if none are provided
if lstm_state is None:
batch_size = list(xs_dict.values())[0].shape[0]
lstm_state = self.get_initial_state_tuple(batch_size)
# Sample encodings of the inputs, MI costs of the encodings, and training data
ys, encoding_mi_costs, training_dict, avg_mi_cost_channel = self.mccpe.encode(
xs_dict, auxs=lstm_state[0]
)
# Run ys through LSTM to get updated states
lstm_state = self.lstm_cell(ys, lstm_state)
# Run ys + updated states through logits net to get action dist, then sample
action_head_inputs = torch.cat([ys, lstm_state[0]], 1)
logits = self.logits(action_head_inputs)
if self.n_actions == 1:
m = Categorical(logits=logits)
actions = m.sample()
action_log_probs = m.log_prob(actions)
action_entropies = m.entropy()
else:
n_batch, n_actions_dim = logits.shape
logits_r = logits.view(n_batch, self.n_actions, -1)
m = Categorical(logits = logits_r)
actions = m.sample()
action_log_probs_separate = m.log_prob(actions)
action_log_probs = action_log_probs_separate.sum(1)
action_entropies_separate = m.entropy()
action_entropies = action_entropies_separate.sum(1)
# Add in the MI cost for the actions.
action_mis = self.action_mi_classifier.log_odds_ratio(
action_head_inputs, actions
)
action_mi_costs = self.action_mi_cost * action_mis
total_mi_costs = encoding_mi_costs + action_mi_costs
avg_mi_cost_channel.append(('action', np.mean(action_mi_costs)))
# Incorporate action head training stuffs
training_dict['_action_head'] = {
'mi_xs': action_head_inputs,
'ys': actions,
'log_probs': action_log_probs,
'entropies': action_entropies,
}
actions = actions.detach().numpy()
return actions, total_mi_costs, training_dict, lstm_state, m, avg_mi_cost_channel
# def get_logits(self, xs_dict, lstm_state = None):
# if lstm_state is None:
# batch_size = list(xs_dict.values())[0].shape[0]
# lstm_state = self.get_initial_state_tuple(batch_size)
# # Sample encodings of the inputs, MI costs of the encodings, and training data
# ys, encoding_mi_costs, training_dict = self.mccpe.encode(
# xs_dict, auxs=lstm_state[0]
# )
# # Run ys through LSTM to get updated states
# lstm_state = self.lstm_cell(ys, lstm_state)
# # Run ys + updated states through logits net to get action dist, then sample
# action_head_inputs = torch.cat([ys, lstm_state[0]], 1)
# logits = self.logits(action_head_inputs)
# return logits
#Wrapper Agent for taking care of some training/saving and loading details for the MultiChannelCostlyPerceptualEncoderRecurrentDiscretePolicy
class MCCPERDPAgent():
def __init__(self, action_dim,
n_actions = 1,
action_mi_cost=0.0,
entropy_coeff=0.0,
h_dims=(64, 64),
channel_dicts={},
lstm_dim=32,
future_rewards_only = True,
clip_grads = False
):
self.mccperdp = MultiChannelCostlyPerceptualEncoderRecurrentDiscretePolicy(
action_dim=action_dim,
n_actions = n_actions,
h_dims=h_dims,
entropy_coeff=entropy_coeff,
lstm_dim=lstm_dim,
action_mi_cost=action_mi_cost,
channel_dicts=channel_dicts,
)
self.lstm_state = None
self.batch_size = 0
self.future_rewards_only = future_rewards_only
self.clip_grads = clip_grads
self._train_counter = 0
@property
def training_iters(self):
return self._train_counter
#clears the training dict and lstm state for a new episode
def new_episode(self):
self.lstm_state = None
self.training_dict = {}
self.mi_channel_through_time = {}
def act(self, p_state, get_model = False):
p_actions, total_mi_costs, training_dict_, lstm_state, m, avg_mi_cost_channel = self.mccperdp.sample(
p_state,
lstm_state=self.lstm_state,
)
for channel_name, mi_cost in avg_mi_cost_channel:
if channel_name not in self.mi_channel_through_time:
self.mi_channel_through_time[channel_name] = []
self.mi_channel_through_time[channel_name].append(mi_cost)
for k, v in training_dict_.items():
if k not in self.training_dict:
self.training_dict[k] = {vk: [] for vk in v.keys()}
for vk, vv in v.items():
self.training_dict[k][vk].append(vv)
self.lstm_state = lstm_state
self.batch_size = list(p_state.values())[0].shape[0]
if not get_model:
return p_actions, total_mi_costs
else:
return p_actions, total_mi_costs, m
def end_episode(self, rewards_seq, train = True):
loss = 0
# ... aggregate tensors for the PRINCIPAL
for channel in self.training_dict.keys():
for k, v in self.training_dict[channel].items():
if k in ['mi_xs', 'ys']:
self.training_dict[channel][k] = torch.cat(v, 0)
else:
self.training_dict[channel][k] = torch.stack(v)
if train:
# ... calculate cumulative rewards & advantages for the PRINCIPAL
rs = np.stack(rewards_seq)
if self.future_rewards_only:
cum_rs = np.cumsum(rs[::-1], axis=0)[::-1] # cumulative rewards are [timesteps, batch_size]
else:
n_timesteps = len(rewards_seq)
cum_rs = np.tile(np.sum(rs, axis = 0),(n_timesteps, 1)) #use sum of rewards for all timesteps
advantages = torch.as_tensor(
(cum_rs - cum_rs.mean(1, keepdims=True)) / cum_rs.std(1, keepdims=True),
dtype=torch.float32)
#### TRAIN PRINCIPAL ####
self.mccperdp.optimizer.zero_grad()
loss = self.mccperdp(advantages, self.training_dict)
loss.backward()
if self.clip_grads:
torch.nn.utils.clip_grad_norm_(self.mccperdp.parameters(), 4.0)
self.mccperdp.optimizer.step()
self._train_counter += 1
return loss
def get_mis_channels(self):
channel_mis = []
for channel in self.mccperdp.mccpe.channels:
mi_classifier = self.mccperdp.mccpe.encoders[channel].mi_classifier
log_odds = mi_classifier.log_odds_ratio(
self.training_dict[channel]['mi_xs'], self.training_dict[channel]['ys']
).reshape(-1, self.batch_size)
channel_mis.append(('mi-' + channel, log_odds.mean(1)))
return channel_mis
def get_mis_channel_history(self):
return self.mi_channel_through_time
def save_model(self, file_prefix):
file_name = file_prefix
torch.save(self.mccperdp.state_dict(), file_name)
def load_model(self, file_prefix):
file_name = file_prefix
self.mccperdp.load_state_dict(torch.load(file_name))
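# --- Hedged usage sketch (editorial addition; channel names, dimensions and the
# reward function below are hypothetical, only the agent API above is assumed) ---
# agent = MCCPERDPAgent(action_dim=4, channel_dicts={'vision': {...}})
# agent.new_episode()
# rewards_seq = []
# for _ in range(n_steps):
#     obs = {'vision': torch.zeros(batch_size, obs_dim)}  # per-channel batch of inputs
#     actions, mi_costs = agent.act(obs)
#     rewards_seq.append(reward_fn(actions))              # hypothetical reward signal
# loss = agent.end_episode(rewards_seq, train=True)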
| 2.609375
| 3
|
site-packages/oslo_utils/tests/test_fixture.py
|
hariza17/freezer_libraries
| 0
|
12781472
|
<reponame>hariza17/freezer_libraries
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslotest import base as test_base
import six
from oslo_utils import fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import uuidutils
class TimeFixtureTest(test_base.BaseTestCase):
def test_set_time_override_using_default(self):
# When the fixture is used with its default constructor, the
# override_time is set to the current timestamp.
# Also, when the fixture is cleaned up, the override_time is reset.
self.assertIsNone(timeutils.utcnow.override_time)
with fixture.TimeFixture():
self.assertIsNotNone(timeutils.utcnow.override_time)
self.assertIsNone(timeutils.utcnow.override_time)
def test_set_time_override(self):
# When the fixture is used to set a time, utcnow returns that time.
new_time = datetime.datetime(2015, 1, 2, 3, 4, 6, 7)
self.useFixture(fixture.TimeFixture(new_time))
self.assertEqual(new_time, timeutils.utcnow())
# Call again to make sure it keeps returning the same time.
self.assertEqual(new_time, timeutils.utcnow())
def test_advance_time_delta(self):
# advance_time_delta() advances the overridden time by some timedelta.
new_time = datetime.datetime(2015, 1, 2, 3, 4, 6, 7)
time_fixture = self.useFixture(fixture.TimeFixture(new_time))
time_fixture.advance_time_delta(datetime.timedelta(seconds=1))
expected_time = datetime.datetime(2015, 1, 2, 3, 4, 7, 7)
self.assertEqual(expected_time, timeutils.utcnow())
def test_advance_time_seconds(self):
# advance_time_seconds() advances the overridden time by some number of
# seconds.
new_time = datetime.datetime(2015, 1, 2, 3, 4, 6, 7)
time_fixture = self.useFixture(fixture.TimeFixture(new_time))
time_fixture.advance_time_seconds(2)
expected_time = datetime.datetime(2015, 1, 2, 3, 4, 8, 7)
self.assertEqual(expected_time, timeutils.utcnow())
class UUIDSentinelsTest(test_base.BaseTestCase):
def test_different_sentinel(self):
uuid1 = uuids.foobar
uuid2 = uuids.barfoo
self.assertNotEqual(uuid1, uuid2)
def test_returns_uuid(self):
self.assertTrue(uuidutils.is_uuid_like(uuids.foo))
def test_returns_string(self):
self.assertIsInstance(uuids.foo, str)
def test_with_underline_prefix(self):
ex = self.assertRaises(ValueError, getattr, uuids, '_foo')
self.assertIn("Sentinels must not start with _", six.text_type(ex))
| 2.046875
| 2
|
GmailBot.py
|
umaimagit/EverydayTasksAutomation
| 0
|
12781473
|
<reponame>umaimagit/EverydayTasksAutomation
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 16:16:27 2019
@author: Umaima
"""
# Gmail automation with selenium
from selenium import webdriver
#from selenium.webdriver.common.keys import Keys
import time
class GmailBot:
def __init__(self, email, password):
self.email = email
self.password = password
self.driver = webdriver.Chrome()
def closeBrowser(self):
self.driver.close()
def login(self):
driver = self.driver
driver.get("https://www.gmail.com/")
time.sleep(2)
emailElem = driver.find_element_by_id('identifierId')
emailElem.send_keys(self.email)
nextButton = driver.find_element_by_id('identifierNext')
nextButton.click()
time.sleep(2)
passwordElem = driver.find_element_by_name('password')
passwordElem.send_keys(self.password)
driver.find_element_by_id('passwordNext').click()
# scroll all mails
#driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(5)
account_button = driver.find_element_by_xpath("//a[@href='https://accounts.google.com/SignOutOptions?hl=en-GB&continue=https://mail.google.com/mail&service=mail']")
account_button.click()
time.sleep(6)
logout_button = driver.find_element_by_xpath("//a[@id='gb_71']")
logout_button.click()
if __name__ == "__main__":
email = "email id"
password = "password"
gb = GmailBot(email, password)
gb.login()
time.sleep(5)
gb.closeBrowser()
| 3.03125
| 3
|
mntnr_hardware/__init__.py
|
wryfi/mntnr_hardware
| 0
|
12781474
|
<reponame>wryfi/mntnr_hardware
from enumfields import Enum
class RackOrientation(Enum):
FRONT = 1
REAR = 2
class Labels:
FRONT = 'Front-facing'
REAR = 'Rear-facing'
class SwitchSpeed(Enum):
TEN = 10
ONE_HUNDRED = 100
GIGABIT = 1000
TEN_GIGABIT = 10000
FORTY_GIGABIT = 40000
class Labels:
TEN = '10 Mbps'
ONE_HUNDRED = '100 Mbps'
GIGABIT = '1 Gbps'
TEN_GIGABIT = '10 Gbps'
FORTY_GIGABIT = '40 Gbps'
class SwitchInterconnect(Enum):
RJ45 = 1
TWINAX = 2
class Labels:
RJ45 = 'RJ-45'
TWINAX = 'Twinaxial'
class RackDepth(Enum):
QUARTER = 1
HALF = 2
THREE_QUARTER = 3
FULL = 4
class Labels:
QUARTER = 'Quarter depth'
HALF = 'Half depth'
THREE_QUARTER = 'Three-quarter depth'
FULL = 'Full depth'
class CpuManufacturer(Enum):
INTEL = 1
AMD = 2
QUALCOMM = 3
NVIDIA = 4
ORACLE = 5
OTHER = 9
class Labels:
INTEL = 'Intel'
AMD = 'AMD'
QUALCOMM = 'Qualcomm'
NVIDIA = 'nVidia'
ORACLE = 'Oracle'
OTHER = 'other'
class CabinetAttachmentMethod(Enum):
CAGE_NUT_95 = 1
DIRECT_ATTACH = 2
class Labels:
CAGE_NUT_95 = '9.5mm cage nut'
DIRECT_ATTACH = 'direct attachment'
class CabinetFastener(Enum):
UNF_10_32 = 1
UNC_12_24 = 2
M5 = 5
M6 = 6
OTHER = 9
class Labels:
UNF_10_32 = 'UNF 10-32'
UNC_12_24 = 'UNC 12-24'
M5 = 'M5'
M6 = 'M6'
OTHER = 'other'
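# --- Hedged usage note (editorial addition) ---
# django-enumfields exposes the inner Labels class through each member's
# `.label` attribute (assuming the package's documented behaviour), e.g.:
# SwitchSpeed.GIGABIT.label -> '1 Gbps'
# RackDepth.FULL.value -> 4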
| 2.625
| 3
|
practice.py
|
betweak-ml/machinelearning1
| 0
|
12781475
|
<reponame>betweak-ml/machinelearning1
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Author: 'Tweakers'
# '<EMAIL>'
#
# Created on 17/07/2019
import tensorflow as tf
# Editorial completion of the original bare tf.gradients() call: tf.gradients
# is graph-mode only and needs explicit target/source tensors, so build a
# minimal symbolic graph before calling it.
tf.compat.v1.disable_eager_execution()
x = tf.compat.v1.placeholder(tf.float32)
dy_dx = tf.gradients(x ** 2, [x])  # symbolic d(x^2)/dx
| 1.5625
| 2
|
Benchmarking/Config/create_benchmark_job.py
|
CipiOrhei/eecvf
| 1
|
12781476
|
<reponame>CipiOrhei/eecvf
import glob
import os
import shutil
import config_main
from Benchmarking.CM_Benchmark.basic_benchmark.fom import run_CM_benchmark_FOM, run_CM_benchmark_SFOM
from Benchmarking.CM_Benchmark.basic_benchmark.psnr import run_CM_benchmark_PSNR
from Benchmarking.CM_Benchmark.basic_benchmark.IoU import run_CM_benchmark_IoU
from Benchmarking.sb_benchmark.sb_IoU import run_SB_benchmark_IoU
from Benchmarking.cbir.zubud import run_cbir_Top1
from Benchmarking.bsds500.verify import run_verify_boundry
from Utils.log_handler import log_benchmark_info_to_console
def delete_folder_benchmark_out():
"""
Delete the content of out folder where the saved images are.
:return: None
"""
path = os.path.join(os.getcwd(), config_main.BENCHMARK_RESULTS, '*')
files = glob.glob(path)
for f in files:
shutil.rmtree(f, ignore_errors=True)
log_benchmark_info_to_console('DELETED CONTENT OF: {}'.format(config_main.BENCHMARK_RESULTS))
def set_gt_location(location: str):
"""
Sets the location of ground truth of images
:param location: location relative to repo
:return: None
"""
config_main.BENCHMARK_GT_LOCATION = location
log_benchmark_info_to_console('GroundTruth location set: {}'.format(location))
def set_input_location(location: str):
"""
Sets the location of input images
:param location: location relative to repo
:return: None
"""
config_main.BENCHMARK_INPUT_LOCATION = location
log_benchmark_info_to_console('Image location set: {}'.format(location))
def set_image_set(location: str):
"""
Sets the location of raw images
:param location: location relative to repo
:return: None
"""
image_set = []
for dir_name, dir_names, file_names in os.walk(location):
# array that stores all names
for filename in file_names:
image_set.append(filename.split('.')[0])
config_main.BENCHMARK_SAMPLE_NAMES = image_set
log_benchmark_info_to_console('Image set acquired: {}'.format(image_set))
def job_set(set_to_use: list):
"""
Configure set of jobs
:param set_to_use: list of jobs to add to job
:return: None
"""
config_main.BENCHMARK_SETS = set_to_use
log_benchmark_info_to_console('Test sets: {}'.format(set_to_use))
log_benchmark_info_to_console('Test sets number: {}'.format(len(set_to_use)))
def run_bsds500_boundary_benchmark(input_location: str, gt_location: str, raw_image: str,
jobs_set: list, do_thinning: bool = True, px_offset=5):
"""
Run BSDS boundary benchmark
:param input_location: location of input of predicted edges
:param gt_location: location of input of ground truth edges
:param raw_image: location of input images
:param do_thinning: if we want to do thinning of predicted edges
:param px_offset: number of pixels offset between the ground truth and predicted edges to be considered as valid
:param jobs_set: algo sets to evaluate
:return: None
"""
set_gt_location(location=gt_location)
set_input_location(location=input_location)
set_image_set(location=raw_image)
job_set(set_to_use=jobs_set)
if config_main.LINUX_OS:
run_verify_boundry(thinning=do_thinning, max_distance_px=px_offset)
else:
log_benchmark_info_to_console("WE CAN'T RUN ON WINDOWS OS")
# noinspection PyPep8Naming
def run_PSNR_benchmark(input_location: str, gt_location: str,
raw_image: str, jobs_set: list, db_calc: bool = False):
"""
<NAME> and <NAME>, “Contextual and Non-Contextual Performance Evaluation of Edge Detectors,”
Pattern Recognition Letters, vol. 21, no. 8, pp. 805-816, 2000.
:param input_location: location of algorithm images
:param gt_location: location of gt images
:param raw_image: location of raw images
:param jobs_set: algo sets to evaluate
:param db_calc: PSNR id dB
:return: None
"""
set_gt_location(gt_location)
set_input_location(input_location)
set_image_set(raw_image)
job_set(jobs_set)
run_CM_benchmark_PSNR(db_calc)
# noinspection PyPep8Naming
def run_FOM_benchmark(input_location: str, gt_location: str,
raw_image: str, jobs_set: list):
"""
Run Figure of Merit benchmark
<NAME>, Digital Image Processing. New York: Wiley Interscience 1978
:param input_location: location of algorithm images
:param gt_location: location of gt images
:param raw_image: location of raw images
:param jobs_set: algo sets to evaluate
:return: None
"""
set_gt_location(gt_location)
set_input_location(input_location)
set_image_set(raw_image)
job_set(jobs_set)
run_CM_benchmark_FOM()
# noinspection PyPep8Naming
def run_SFOM_benchmark(input_location: str, gt_location: str,
raw_image: str, jobs_set: list):
"""
Run Symmetric Figure of Merit (SFOM) benchmark
:param input_location: location of algorithm images
:param gt_location: location of gt images
:param raw_image: location of raw images
:param jobs_set: algo sets to evaluate
:return: None
"""
set_gt_location(gt_location)
set_input_location(input_location)
set_image_set(raw_image)
job_set(jobs_set)
run_CM_benchmark_SFOM()
# noinspection PyPep8Naming
def run_IoU_benchmark(input_location: str, gt_location: str, class_list_name: list, raw_image: str, jobs_set: list, unknown_class: int,
is_rgb_gt=False, class_list_rgb_value=None, show_only_set_mean_value=False):
"""
:param input_location: location of algorithm images
:param gt_location: location of gt images
:param class_list_name: list of class names
:param unknown_class: which class is used for unknown predicted pixels
:param is_rgb_gt: whether the ground truth image is RGB colored
:param class_list_rgb_value: list of greyscale values of classes
:param raw_image: location of raw images
:param jobs_set: algo sets to evaluate
:param show_only_set_mean_value: show on console only the mean IoU for each set
:return: None
"""
set_gt_location(gt_location)
set_input_location(input_location)
set_image_set(raw_image)
job_set(jobs_set)
run_CM_benchmark_IoU(class_list_name, unknown_class, is_rgb_gt, class_list_rgb_value, show_only_set_mean_value)
# noinspection PyPep8Naming
def run_SB_IoU_benchmark(input_location: str, gt_location: str, jobs_set: list, raw_image: str):
"""
Run the SB IoU benchmark.
:param input_location: location of algorithm images
:param gt_location: location of gt images
:param jobs_set: algo sets to evaluate
:param raw_image: location of raw images
:return: None
"""
set_gt_location(gt_location)
set_input_location(input_location)
job_set(jobs_set)
set_image_set(raw_image)
run_SB_benchmark_IoU()
# noinspection PyPep8Naming
def run_CBIR_ZuBuD_benchmark(input_location: str, gt_location: str, jobs_set: list, raw_image: str):
"""
Run CBIR ZuBuD Top-1 benchmark
:param input_location: location of algorithm images
:param gt_location: location of gt images
:param raw_image: location of raw images
:param jobs_set: algo sets to evaluate
:return: None
"""
set_input_location(input_location)
job_set(jobs_set)
set_image_set(raw_image)
run_cbir_Top1(gt_location)
if __name__ == "__main__":
pass
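# --- Hedged usage sketch (editorial addition; all paths and set names are hypothetical) ---
# run_PSNR_benchmark(
#     input_location='Logs/benchmark_input',  # predicted images, one folder per algorithm set
#     gt_location='TestData/gt',              # ground-truth images
#     raw_image='TestData/images',            # raw input images (used to collect sample names)
#     jobs_set=['CANNY', 'SOBEL'],            # algorithm sets to evaluate
#     db_calc=True)                           # report PSNR in dB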
| 2.203125
| 2
|
Snippets/code-gym-1/the_letter_is.py
|
ursaMaj0r/python-csc-125
| 0
|
12781477
|
<filename>Snippets/code-gym-1/the_letter_is.py
# input
input_word = input("Enter a word: ")
# response
for letter in input_word:
print("The letter is {}.".format(letter))
| 3.921875
| 4
|
Src/WorkItemEventProcessor.Tests/Dsl/tfs/loadparentwi.py
|
rfennell/TfsAlertsDsl
| 0
|
12781478
|
<filename>Src/WorkItemEventProcessor.Tests/Dsl/tfs/loadparentwi.py
wi = GetWorkItem(99)
parentwi = GetParentWorkItem(wi)
if parentwi is None:
print("Work item '" + str(wi.Id) + "' has no parent")
else:
print("Work item '" + str(wi.Id) + "' has a parent '" + str(parentwi.Id) + "' with the title '" + parentwi.Title +"'")
| 2.296875
| 2
|
main.py
|
itsdaniele/graphtrans
| 0
|
12781479
|
<reponame>itsdaniele/graphtrans<gh_stars>0
import os
import random
from datetime import datetime
import configargparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import wandb
from loguru import logger
from ogb.graphproppred import Evaluator, PygGraphPropPredDataset
from torch.optim.lr_scheduler import CosineAnnealingLR, OneCycleLR, ReduceLROnPlateau
from torch_geometric.data import DataLoader
from tqdm import tqdm
import utils
from data.adj_list import compute_adjacency_list_cached
from dataset import DATASET_UTILS
from models import get_model_and_parser
from trainers import get_trainer_and_parser
torch.cuda.empty_cache()
wandb.init(project="graph-aug")
now = datetime.now()
now = now.strftime("%m_%d-%H_%M_%S")
def main():
# fmt: off
parser = configargparse.ArgumentParser(allow_abbrev=False,
description='GNN baselines on ogbg-code data with Pytorch Geometrics')
parser.add_argument('--configs', required=False, is_config_file=True)
parser.add_argument('--wandb_run_idx', type=str, default=None)
parser.add_argument('--data_root', type=str, default='/data/zhwu/ogb')
parser.add_argument('--dataset', type=str, default="ogbg-code",
help='dataset name (default: ogbg-code)')
parser.add_argument('--aug', type=str, default='baseline',
help='augment method to use [baseline|flag|augment]')
parser.add_argument('--max_seq_len', type=int, default=None,
help='maximum sequence length to predict (default: None)')
group = parser.add_argument_group('model')
group.add_argument('--model_type', type=str, default='gnn', help='gnn|pna|gnn-transformer')
group.add_argument('--graph_pooling', type=str, default='mean')
group = parser.add_argument_group('gnn')
group.add_argument('--gnn_type', type=str, default='gcn')
group.add_argument('--gnn_virtual_node', action='store_true')
group.add_argument('--gnn_dropout', type=float, default=0)
group.add_argument('--gnn_num_layer', type=int, default=5,
help='number of GNN message passing layers (default: 5)')
group.add_argument('--gnn_emb_dim', type=int, default=300,
help='dimensionality of hidden units in GNNs (default: 300)')
group.add_argument('--gnn_JK', type=str, default='last')
group.add_argument('--gnn_residual', action='store_true', default=False)
group = parser.add_argument_group('training')
group.add_argument('--devices', type=str, default="0",
help='which gpu to use if any (default: 0)')
group.add_argument('--batch_size', type=int, default=128,
help='input batch size for training (default: 128)')
group.add_argument('--eval_batch_size', type=int, default=None,
help='input batch size for training (default: train batch size)')
group.add_argument('--epochs', type=int, default=30,
help='number of epochs to train (default: 30)')
group.add_argument('--num_workers', type=int, default=0,
help='number of workers (default: 0)')
group.add_argument('--scheduler', type=str, default=None)
group.add_argument('--pct_start', type=float, default=0.3)
group.add_argument('--weight_decay', type=float, default=0.0)
group.add_argument('--grad_clip', type=float, default=None)
group.add_argument('--lr', type=float, default=0.001)
group.add_argument('--max_lr', type=float, default=0.001)
group.add_argument('--runs', type=int, default=10)
group.add_argument('--test-freq', type=int, default=1)
group.add_argument('--start-eval', type=int, default=15)
group.add_argument('--resume', type=str, default=None)
group.add_argument('--seed', type=int, default=None)
# fmt: on
args, _ = parser.parse_known_args()
dataset_util = DATASET_UTILS[args.dataset]()
dataset_util.add_args(parser)
args, _ = parser.parse_known_args()
# Setup Trainer and add customized args
trainer = get_trainer_and_parser(args, parser)
train = trainer.train
model_cls = get_model_and_parser(args, parser)
args = parser.parse_args()
data_transform = trainer.transform(args)
run_name = f"{args.dataset}+{model_cls.name(args)}"
run_name += f"+{trainer.name(args)}+lr={args.lr}+wd={args.weight_decay}"
if args.scheduler is not None:
run_name = run_name + f"+sch={args.scheduler}"
if args.seed:
run_name = run_name + f"+seed{args.seed}"
if args.wandb_run_idx is not None:
run_name = args.wandb_run_idx + "_" + run_name
wandb.run.name = run_name
device = (
torch.device("cuda")
if torch.cuda.is_available() and args.devices
else torch.device("cpu")
)
args.save_path = f"exps/{run_name}-{now}"
os.makedirs(args.save_path, exist_ok=True)
if args.resume is not None:
args.save_path = args.resume
logger.info(args)
wandb.config.update(args)
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if device.type == "cuda":
# cudnn.deterministic = True
torch.cuda.manual_seed(args.seed)
if "ogb" in args.dataset:
# automatic dataloading and splitting
dataset_ = PygGraphPropPredDataset(
name=args.dataset, root="./data", transform=data_transform
)
dataset_eval_ = PygGraphPropPredDataset(name=args.dataset, root="./data")
num_tasks, node_encoder_cls, edge_encoder_cls, deg = dataset_util.preprocess(
dataset_, dataset_eval_, model_cls, args
)
evaluator = Evaluator(
args.dataset
) # automatic evaluator. takes dataset name as input
else:
(
dataset_,
num_tasks,
node_encoder_cls,
edge_encoder_cls,
deg,
) = dataset_util.preprocess(args)
dataset_eval_ = dataset_
evaluator = None
task_type = dataset_.task_type
split_idx = dataset_.get_idx_split()
calc_loss = dataset_util.loss_fn(task_type)
eval = dataset_util.eval
def create_loader(dataset, dataset_eval):
test_data = compute_adjacency_list_cached(
dataset[split_idx["test"]], key=f"{args.dataset}_test"
)
valid_data = compute_adjacency_list_cached(
dataset_eval[split_idx["valid"]], key=f"{args.dataset}_valid"
)
train_data = compute_adjacency_list_cached(
dataset[split_idx["train"]], key=f"{args.dataset}_train"
)
logger.debug("Finished computing adjacency list")
eval_bs = (
args.batch_size if args.eval_batch_size is None else args.eval_batch_size
)
train_loader = DataLoader(
train_data,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
)
train_loader_eval = DataLoader(
train_data,
batch_size=eval_bs,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True,
)
valid_loader = DataLoader(
valid_data,
batch_size=eval_bs,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True,
)
test_loader = DataLoader(
test_data,
batch_size=eval_bs,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True,
)
return train_loader, train_loader_eval, valid_loader, test_loader
train_loader_, train_loader_eval_, valid_loader_, test_loader_ = create_loader(
dataset_, dataset_eval_
)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def run(run_id):
if "ogb" not in args.dataset:
dataset, _, _, _, _ = dataset_util.preprocess(args)
dataset_eval = dataset
train_loader, train_loader_eval, valid_loader, test_loader = create_loader(
dataset, dataset_eval
)
else:
train_loader, train_loader_eval, valid_loader, test_loader = (
train_loader_,
train_loader_eval_,
valid_loader_,
test_loader_,
)
dataset = dataset_
node_encoder = node_encoder_cls()
os.makedirs(os.path.join(args.save_path, str(run_id)), exist_ok=True)
best_val, final_test = 0, 0
model = model_cls(
num_tasks=num_tasks,
args=args,
node_encoder=node_encoder,
edge_encoder_cls=edge_encoder_cls,
).to(device)
print("Model Parameters: ", count_parameters(model))
# exit(-1)
# model = nn.DataParallel(model)
wandb.watch(model)
optimizer = optim.AdamW(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
if args.scheduler == "plateau":
# NOTE(ajayjain): For Molhiv config, this min_lr is too high -- means that lr does not decay.
scheduler = ReduceLROnPlateau(
optimizer,
mode="min",
factor=0.5,
patience=20,
min_lr=0.0001,
verbose=False,
)
elif args.scheduler == "cosine":
scheduler = CosineAnnealingLR(
optimizer, T_max=args.epochs * len(train_loader), verbose=False
)
elif args.scheduler == "onecycle":
scheduler = OneCycleLR(
optimizer,
max_lr=args.max_lr,
epochs=args.epochs,
steps_per_epoch=len(train_loader),
pct_start=args.pct_start,
verbose=False,
)
elif args.scheduler is None:
scheduler = None
else:
raise NotImplementedError
# Load resume model, if any
start_epoch = 1
last_model_path = os.path.join(args.save_path, str(run_id), "last_model.pt")
if os.path.exists(last_model_path):
state_dict = torch.load(last_model_path)
start_epoch = state_dict["epoch"] + 1
model.load_state_dict(state_dict["model"])
optimizer.load_state_dict(state_dict["optimizer"])
if args.scheduler:
scheduler.load_state_dict(state_dict["scheduler"])
logger.info("[Resume] Loaded: {last_model_path} epoch: {start_epoch}")
model.epoch_callback(epoch=start_epoch - 1)
for epoch in range(start_epoch, args.epochs + 1):
logger.info(f"=====Epoch {epoch}=====")
logger.info("Training...")
logger.info("Total parameters: {}", utils.num_total_parameters(model))
logger.info(
"Trainable parameters: {}", utils.num_trainable_parameters(model)
)
loss = train(
model,
device,
train_loader,
optimizer,
args,
calc_loss,
scheduler if args.scheduler != "plateau" else None,
)
model.epoch_callback(epoch)
wandb.log(
{
f"train/loss-runs{run_id}": loss,
f"train/lr": optimizer.param_groups[0]["lr"],
f"epoch": epoch,
}
)
if args.scheduler == "plateau":
valid_perf = eval(model, device, valid_loader, evaluator)
valid_metric = valid_perf[dataset.eval_metric]
scheduler.step(valid_metric)
if (
epoch > args.start_eval
and epoch % args.test_freq == 0
or epoch in [1, args.epochs]
):
logger.info("Evaluating...")
with torch.no_grad():
train_perf = eval(model, device, train_loader_eval, evaluator)
if args.scheduler != "plateau":
valid_perf = eval(model, device, valid_loader, evaluator)
test_perf = eval(model, device, test_loader, evaluator)
train_metric, valid_metric, test_metric = (
train_perf[dataset.eval_metric],
valid_perf[dataset.eval_metric],
test_perf[dataset.eval_metric],
)
wandb.log(
{
f"train/{dataset.eval_metric}-runs{run_id}": train_metric,
f"valid/{dataset.eval_metric}-runs{run_id}": valid_metric,
f"test/{dataset.eval_metric}-runs{run_id}": test_metric,
"epoch": epoch,
}
)
logger.info(f"Running: {run_name} (runs {run_id})")
logger.info(
f"Run {run_id} - train: {train_metric}, val: {valid_metric}, test: {test_metric}"
)
# Save checkpoints
state_dict = {
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"epoch": epoch,
}
state_dict["scheduler"] = (
scheduler.state_dict() if args.scheduler else None
)
torch.save(
state_dict,
os.path.join(args.save_path, str(run_id), "last_model.pt"),
)
logger.info(
"[Save] Save model: {}",
os.path.join(args.save_path, str(run_id), "last_model.pt"),
)
if best_val < valid_metric:
best_val = valid_metric
final_test = test_metric
wandb.run.summary[
f"best/valid/{dataset.eval_metric}-runs{run_id}"
] = valid_metric
wandb.run.summary[
f"best/test/{dataset.eval_metric}-runs{run_id}"
] = test_metric
torch.save(
state_dict,
os.path.join(args.save_path, str(run_id), "best_model.pt"),
)
logger.info(
"[Best Model] Save model: {}",
os.path.join(args.save_path, str(run_id), "best_model.pt"),
)
state_dict = torch.load(
os.path.join(args.save_path, str(run_id), "best_model.pt")
)
logger.info(
"[Evaluate] Loaded from {}",
os.path.join(args.save_path, str(run_id), "best_model.pt"),
)
model.load_state_dict(state_dict["model"])
best_valid_perf = eval(model, device, valid_loader, evaluator)
best_test_perf = eval(model, device, test_loader, evaluator)
return best_valid_perf[dataset.eval_metric], best_test_perf[dataset.eval_metric]
vals, tests = [], []
for run_id in range(args.runs):
best_val, final_test = run(run_id)
vals.append(best_val)
tests.append(final_test)
logger.info(f"Run {run_id} - val: {best_val}, test: {final_test}")
logger.info(f"Average val accuracy: {np.mean(vals)} ± {np.std(vals)}")
logger.info(f"Average test accuracy: {np.mean(tests)} ± {np.std(tests)}")
if __name__ == "__main__":
main()
| 2.09375
| 2
|
plugin/keypair.py
|
angry-tony/mist-cloudify-plugin
| 6
|
12781480
|
import os
from plugin import connection
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.decorators import operation
# TODO Are methods like `_get_path_to_key_file()` necessary, since we do not
# save keys on the local filesystem?
@operation
def creation_validation(**_):
""" This validates all nodes before bootstrap.
"""
key_file = _get_path_to_key_file()
key_file_in_filesystem = _search_for_key_file(key_file)
if ctx.node.properties['use_external_resource']:
if not key_file_in_filesystem:
raise NonRecoverableError(
'External resource, but the key file does not exist locally.')
try:
_get_key_pair_by_id(ctx.node.properties['resource_id'])
except NonRecoverableError as e:
raise NonRecoverableError(
'External resource, '
'but the key pair does not exist in the account: '
'{0}'.format(str(e)))
else:
if key_file_in_filesystem:
raise NonRecoverableError(
'Not external resource, '
'but the key file exists locally.')
try:
_get_key_pair_by_id(ctx.node.properties['resource_id'])
except NonRecoverableError:
pass
else:
raise NonRecoverableError(
'Not external resource, '
'but the key pair exists in the account.')
@operation
def create(**kwargs):
"""Creates a keypair."""
conn = connection.MistConnectionClient()
if _create_external_keypair():
return
key_pair_name = get_resource_id()
ctx.instance.runtime_properties["key"] = key_pair_name
ctx.instance.runtime_properties["mist_type"] = "keypair"
kp = conn.client.keys(search=key_pair_name)
if len(kp):
kp = kp[0]
return # if key already in mist.io, skip
else:
key_pair_name = ctx.node.properties["key_name"] # commented out in plugin?
private = conn.client.generate_key()
conn.client.add_key(key_name=key_pair_name, private=private)
conn.client.update_keys()
kp = conn.client.keys(search=key_pair_name)[0]
_save_key_pair(kp)
@operation
def delete(**kwargs):
"""Deletes a keypair."""
conn = connection.MistConnectionClient()
key_pair_name = get_external_resource_id_or_raise('delete key pair')
if _delete_external_keypair():
return
if key_pair_name:
try:
conn.client.keys(search=key_pair_name)[0].delete()
except Exception as exc:
raise NonRecoverableError('{0}'.format(str(exc)))
unassign_runtime_property_from_resource('mist_resource_id')
_delete_key_file()
ctx.logger.info('Deleted key pair: {0}'.format(key_pair_name))
else:
ctx.logger.info('Not deleting key pair from account')
def _create_external_keypair():
"""If use_external_resource is True, this will set the runtime_properties,
and then exit.
:param ctx: The Cloudify context.
:return False: Cloudify resource. Continue operation.
:return True: External resource. Set runtime_properties. Ignore operation.
:raises NonRecoverableError: If unable to locate the existing key file.
"""
if not use_external_resource(ctx.node.properties):
return False
ctx.instance.runtime_properties["mist_type"] = "keypair"
key_pair_name = ctx.node.properties['resource_id']
key_pair_in_account = _get_key_pair_by_id(key_pair_name)
key_path_in_filesystem = _get_path_to_key_file()
ctx.logger.debug(
'Path to key file: {0}.'.format(key_path_in_filesystem))
if not key_pair_in_account:
raise NonRecoverableError(
'External resource, but the key pair is not in the account.')
if not _search_for_key_file(key_path_in_filesystem):
_save_key_pair(key_pair_in_account)
ctx.instance.runtime_properties["key_id"] = key_pair_name
set_external_resource_id(key_pair_name)
return True
def _delete_external_keypair():
"""If use_external_resource is True, this will delete the runtime_properties,
and then exit.
:param ctx: The Cloudify context.
:return False: Cloudify resource. Continue operation.
:return True: External resource. Unset runtime_properties.
Ignore operation.
"""
if not use_external_resource(ctx.node.properties):
return False
ctx.logger.info('External resource. Not deleting keypair.')
unassign_runtime_property_from_resource(
"mist_resource_id")
return True
def _delete_key_file():
""" Deletes the key pair in the file specified in the blueprint.
:param ctx: The Cloudify context.
:raises NonRecoverableError: If unable to delete the local key file.
"""
key_path = _get_path_to_key_file()
if _search_for_key_file(key_path):
try:
os.remove(key_path)
except OSError as e:
raise NonRecoverableError(
'Unable to delete key pair: {0}.'
.format(str(e)))
def _save_key_pair(key_pair_object):
"""Saves a keypair to the filesystem.
:param key_pair_object: The key pair object as returned from create.
:param ctx: The Cloudify Context.
:raises NonRecoverableError: If private_key_path node property not set.
:raises NonRecoverableError: If Unable to save key file locally.
"""
ctx.logger.debug('Attempting to save the key_pair_object.')
if not key_pair_object.private:
raise NonRecoverableError(
'Cannot save key. KeyPair contains no private key.')
file_path = _get_path_to_key_file()
if not file_path:
return
if os.path.exists(file_path):
raise NonRecoverableError(
'{0} already exists, it will not be overwritten.'.format(
file_path))
fp = open(file_path, 'wb')
fp.write(key_pair_object.private)
fp.close()
_set_key_file_permissions(file_path)
def _set_key_file_permissions(key_file):
if os.access(key_file, os.W_OK):
os.chmod(key_file, 0o600)
else:
ctx.logger.error(
'Unable to set permissions key file: {0}.'.format(key_file))
def _get_key_pair_by_id(key_pair_id):
"""Returns the key pair object for a given key pair id.
:param key_pair_id: The ID of a keypair.
:returns The mist keypair object.
:raises NonRecoverableError: If Mist finds no matching key pairs.
"""
conn = connection.MistConnectionClient()
key_pairs = conn.client.keys(search=key_pair_id)
return key_pairs[0] if key_pairs else None
def _get_path_to_key_file():
"""Gets the path to the key file.
:param ctx: The Cloudify context.
:returns key_path: Path to the key file.
:raises NonRecoverableError: If private_key_path is not set.
"""
if not ctx.node.properties['private_key_path']:
ctx.logger.error('No private_key_path supplied. Moving on...')
return
return os.path.expanduser(ctx.node.properties['private_key_path'])
def _search_for_key_file(path_to_key_file):
""" Checks if the key_path exists in the local filesystem.
:param key_path: The path to the key pair file.
:return boolean if key_path exists (True) or not.
"""
return os.path.exists(path_to_key_file)
def get_resource_id():
"""Returns the resource id, if the user doesn't provide one,
this will create one for them.
:param node_properties: The node properties dictionary.
:return resource_id: A string.
"""
if ctx.node.properties['resource_id']:
return ctx.node.properties['resource_id']
elif ctx.node.properties['private_key_path']:
directory_path, filename = \
os.path.split(ctx.node.properties['private_key_path'])
resource_id = filename.split('.')[0]
return resource_id
def get_external_resource_id_or_raise(operation):
"""Checks if the EXTERNAL_RESOURCE_ID runtime_property is set and returns it.
:param operation: A string representing what is happening.
:param ctx_instance: The CTX Node-Instance Context.
:param ctx: The Cloudify ctx context.
:returns The EXTERNAL_RESOURCE_ID runtime_property for a CTX Instance.
:raises NonRecoverableError: If EXTERNAL_RESOURCE_ID has not been set.
"""
ctx.logger.debug(
'Checking if {0} in instance runtime_properties, for {1} operation.'
.format("mist_resource_id", operation))
if "mist_resource_id" not in ctx.instance.runtime_properties:
ctx.logger.error('Cannot {0}, because {1} is not assigned.'.format(
operation, "mist_resource_id"))
return
return ctx.instance.runtime_properties["mist_resource_id"]
def unassign_runtime_property_from_resource(property_name):
"""Pops a runtime_property and reports to debug.
:param property_name: The runtime_property to remove.
:param ctx_instance: The CTX Node-Instance Context.
:param ctx: The Cloudify ctx context.
"""
value = ctx.instance.runtime_properties.pop(property_name)
ctx.logger.debug(
'Unassigned {0} runtime property: {1}'.format(property_name, value))
def is_external_resource(properties):
return is_external_resource_by_properties(properties)
def is_external_resource_by_properties(properties):
return 'use_external_resource' in properties and \
properties['use_external_resource']
def use_external_resource(properties):
if not properties.get('use_external_resource'):
return None
if not "resource_id" in properties or not properties["resource_id"]:
raise NonRecoverableError(
'External resource, but resource not set.')
ctx.logger.debug(
'Resource Id: {0}'.format(properties["resource_id"]))
return True
def set_external_resource_id(value):
"""Sets the EXTERNAL_RESOURCE_ID runtime_property for a Node-Instance.
"""
ctx.instance.runtime_properties["mist_resource_id"] = value
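# --- Editorial note (hedged, inferred from the handlers above) ---
# Node properties this plugin reads: use_external_resource (bool), resource_id (str),
# private_key_path (str) and key_name (str). Runtime properties it manages include
# 'key', 'key_id', 'mist_type' and 'mist_resource_id'.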
| 2.421875
| 2
|
src/interface/__init__.py
|
ARTIEROCKS/artie-emotional-webservice
| 0
|
12781481
|
<filename>src/interface/__init__.py<gh_stars>0
from .emotional_interface import EmotionalInterface
| 1.179688
| 1
|
total_process.py
|
shingiyeon/cs474
| 0
|
12781482
|
<filename>total_process.py
import methods
import decoder
import postprocessing
import nltk
if __name__ == "__main__":
# nltk.download()
#methods.preprocessing("./data/temp/")
decoder.predict()
postprocessing.postprocessing("./")
| 1.8125
| 2
|
troposphere_mate/associate/apigw.py
|
tsuttsu305/troposphere_mate-project
| 10
|
12781483
|
<reponame>tsuttsu305/troposphere_mate-project
# -*- coding: utf-8 -*-
from troposphere_mate import Ref, GetAtt, Sub
from troposphere_mate import awslambda, apigateway
from ..core.associate_linker import Linker, x_depends_on_y, LinkerApi as LinkerApi_
from ..core.metadata import (
TROPOSPHERE_METADATA_FIELD_NAME,
ResourceLevelField,
initiate_default_resource_metadata,
)
class LinkerApi(LinkerApi_):
class ApiResourceWithRestApi(Linker):
"""
Ref: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigateway-resource.html
"""
rtype1 = apigateway.Resource
rtype2 = apigateway.RestApi
def associate(self, api_resource, rest_api, has_parent_resource=True, **kwargs):
api_resource.RestApiId = Ref(rest_api)
if has_parent_resource is False:
api_resource.ParentId = GetAtt(rest_api, "RootResourceId")
metadata = initiate_default_resource_metadata(api_resource)
metadata[TROPOSPHERE_METADATA_FIELD_NAME][
ResourceLevelField.ApiResource.FULL_PATH] = api_resource.PathPart
api_resource.Metadata = metadata
x_depends_on_y(api_resource, rest_api)
class ApiResourceWithParentApiResource(Linker):
"""
Ref: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigateway-resource.html
"""
rtype1 = apigateway.Resource
rtype2 = apigateway.Resource
def associate(self, api_resource1, api_resource2, **kwargs):
api_resource1.ParentId = Ref(api_resource2)
metadata = initiate_default_resource_metadata(api_resource1)
try:
metadata[TROPOSPHERE_METADATA_FIELD_NAME][ResourceLevelField.ApiResource.FULL_PATH] = "{}/{}".format(
api_resource2.Metadata[TROPOSPHERE_METADATA_FIELD_NAME][ResourceLevelField.ApiResource.FULL_PATH],
api_resource1.PathPart
)
except:
pass
api_resource1.Metadata = metadata
x_depends_on_y(api_resource1, api_resource2)
class ApiMethodWithApiResource(Linker):
"""
Ref: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigateway-method.html
"""
rtype1 = apigateway.Method
rtype2 = apigateway.Resource
def associate(self, api_method, api_resource, **kwargs):
try:
api_method.RestApiId = api_resource.RestApiId
except:
pass
api_method.ResourceId = Ref(api_resource)
metadata = initiate_default_resource_metadata(api_method)
try:
metadata[TROPOSPHERE_METADATA_FIELD_NAME][ResourceLevelField.ApiResource.FULL_PATH] = \
api_resource.Metadata[TROPOSPHERE_METADATA_FIELD_NAME][ResourceLevelField.ApiResource.FULL_PATH]
except:
pass
api_method.Metadata = metadata
x_depends_on_y(api_method, api_resource)
class ApiMethodWithLambdFunction(Linker):
"""
Ref: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigateway-method.html
"""
rtype1 = apigateway.Method
rtype2 = awslambda.Function
def associate(self, api_method, lbd_func, **kwargs):
try:
api_method.Integration.Type = "AWS"
api_method.Integration.IntegrationHttpMethod = "POST"
api_method.Integration.Uri = Sub(
"arn:aws:apigateway:${Region}:lambda:path/2015-03-31/functions/${LambdaArn}/invocations",
{
"Region": {"Ref": "AWS::Region"},
"LambdaArn": GetAtt(lbd_func, "Arn"),
}
)
except:
pass
x_depends_on_y(api_method, lbd_func)
class ApiMethodWithApiAuthorizer(Linker):
rtype1 = apigateway.Method
rtype2 = apigateway.Authorizer
def associate(self, api_method, api_authorizer, **kwargs):
api_method.AuthorizerId = Ref(api_authorizer)
x_depends_on_y(api_method, api_authorizer)
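# --- Hedged usage sketch (editorial addition; assumes Linker subclasses can be
# instantiated directly and the troposphere resources already exist) ---
# linker = LinkerApi.ApiResourceWithRestApi()
# linker.associate(api_resource, rest_api, has_parent_resource=False)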
| 1.890625
| 2
|
modeltasks/eval.py
|
AshivDhondea/DFTS_compat_v1
| 3
|
12781484
|
import numpy as np
import sys
from collections import Counter
class CFeval(object):
"""Classification evaluator class"""
def __init__(self, metrics, reshapeDims, classes):
"""
# Arguments
metrics: dictionary of metrics to be evaluated, currently supports only classification accuracy
reshapeDims: list of the reshape dimensions of the image
classes: integer representing the number of classes
"""
super(CFeval, self).__init__()
self.metrics = metrics
self.avgAcc = []
self.runThrough = False
def reset(self):
self.avgAcc = []
def evaluate(self, remoteOut, classValues):
"""Evaluates the predictions produced by the model in the cloud.
# Arguments
remoteOut: numpy ndarray containing the predictions of the model in the cloud
classValues: numpy array containing the ground truth labels
"""
predictions = np.argmax(remoteOut, axis=1)
self.avgAcc.append(np.sum(np.equal(predictions, classValues))/classValues.shape[0])
def simRes(self):
"""Returns the mean of the classification accuracies over all batches of predictions.
"""
self.avgAcc = np.array(self.avgAcc)
return [np.mean(self.avgAcc)]
class ODeval(object):
"""Object detection evaluator class."""
def __init__(self, metrics, reshapeDims, classes):
"""
# Arguments
metrics: dictionary of metrics to be evaluated, currently supports only mean average precision
reshapeDims: list of the reshape dimensions of the image
classes: integer representing the number of classes
"""
super(ODeval, self).__init__()
self.metrics = metrics
self.iou = metrics['map']['iou'] #iterate through for loop for multiple values
self.reshapeDims = reshapeDims
self.n_classes = classes
self.pred_format = {'class_id': 0, 'conf': 1, 'xmin': 2, 'ymin': 3, 'xmax': 4, 'ymax': 5}
self.gt_format = {'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}
#pred format: class id, conf, xmin, ymin, xmax, ymax
#ground truth: class id, xmin, ymin, xmax, ymax
# The following lists all contain per-class data, i.e. all list have the length `n_classes + 1`,
# where one element is for the background class, i.e. that element is just a dummy entry.
self.prediction_results = [list() for _ in range(self.n_classes + 1)]
self.num_gt_per_class = None
self.groundTruth = []
self.imageId = []
self.runThrough = False
def reset(self):
self.prediction_results = [list() for _ in range(self.n_classes + 1)]
def evaluate(self, remoteOut, labels):
"""Evaluates the output of the predictions of the model in the cloud.
# Arguments
remoteOut: numpy ndarray containing the predictions of the model in the cloud
labels: ground truth labels corresponding to each image
"""
groundTruth = labels[1]
imageId = labels[0]
if not self.runThrough:
self.groundTruth+= list(groundTruth)
[self.imageId.append(i) for i in imageId]
self.predictOnBatch( remoteOut, imageId)
def simRes(self):
"""Evaluates the results of the simulation over all the iou values and returns a list
containing iou and corresponding mAp values.
"""
userRes = {}
# print(self.iou)
for i in self.iou:
# print(i)
userRes[i] = self.iterateOverIOU(self.prediction_results, i, self.imageId)
return np.array(list(userRes.items()))
def iterateOverIOU(self, preds, iou, imageId):
"""Calculates the desired metrics over all iou values.
# Arguments
preds: list containing per class prediction results of the model in the cloud
iou: IOU value for which the mAp has to be evaluated
imageId: list containing the image ID's of the images in the test set
# Returns
Mean Average Precision calculated over all classes
"""
return self.calcmAp(self.groundTruth, self.prediction_results, iou, imageId, self.n_classes)
def predictOnBatch(self, remoteOut, imageId):
"""Generates per batch predictions.
# Arguments
remoteOut: numpy ndarray representing the prediction of the model in the cloud
imageId: list containing the image ID's of all images in the batch
"""
class_id_pred = self.pred_format['class_id']
conf_pred = self.pred_format['conf']
xmin_pred = self.pred_format['xmin']
ymin_pred = self.pred_format['ymin']
xmax_pred = self.pred_format['xmax']
ymax_pred = self.pred_format['ymax']
y_pred_filtered = []
for i in range(len(remoteOut)):
y_pred_filtered.append(remoteOut[i][remoteOut[i, :, 0] !=0])
remoteOut = y_pred_filtered
for k, batch_item in enumerate(remoteOut):
image_id = imageId[k]
for box in batch_item:
class_id = int(box[class_id_pred])
confidence = box[conf_pred]
xmin = round(box[xmin_pred], 1)
ymin = round(box[ymin_pred], 1)
xmax = round(box[xmax_pred], 1)
ymax = round(box[ymax_pred], 1)
prediction = (image_id, confidence, xmin, ymin, xmax, ymax)
self.prediction_results[class_id].append(prediction)
def calcmAp(self, labels, predictions, IOUThreshold, imageIds, n_classes):
"""Calculate the mean average precision over all classes for a given IOU thershold.
# Arguments
labels: array containing the ground truth labels
predictions: list containing per class predictions
IOUThreshold: float value that represents the IOU threshold to be considered
imageIds: list containing image ID's of all images in the test set
n_classes: number of classes
# Returns
The mean average precision calculated over all classes
"""
groundTruths = []
detections = predictions
ret = []
num_classes = 0
gtsPerClass = [0]
for i in range(len(imageIds)):
imageBoxes = labels[i]
for j in range(len(imageBoxes)):
boxes = imageBoxes[j]
b = list(boxes)
b.insert(0, imageIds[i])
b.insert(2, 1)
groundTruths.append(b)
for c in range(1, n_classes+1):
dects = detections[c]
#pred format: image_id, confidence, xmin, ymin, xmax, ymax
#gt format: image_id, 'class_id', conf, 'xmin', 'ymin', 'xmax', 'ymax'
gts = []
[gts.append(g) for g in groundTruths if g[1]==c]
npos = len(gts)
gtsPerClass.append(npos)
if npos!=0:
num_classes+=1
dects = sorted(dects, key=lambda conf: conf[1], reverse=True)
TP = np.zeros(len(dects))
FP = np.zeros(len(dects))
det = Counter([cc[0] for cc in gts])
for key, val in det.items():
det[key] = np.zeros(val)
for d in range(len(dects)):
gt = [gt for gt in gts if gt[0]==dects[d][0]]
iouMax = sys.float_info.min
for j in range(len(gt)):
iou = evalIOU(dects[d][2:], gt[j][3:])
if iou>iouMax:
iouMax = iou
jmax = j
if iouMax>=IOUThreshold:
if det[dects[d][0]][jmax] == 0:
TP[d] = 1
det[dects[d][0]][jmax] = 1
else:
FP[d] = 1
acc_FP = np.cumsum(FP)
acc_TP = np.cumsum(TP)
rec = acc_TP/npos
prec = np.divide(acc_TP,(acc_FP+acc_TP))
[ap, mpre, mrec, ii] = CalculateAveragePrecision(rec, prec)
# print(ap)
ret.append(ap)
# tot = len(ret)
print(gtsPerClass)
print(ret)
return np.nansum(ret)/num_classes
def evalIOU(boxes1, boxes2):
"""Computes the intersection over union for the given pair of boxes.
# Arguments
boxes1: list containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
boxes2: list containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
# Returns
The intersection over union of the regions under the boxes
"""
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
xmin = 0
ymin = 1
xmax = 2
ymax = 3
intersection_areas = intersection_area_(boxes1, boxes2)
boxes1_areas = (boxes1[:, xmax] - boxes1[:, xmin] + 1) * (boxes1[:, ymax] - boxes1[:, ymin] + 1)
boxes2_areas = (boxes2[:, xmax] - boxes2[:, xmin] + 1) * (boxes2[:, ymax] - boxes2[:, ymin] + 1)
union_areas = boxes1_areas + boxes2_areas - intersection_areas
return intersection_areas / union_areas
def intersection_area_(boxes1, boxes2):
"""Computes the intersection areas of the two boxes.
# Arguments
boxes1: array containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
boxes2: array containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
# Returns
The area common to both the boxes
"""
xmin = 0
ymin = 1
xmax = 2
ymax = 3
min_xy = np.maximum(boxes1[:,[xmin,ymin]], boxes2[:,[xmin,ymin]])
max_xy = np.minimum(boxes1[:,[xmax,ymax]], boxes2[:,[xmax,ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + 1)
return side_lengths[:,0] * side_lengths[:,1]
def CalculateAveragePrecision(rec, prec):
"""Compute the average precision for a particular class
# Arguments
rec: cumulative recall of the class under consideration
prec: cumulative precision of the class under consideration
# Returns
Average precision per class
"""
mrec = []
mrec.append(0)
[mrec.append(e) for e in rec]
mrec.append(1)
mpre = []
mpre.append(0)
[mpre.append(e) for e in prec]
mpre.append(0)
for i in range(len(mpre)-1, 0, -1):
mpre[i-1]=max(mpre[i-1],mpre[i])
ii = []
for i in range(len(mrec)-1):
if mrec[1:][i]!=mrec[0:-1][i]:
ii.append(i+1)
ap = 0
for i in ii:
ap = ap + np.sum((mrec[i]-mrec[i-1])*mpre[i])
# return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii]
return [ap, mpre[0:len(mpre)-1], mrec[0:len(mpre)-1], ii]
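# --- Editorial usage sketch (not part of the original module) ---
if __name__ == "__main__":
    # Two boxes in <xmin, ymin, xmax, ymax> format; with the inclusive (+1) pixel
    # convention above the overlap is 6x6=36 and the union is 206, so IoU ~= 0.17.
    print(evalIOU([0, 0, 10, 10], [5, 5, 15, 15]))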
| 2.859375
| 3
|
loopchain/baseservice/rest_service.py
|
JINWOO-J/loopchain
| 1
|
12781485
|
<reponame>JINWOO-J/loopchain<filename>loopchain/baseservice/rest_service.py
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""REST Service for Peer"""
import logging
import multiprocessing
import setproctitle
from loopchain import utils as util
from loopchain.baseservice import CommonProcess, CommonSubprocess
from loopchain.utils import command_arguments
from loopchain import configure as conf
class RestService(CommonProcess):
def __init__(self, port, peer_ip=None):
super().__init__()
self._port = port
self._peer_ip = peer_ip or util.get_private_ip()
self.start()
def run(self, conn, event: multiprocessing.Event):
logging.debug("RestService run...")
args = ['python3', '-m', 'loopchain', 'rest', '-p', str(self._port)]
args += command_arguments.get_raw_commands_by_filter(
command_arguments.Type.AMQPKey,
command_arguments.Type.RadioStationTarget
)
server = CommonSubprocess(args)
api_port = self._port + conf.PORT_DIFF_REST_SERVICE_CONTAINER
server.set_proctitle(f"{setproctitle.getproctitle()} RestServer api_port({api_port})")
logging.info(f'RestService run complete port {self._port}')
# complete init
event.set()
command = None
while command != "quit":
try:
command, param = conn.recv()
logging.debug(f"RestService got: {param}")
except Exception as e:
logging.warning(f"RestService conn.recv() error: {e}")
except KeyboardInterrupt:
pass
server.stop()
logging.info("RestService Ended.")
| 1.765625
| 2
|
Project_2/connect4/TD_learning/connect4.py
|
ElchinValiyev/GameAI
| 0
|
12781486
|
<reponame>ElchinValiyev/GameAI
import matplotlib.pyplot as plt
import numpy as np
import random
BOARDWIDTH = 7
BOARDHEIGHT = 6
EMPTY = 0
def make_autopct(values):
def my_autopct(pct):
if pct == 0:
return ""
else:
return '{p:.2f}% '.format(p=pct)
return my_autopct
def plotResults(red_wins, black_wins, tie):
# Setting up plot variables
labels = ['Red Wins', 'Black Wins', 'Ties']
sizes = [red_wins, black_wins, tie]
colors = ['yellowgreen', 'gold', 'lightskyblue']
explode = (0.1, 0, 0)
plt.pie(sizes, colors=colors, autopct=make_autopct(sizes), explode=explode, labels=labels, shadow=True,
startangle=70)
plt.axis('equal')
plt.tight_layout()
plt.show()
print "number of red wins ", red_wins
print "number of black wins ", black_wins
print "number of ties ", tie
def makeMove(board, player, column):
new_board = board.copy()
# Finding the lowest row for the selected column to place the player
lowest = getLowestEmptySpace(new_board, column)
if lowest != -1:
new_board[column][lowest] = player
return new_board
def getNewBoard():
# Create a new board and initilize all fields to zero
board = np.zeros((BOARDWIDTH, BOARDHEIGHT), dtype='int')
return board
def getRandomMove(board):
# pick a random column number which is Valid
while True:
x = random.randint(0, 6)
if isValidMove(board, x):
return x
def getLowestEmptySpace(board, column):
# Return the row number of the lowest empty row in the given column.
for y in range(BOARDHEIGHT - 1, -1, -1):
if board[column][y] == EMPTY:
return y
return -1
def isValidMove(board, column):
# Returns True if there is an empty space in the given column.
# Otherwise returns False.
if column < 0 or column >= (BOARDWIDTH) or board[column][0] != EMPTY:
return False
return True
def isBoardFull(board):
# Returns True if there are no empty spaces anywhere on the board.
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
if board[x][y] == EMPTY:
return False
return True
def isWinner(board, tile): # Checks if the move was the winning move
# check horizontal spaces
for x in range(BOARDWIDTH - 3):
for y in range(BOARDHEIGHT):
if board[x][y] == tile and board[x + 1][y] == tile and board[x + 2][y] == tile and board[x + 3][y] == tile:
return True
# check vertical spaces
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT - 3):
if board[x][y] == tile and board[x][y + 1] == tile and board[x][y + 2] == tile and board[x][y + 3] == tile:
return True
# check / diagonal spaces
for x in range(BOARDWIDTH - 3):
for y in range(3, BOARDHEIGHT):
if board[x][y] == tile and board[x + 1][y - 1] == tile and board[x + 2][y - 2] == tile and board[x + 3][
y - 3] == tile:
return True
# check \ diagonal spaces
for x in range(BOARDWIDTH - 3):
for y in range(BOARDHEIGHT - 3):
if board[x][y] == tile and board[x + 1][y + 1] == tile and board[x + 2][y + 2] == tile and board[x + 3][
y + 3] == tile:
return True
return False
def play_without_ui(agent_1, agent_2):
# Set up a blank board data structure.
board = getNewBoard()
player = 1
while True: # main game loop
if player == 1:
column = agent_1(board)
else:
column = agent_2(board)
board = makeMove(board, player, column)
if isWinner(board, player):
return player
player *= -1 # switch to other player's turn
if isBoardFull(board):
# A completely filled board means it's a tie.
return 0
def getNeuralInput(state):
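# Flatten the 7x6 board to 42 cells, then one-hot encode it as three stacked
# planes (empty / player 1 / player -1), giving a 126-dim network input.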
new_state = state.reshape(1, BOARDHEIGHT * BOARDWIDTH)
x = np.append((new_state == 0).astype(int), (new_state == 1).astype(int))
x = np.append(x, (new_state == -1).astype(int))
return x
if __name__ == '__main__':
winners = [0, 0, 0]
for i in range(100000):
winners[play_without_ui(getRandomMove, getRandomMove)] += 1
plotResults(winners[1], winners[-1], winners[0])
| 3.609375
| 4
|
pyntcontrib/__init__.py
|
rags/pynt-contrib
| 5
|
12781487
|
import contextlib
import os
from subprocess import check_call, CalledProcessError
import sys
from pynt import task
__license__ = "MIT License"
__contact__ = "http://rags.github.com/pynt-contrib/"
@contextlib.contextmanager
def safe_cd(path):
"""
Changes to a directory, yields, and changes back.
Additionally any error will also change the directory back.
Usage:
>>> with safe_cd('some/repo'):
... call('git status')
"""
starting_directory = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(starting_directory)
@task()
def execute(script, *args, **kwargs):
"""
    Executes a command through the shell. Spaces should break up the args. Usage: execute('grep', 'TODO', '*')
    NOTE: Any kwargs will be converted to args in the destination command.
    E.g. execute('grep', 'TODO', '*', **{'--before-context': 5}) will be $ grep TODO * --before-context=5
"""
popen_args = [script] + list(args)
if kwargs:
popen_args.extend(_kwargs_to_execute_args(kwargs))
try:
return check_call(popen_args, shell=False)
except CalledProcessError as ex:
_print(ex)
sys.exit(ex.returncode)
except Exception as ex:
_print('Error: {} with script: {} and args {}'.format(ex, script, args))
sys.exit(1)
def _kwargs_to_execute_args(kwargs):
args = ['='.join([str(key), str(value)]) for key, value in kwargs.items()]
return args
def _print(*args):
    print(*args)
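# Illustrative sketch (not part of the original module): how keyword arguments
# are folded into command-line arguments by _kwargs_to_execute_args above.
def _example_kwargs():
    args = _kwargs_to_execute_args({'--before-context': 5})
    assert args == ['--before-context=5']
    # execute('grep', 'TODO', '*', **{'--before-context': 5}) therefore runs:
    #   grep TODO * --before-context=5
    return args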
| 2.875
| 3
|
koku/sources/test/test_kafka_listener.py
|
Vasyka/koku
| 2
|
12781488
|
<reponame>Vasyka/koku<filename>koku/sources/test/test_kafka_listener.py
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the Sources Kafka Listener handler."""
import queue
from random import choice
from unittest.mock import patch
from uuid import uuid4
import requests_mock
from django.db import IntegrityError
from django.db import InterfaceError
from django.db import OperationalError
from django.db.models.signals import post_save
from django.forms.models import model_to_dict
from django.test.utils import override_settings
from faker import Faker
from kafka.errors import KafkaError
from rest_framework.exceptions import ValidationError
import sources.kafka_listener as source_integration
from api.iam.test.iam_test_case import IamTestCase
from api.provider.models import Provider
from api.provider.models import Sources
from api.provider.provider_builder import ProviderBuilder
from api.provider.provider_builder import ProviderBuilderError
from koku.middleware import IdentityHeaderMiddleware
from masu.prometheus_stats import WORKER_REGISTRY
from providers.provider_access import ProviderAccessor
from providers.provider_errors import SkipStatusPush
from sources import storage
from sources.config import Config
from sources.kafka_listener import PROCESS_QUEUE
from sources.kafka_listener import process_synchronize_sources_msg
from sources.kafka_listener import SourcesIntegrationError
from sources.kafka_listener import storage_callback
from sources.kafka_message_processor import ApplicationMsgProcessor
from sources.kafka_message_processor import AUTH_TYPES
from sources.kafka_message_processor import AuthenticationMsgProcessor
from sources.kafka_message_processor import KAFKA_APPLICATION_CREATE
from sources.kafka_message_processor import KAFKA_APPLICATION_DESTROY
from sources.kafka_message_processor import KAFKA_APPLICATION_UPDATE
from sources.kafka_message_processor import KAFKA_AUTHENTICATION_CREATE
from sources.kafka_message_processor import KAFKA_AUTHENTICATION_UPDATE
from sources.kafka_message_processor import KAFKA_SOURCE_DESTROY
from sources.sources_http_client import ENDPOINT_APPLICATION_TYPES
from sources.sources_http_client import ENDPOINT_APPLICATIONS
from sources.sources_http_client import ENDPOINT_AUTHENTICATIONS
from sources.sources_http_client import ENDPOINT_SOURCE_TYPES
from sources.sources_http_client import ENDPOINT_SOURCES
from sources.sources_http_client import SourcesHTTPClient
from sources.sources_provider_coordinator import SourcesProviderCoordinator
from sources.test.test_kafka_message_processor import msg_generator
from sources.test.test_kafka_message_processor import SOURCE_TYPE_IDS
from sources.test.test_kafka_message_processor import SOURCE_TYPE_IDS_MAP
from sources.test.test_sources_http_client import COST_MGMT_APP_TYPE_ID
from sources.test.test_sources_http_client import MOCK_URL
faker = Faker()
FAKE_AWS_ARN = "arn:aws:iam::111111111111:role/CostManagement"
FAKE_AWS_ARN2 = "arn:aws:iam::22222222222:role/CostManagement"
SOURCES_APPS = "http://www.sources.com/api/v1.0/applications?filter[application_type_id]={}&filter[source_id]={}"
def raise_source_manager_error(param_a, param_b, param_c, param_d, param_e):
"""Raise ProviderBuilderError"""
raise ProviderBuilderError()
def raise_validation_error(param_a, param_b, param_c, param_d, param_e):
"""Raise ValidationError"""
raise ValidationError()
def raise_provider_manager_error(param_a):
"""Raise ProviderBuilderError"""
raise ProviderBuilderError("test exception")
class MockKafkaConsumer:
def __init__(self, preloaded_messages=["hi", "world"]):
self.preloaded_messages = preloaded_messages
def start(self):
pass
def stop(self):
pass
def commit(self):
self.preloaded_messages.pop()
def seek(self, topic_partition):
return
def getone(self):
for msg in self.preloaded_messages:
return msg
raise KafkaError("Closing Mock Consumer")
def __aiter__(self):
return self
def __anext__(self):
return self.getone()
@patch.object(Config, "SOURCES_API_URL", MOCK_URL)
class SourcesKafkaMsgHandlerTest(IamTestCase):
"""Test Cases for the Sources Kafka Listener."""
@classmethod
def setUpClass(cls):
"""Set up the test class."""
super().setUpClass()
post_save.disconnect(storage_callback, sender=Sources)
account = "12345"
IdentityHeaderMiddleware.create_customer(account)
def setUp(self):
"""Setup the test method."""
super().setUp()
self.aws_local_source = {
"source_id": 11,
"source_uuid": uuid4(),
"name": "ProviderAWS Local",
"source_type": "AWS-local",
"authentication": {"credentials": {"role_arn": "arn:aws:iam::111111111111:role/CostManagement"}},
"billing_source": {"data_source": {"bucket": "fake-local-bucket"}},
"auth_header": Config.SOURCES_FAKE_HEADER,
"account_id": "acct10001",
"offset": 11,
}
self.uuids = {Provider.PROVIDER_AWS: uuid4()}
self.source_ids = {
Provider.PROVIDER_AWS: 10,
# Provider.PROVIDER_AZURE: 11,
# Provider.PROVIDER_GCP: 12,
# Provider.PROVIDER_OCP: 13,
}
self.sources = {
Provider.PROVIDER_AWS: {
"source_id": self.source_ids.get(Provider.PROVIDER_AWS),
"source_uuid": self.uuids.get(Provider.PROVIDER_AWS),
"name": "Provider AWS",
"source_type": "AWS",
"authentication": {"credentials": {"role_arn": FAKE_AWS_ARN}},
"billing_source": {"data_source": {"bucket": "test_bucket"}},
"auth_header": Config.SOURCES_FAKE_HEADER,
"account_id": "12345",
"offset": 5,
}
}
self.updated_sources = {
Provider.PROVIDER_AWS: {
"source_id": self.source_ids.get(Provider.PROVIDER_AWS),
"source_uuid": self.uuids.get(Provider.PROVIDER_AWS),
"name": "Provider AWS",
"source_type": "AWS",
"authentication": {"credentials": {"role_arn": FAKE_AWS_ARN2}},
"billing_source": {"data_source": {"bucket": "test_bucket_2"}},
"auth_header": Config.SOURCES_FAKE_HEADER,
"account_id": "12345",
"offset": 5,
}
}
self.mock_create_requests = {
Provider.PROVIDER_AWS: [
{
"url": f"{MOCK_URL}/api/v1.0/{ENDPOINT_APPLICATION_TYPES}/{COST_MGMT_APP_TYPE_ID}/sources?filter[id]={self.source_ids.get(Provider.PROVIDER_AWS)}", # noqa: E501
"status": 200,
"json": {"data": [{"not": "empty"}]},
},
{
"url": f"{MOCK_URL}/api/v1.0/{ENDPOINT_SOURCE_TYPES}?filter[id]={SOURCE_TYPE_IDS_MAP.get(Provider.PROVIDER_AWS)}", # noqa: E501
"status": 200,
"json": {"data": [{"name": SOURCE_TYPE_IDS[SOURCE_TYPE_IDS_MAP[Provider.PROVIDER_AWS]]}]},
},
{
"url": f"{MOCK_URL}/api/v1.0/{ENDPOINT_APPLICATIONS}?source_id={self.source_ids.get(Provider.PROVIDER_AWS)}", # noqa: E501
"status": 200,
"json": {"data": [{"extra": {"bucket": "test_bucket"}}]},
},
{
"url": f"{MOCK_URL}/api/v1.0/{ENDPOINT_AUTHENTICATIONS}?source_id={self.source_ids.get(Provider.PROVIDER_AWS)}", # noqa: E501
"status": 200,
"json": {"data": [{"id": self.source_ids.get(Provider.PROVIDER_AWS), "username": FAKE_AWS_ARN}]},
},
{
"url": f"{MOCK_URL}/api/v1.0/{ENDPOINT_SOURCES}/{self.source_ids.get(Provider.PROVIDER_AWS)}",
"status": 200,
"json": {
"name": "Provider AWS",
"source_type_id": SOURCE_TYPE_IDS_MAP.get(Provider.PROVIDER_AWS),
"uid": str(self.uuids.get(Provider.PROVIDER_AWS)),
},
},
]
}
self.mock_update_requests = {
Provider.PROVIDER_AWS: [
{
"url": f"{MOCK_URL}/api/v1.0/{ENDPOINT_APPLICATION_TYPES}/{COST_MGMT_APP_TYPE_ID}/sources?filter[id]={self.source_ids.get(Provider.PROVIDER_AWS)}", # noqa: E501
"status": 200,
"json": {"data": [{"not": "empty"}]},
},
{
"url": f"{MOCK_URL}/api/v1.0/{ENDPOINT_SOURCE_TYPES}?filter[id]={SOURCE_TYPE_IDS_MAP.get(Provider.PROVIDER_AWS)}", # noqa: E501
"status": 200,
"json": {"data": [{"name": SOURCE_TYPE_IDS[SOURCE_TYPE_IDS_MAP[Provider.PROVIDER_AWS]]}]},
},
{
"url": f"{MOCK_URL}/api/v1.0/{ENDPOINT_APPLICATIONS}?source_id={self.source_ids.get(Provider.PROVIDER_AWS)}", # noqa: E501
"status": 200,
"json": {"data": [{"extra": {"bucket": "test_bucket_2"}}]},
},
{
"url": f"{MOCK_URL}/api/v1.0/{ENDPOINT_AUTHENTICATIONS}?source_id={self.source_ids.get(Provider.PROVIDER_AWS)}", # noqa: E501
"status": 200,
"json": {"data": [{"id": self.source_ids.get(Provider.PROVIDER_AWS), "username": FAKE_AWS_ARN2}]},
},
{
"url": f"{MOCK_URL}/api/v1.0/{ENDPOINT_SOURCES}/{self.source_ids.get(Provider.PROVIDER_AWS)}",
"status": 200,
"json": {
"name": "Provider AWS",
"source_type_id": SOURCE_TYPE_IDS_MAP.get(Provider.PROVIDER_AWS),
"uid": str(self.uuids.get(Provider.PROVIDER_AWS)),
},
},
]
}
def test_listen_for_messages_aws_create_update_delete(self):
"""Test for app/auth create, app/auth update, app/source delete."""
# First, test the create pathway:
msgs = [
msg_generator(
KAFKA_APPLICATION_CREATE,
value={
"id": 1,
"source_id": self.source_ids.get(Provider.PROVIDER_AWS),
"application_type_id": COST_MGMT_APP_TYPE_ID,
},
),
msg_generator(
KAFKA_AUTHENTICATION_CREATE,
value={
"id": 1,
"source_id": self.source_ids.get(Provider.PROVIDER_AWS),
"authtype": AUTH_TYPES.get(Provider.PROVIDER_AWS),
},
),
]
with requests_mock.mock() as m:
for resp in self.mock_create_requests.get(Provider.PROVIDER_AWS):
m.get(url=resp.get("url"), status_code=resp.get("status"), json=resp.get("json"))
for msg in msgs:
mock_consumer = MockKafkaConsumer([msg])
source_integration.listen_for_messages(msg, mock_consumer, COST_MGMT_APP_TYPE_ID)
source = Sources.objects.get(source_id=self.source_ids.get(Provider.PROVIDER_AWS))
s = model_to_dict(source, fields=[f for f in self.sources.get(Provider.PROVIDER_AWS).keys()])
self.assertDictEqual(s, self.sources.get(Provider.PROVIDER_AWS), msg="failed create")
# now test the update pathway
msgs = [
msg_generator(
KAFKA_APPLICATION_UPDATE,
value={
"id": 1,
"source_id": self.source_ids.get(Provider.PROVIDER_AWS),
"application_type_id": COST_MGMT_APP_TYPE_ID,
},
),
msg_generator(
KAFKA_AUTHENTICATION_UPDATE,
value={
"id": 1,
"source_id": self.source_ids.get(Provider.PROVIDER_AWS),
"authtype": AUTH_TYPES.get(Provider.PROVIDER_AWS),
},
),
]
with requests_mock.mock() as m:
for resp in self.mock_update_requests.get(Provider.PROVIDER_AWS):
m.get(url=resp.get("url"), status_code=resp.get("status"), json=resp.get("json"))
for msg in msgs:
mock_consumer = MockKafkaConsumer([msg])
source_integration.listen_for_messages(msg, mock_consumer, COST_MGMT_APP_TYPE_ID)
source = Sources.objects.get(source_id=self.source_ids.get(Provider.PROVIDER_AWS))
s = model_to_dict(source, fields=[f for f in self.updated_sources.get(Provider.PROVIDER_AWS).keys()])
self.assertDictEqual(s, self.updated_sources.get(Provider.PROVIDER_AWS), msg="failed update")
# now test the delete pathway
msgs = [
msg_generator(
KAFKA_APPLICATION_DESTROY,
value={
"id": 1,
"source_id": self.source_ids.get(Provider.PROVIDER_AWS),
"application_type_id": COST_MGMT_APP_TYPE_ID,
},
),
msg_generator(
KAFKA_SOURCE_DESTROY,
value={
"id": 1,
"source_id": self.source_ids.get(Provider.PROVIDER_AWS),
"application_type_id": COST_MGMT_APP_TYPE_ID,
},
),
]
for msg in msgs:
mock_consumer = MockKafkaConsumer([msg])
source_integration.listen_for_messages(msg, mock_consumer, COST_MGMT_APP_TYPE_ID)
source = Sources.objects.get(source_id=self.source_ids.get(Provider.PROVIDER_AWS))
self.assertTrue(source.pending_delete, msg="failed delete")
def test_message_not_associated_with_cost_mgmt(self):
"""Test that messages not associated with cost-mgmt are not processed."""
table = [
{"processor": ApplicationMsgProcessor, "event": KAFKA_APPLICATION_CREATE},
{"processor": ApplicationMsgProcessor, "event": KAFKA_APPLICATION_UPDATE},
{"processor": ApplicationMsgProcessor, "event": KAFKA_APPLICATION_DESTROY},
{"processor": AuthenticationMsgProcessor, "event": KAFKA_AUTHENTICATION_CREATE},
{"processor": AuthenticationMsgProcessor, "event": KAFKA_AUTHENTICATION_UPDATE},
]
for test in table:
with self.subTest(test=test):
with requests_mock.mock() as m:
m.get(
url=f"{MOCK_URL}/api/v1.0/{ENDPOINT_APPLICATION_TYPES}/{COST_MGMT_APP_TYPE_ID}/sources?filter[id]=1", # noqa: E501
status_code=200,
json={"data": []},
)
with patch.object(test.get("processor"), "process") as mock_processor:
msg = msg_generator(
event_type=test.get("event"),
value={"id": 1, "source_id": 1, "application_type_id": COST_MGMT_APP_TYPE_ID + 1},
)
mock_consumer = MockKafkaConsumer([msg])
source_integration.listen_for_messages(msg, mock_consumer, COST_MGMT_APP_TYPE_ID)
mock_processor.assert_not_called()
def test_listen_for_messages_exceptions_no_retry(self):
"""Test listen_for_messages exceptions that do not cause a retry."""
table = [
{"event": KAFKA_APPLICATION_CREATE, "header": True, "value": b'{"this value is messeged up}'},
{"event": KAFKA_APPLICATION_DESTROY, "header": False},
{"event": KAFKA_AUTHENTICATION_CREATE, "header": True},
]
for test in table:
with self.subTest(test=test):
with requests_mock.mock() as m:
m.get(
url=f"{MOCK_URL}/api/v1.0/{ENDPOINT_APPLICATION_TYPES}/{COST_MGMT_APP_TYPE_ID}/sources?filter[id]=1", # noqa: E501
status_code=404,
json={},
)
msg = msg_generator(test.get("event"))
if not test.get("header"):
msg = msg_generator(test.get("event"), header=None)
if test.get("value"):
msg._value = test.get("value")
mock_consumer = MockKafkaConsumer([msg])
with patch("sources.kafka_listener.time.sleep") as mock_time:
with patch.object(MockKafkaConsumer, "commit") as mocked_mock_consumer:
source_integration.listen_for_messages(msg, mock_consumer, COST_MGMT_APP_TYPE_ID)
mocked_mock_consumer.assert_called()
mock_time.assert_not_called()
def test_listen_for_messages_http_exception(self):
"""Test listen_for_messages exceptions that cause a retry (http errors)."""
table = [KAFKA_AUTHENTICATION_CREATE, KAFKA_AUTHENTICATION_UPDATE]
for test in table:
with self.subTest(test=test):
with requests_mock.mock() as m:
for resp in self.mock_create_requests.get(Provider.PROVIDER_AWS):
m.get(url=resp.get("url"), status_code=401, json=resp.get("json"))
msg = msg_generator(
test,
value={
"id": 1,
"source_id": self.source_ids.get(Provider.PROVIDER_AWS),
"application_type_id": COST_MGMT_APP_TYPE_ID,
"authtype": choice(list(AUTH_TYPES.values())),
},
)
mock_consumer = MockKafkaConsumer([msg])
with patch("sources.kafka_listener.time.sleep") as mock_time:
source_integration.listen_for_messages(msg, mock_consumer, COST_MGMT_APP_TYPE_ID)
mock_time.assert_called()
def test_listen_for_messages_exceptions_db_errors(self):
"""Test listen_for_messages exceptions that cause a retry (db errors)."""
table = [
{
"event": KAFKA_APPLICATION_CREATE,
"exception": InterfaceError,
"path": "sources.storage.create_source_event",
},
{
"event": KAFKA_AUTHENTICATION_CREATE,
"exception": OperationalError,
"path": "sources.storage.create_source_event",
},
{
"event": KAFKA_APPLICATION_DESTROY,
"exception": IntegrityError,
"path": "sources.storage.enqueue_source_delete",
},
]
for test in table:
with self.subTest(test=test):
with requests_mock.mock() as m:
for resp in self.mock_create_requests.get(Provider.PROVIDER_AWS):
m.get(url=resp.get("url"), status_code=resp.get("status"), json=resp.get("json"))
msg = msg_generator(
test.get("event"),
value={
"id": 1,
"source_id": self.source_ids.get(Provider.PROVIDER_AWS),
"application_type_id": COST_MGMT_APP_TYPE_ID,
"authtype": choice(list(AUTH_TYPES.values())),
},
)
mock_consumer = MockKafkaConsumer([msg])
with patch(test.get("path"), side_effect=test.get("exception")):
with patch("sources.kafka_listener.time.sleep") as mock_time:
with patch("sources.kafka_listener.close_and_set_db_connection"):
source_integration.listen_for_messages(msg, mock_consumer, COST_MGMT_APP_TYPE_ID)
mock_time.assert_called()
def test_execute_koku_provider_op_create(self):
"""Test to execute Koku Operations to sync with Sources for creation."""
source_id = self.source_ids.get(Provider.PROVIDER_AWS)
provider = Sources(**self.sources.get(Provider.PROVIDER_AWS))
provider.save()
msg = {"operation": "create", "provider": provider, "offset": provider.offset}
with patch.object(SourcesHTTPClient, "set_source_status"):
with patch.object(ProviderAccessor, "cost_usage_source_ready", returns=True):
source_integration.execute_koku_provider_op(msg)
self.assertIsNotNone(Sources.objects.get(source_id=source_id).koku_uuid)
self.assertFalse(Sources.objects.get(source_id=source_id).pending_update)
self.assertEqual(Sources.objects.get(source_id=source_id).koku_uuid, str(provider.source_uuid))
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
def test_execute_koku_provider_op_destroy(self):
"""Test to execute Koku Operations to sync with Sources for destruction."""
source_id = self.source_ids.get(Provider.PROVIDER_AWS)
provider = Sources(**self.sources.get(Provider.PROVIDER_AWS))
provider.save()
msg = {"operation": "destroy", "provider": provider, "offset": provider.offset}
with patch.object(SourcesHTTPClient, "set_source_status"):
source_integration.execute_koku_provider_op(msg)
self.assertEqual(Sources.objects.filter(source_id=source_id).exists(), False)
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
def test_execute_koku_provider_op_destroy_provider_not_found(self):
"""Test to execute Koku Operations to sync with Sources for destruction with provider missing.
First, raise ProviderBuilderError. Check that provider and source still exists.
Then, re-call provider destroy without exception, then see both source and provider are gone.
"""
source_id = self.source_ids.get(Provider.PROVIDER_AWS)
provider = Sources(**self.sources.get(Provider.PROVIDER_AWS))
provider.save()
# check that the source exists
self.assertTrue(Sources.objects.filter(source_id=source_id).exists())
with patch.object(ProviderAccessor, "cost_usage_source_ready", returns=True):
builder = SourcesProviderCoordinator(source_id, provider.auth_header)
builder.create_account(provider)
self.assertTrue(Provider.objects.filter(uuid=provider.source_uuid).exists())
provider = Sources.objects.get(source_id=source_id)
msg = {"operation": "destroy", "provider": provider, "offset": provider.offset}
with patch.object(SourcesHTTPClient, "set_source_status"):
with patch.object(ProviderBuilder, "destroy_provider", side_effect=raise_provider_manager_error):
source_integration.execute_koku_provider_op(msg)
self.assertTrue(Provider.objects.filter(uuid=provider.source_uuid).exists())
self.assertTrue(Sources.objects.filter(source_uuid=provider.source_uuid).exists())
self.assertTrue(Sources.objects.filter(koku_uuid=provider.source_uuid).exists())
with patch.object(SourcesHTTPClient, "set_source_status"):
source_integration.execute_koku_provider_op(msg)
self.assertFalse(Provider.objects.filter(uuid=provider.source_uuid).exists())
def test_execute_koku_provider_op_update(self):
"""Test to execute Koku Operations to sync with Sources for update."""
def set_status_helper(*args, **kwargs):
"""helper to clear update flag."""
storage.clear_update_flag(source_id)
source_id = self.source_ids.get(Provider.PROVIDER_AWS)
provider = Sources(**self.sources.get(Provider.PROVIDER_AWS))
provider.save()
msg = {"operation": "create", "provider": provider, "offset": provider.offset}
with patch.object(SourcesHTTPClient, "set_source_status"):
with patch.object(ProviderAccessor, "cost_usage_source_ready", returns=True):
source_integration.execute_koku_provider_op(msg)
builder = SourcesProviderCoordinator(source_id, provider.auth_header)
source = storage.get_source_instance(source_id)
uuid = source.koku_uuid
with patch.object(ProviderAccessor, "cost_usage_source_ready", returns=True):
builder.update_account(source)
self.assertEqual(
Provider.objects.get(uuid=uuid).billing_source.data_source,
self.sources.get(Provider.PROVIDER_AWS).get("billing_source").get("data_source"),
)
provider.billing_source = {"data_source": {"bucket": "new-bucket"}}
provider.koku_uuid = uuid
provider.pending_update = True
provider.save()
msg = {"operation": "update", "provider": provider, "offset": provider.offset}
with patch.object(SourcesHTTPClient, "set_source_status", side_effect=set_status_helper):
with patch.object(ProviderAccessor, "cost_usage_source_ready", returns=True):
source_integration.execute_koku_provider_op(msg)
response = Sources.objects.get(source_id=source_id)
self.assertEqual(response.pending_update, False)
self.assertEqual(response.billing_source, {"data_source": {"bucket": "new-bucket"}})
response = Provider.objects.get(uuid=uuid)
self.assertEqual(response.billing_source.data_source.get("bucket"), "new-bucket")
def test_execute_koku_provider_op_skip_status(self):
"""Test to execute Koku Operations to sync with Sources and not push status."""
source_id = self.source_ids.get(Provider.PROVIDER_AWS)
provider = Sources(**self.sources.get(Provider.PROVIDER_AWS))
provider.save()
msg = {"operation": "create", "provider": provider, "offset": provider.offset}
with patch.object(SourcesHTTPClient, "set_source_status"):
with patch.object(ProviderAccessor, "cost_usage_source_ready", side_effect=SkipStatusPush):
source_integration.execute_koku_provider_op(msg)
self.assertEqual(Sources.objects.get(source_id=source_id).status, {})
def test_collect_pending_items(self):
"""Test to load the in-progress queue."""
aws_source = Sources(
source_id=1,
auth_header=Config.SOURCES_FAKE_HEADER,
offset=1,
name="AWS Source",
source_type=Provider.PROVIDER_AWS,
authentication="fakeauth",
billing_source="s3bucket",
)
aws_source.save()
aws_source_incomplete = Sources(
source_id=2,
auth_header=Config.SOURCES_FAKE_HEADER,
offset=2,
name="AWS Source 2",
source_type=Provider.PROVIDER_AWS,
)
aws_source_incomplete.save()
ocp_source = Sources(
source_id=3,
auth_header=Config.SOURCES_FAKE_HEADER,
authentication="fakeauth",
offset=3,
name="OCP Source",
source_type=Provider.PROVIDER_OCP,
)
ocp_source.save()
ocp_source_complete = Sources(
source_id=4,
auth_header=Config.SOURCES_FAKE_HEADER,
offset=4,
name="Complete OCP Source",
source_type=Provider.PROVIDER_OCP,
koku_uuid=faker.uuid4(),
)
ocp_source_complete.save()
source_delete = Sources.objects.get(source_id=4)
source_delete.pending_delete = True
source_delete.save()
response = source_integration._collect_pending_items()
self.assertEqual(len(response), 3)
@patch("time.sleep", side_effect=None)
@patch("sources.kafka_listener.check_kafka_connection", side_effect=[bool(0), bool(1)])
def test_kafka_connection_metrics_listen_for_messages(self, mock_start, mock_sleep):
"""Test check_kafka_connection increments kafka connection errors on KafkaError."""
connection_errors_before = WORKER_REGISTRY.get_sample_value("kafka_connection_errors_total")
source_integration.is_kafka_connected()
connection_errors_after = WORKER_REGISTRY.get_sample_value("kafka_connection_errors_total")
self.assertEqual(connection_errors_after - connection_errors_before, 1)
# @patch.object(Config, "SOURCES_API_URL", "http://www.sources.com")
# def test_process_message_application_unsupported_source_type(self):
# """Test the process_message function with an unsupported source type."""
# test_application_id = 2
# test = {
# "event": source_integration.KAFKA_APPLICATION_CREATE,
# "value": {"id": 1, "source_id": 1, "application_type_id": test_application_id},
# }
# msg_data = MsgDataGenerator(event_type=test.get("event"), value=test.get("value")).get_data()
# with patch.object(
# SourcesHTTPClient, "get_source_details", return_value={"name": "my ansible", "source_type_id": 2}
# ):
# with patch.object(SourcesHTTPClient, "get_application_settings", return_value={}):
# with patch.object(SourcesHTTPClient, "get_source_type_name", return_value="ansible-tower"):
# # self.assertIsNone(process_message(test_application_id, msg_data))
# self.assertIsNone(msg_data)
# pass
@patch("sources.kafka_listener.execute_koku_provider_op")
def test_process_synchronize_sources_msg_db_error(self, mock_process_message):
"""Test processing synchronize messages with database errors."""
provider = Sources.objects.create(**self.sources.get(Provider.PROVIDER_AWS))
provider.save()
test_queue = queue.PriorityQueue()
test_matrix = [
{"test_value": {"operation": "update", "provider": provider}, "side_effect": InterfaceError},
{"test_value": {"operation": "update", "provider": provider}, "side_effect": OperationalError},
]
for i, test in enumerate(test_matrix):
mock_process_message.side_effect = test.get("side_effect")
with patch("sources.kafka_listener.close_and_set_db_connection") as close_mock:
with patch.object(Config, "RETRY_SECONDS", 0):
process_synchronize_sources_msg((i, test["test_value"]), test_queue)
close_mock.assert_called()
for i in range(2):
priority, _ = test_queue.get_nowait()
self.assertEqual(priority, i)
@patch("sources.kafka_listener.execute_koku_provider_op")
    def test_process_synchronize_sources_msg_integration_error(self, mock_process_message):
        """Test processing synchronize messages with integration and integrity errors."""
provider = Sources.objects.create(**self.sources.get(Provider.PROVIDER_AWS))
provider.save()
test_queue = queue.PriorityQueue()
test_matrix = [
{"test_value": {"operation": "update", "provider": provider}, "side_effect": IntegrityError},
{"test_value": {"operation": "update", "provider": provider}, "side_effect": SourcesIntegrationError},
]
for i, test in enumerate(test_matrix):
mock_process_message.side_effect = test.get("side_effect")
with patch.object(Config, "RETRY_SECONDS", 0):
process_synchronize_sources_msg((i, test["test_value"]), test_queue)
for i in range(2):
priority, _ = test_queue.get_nowait()
self.assertEqual(priority, i)
# @patch("sources.kafka_listener.execute_koku_provider_op")
# def test_process_synchronize_sources_msg(self, mock_process_message):
# """Test processing synchronize messages."""
# provider = Sources(**self.aws_source)
# test_queue = queue.PriorityQueue()
# messages = [
# {"operation": "create", "provider": provider, "offset": provider.offset},
# {"operation": "update", "provider": provider},
# ]
# for msg in messages:
# with patch("sources.storage.clear_update_flag") as mock_clear_flag:
# process_synchronize_sources_msg((0, msg), test_queue)
# mock_clear_flag.assert_called()
# msg = {"operation": "destroy", "provider": provider}
# with patch("sources.storage.clear_update_flag") as mock_clear_flag:
# process_synchronize_sources_msg((0, msg), test_queue)
# mock_clear_flag.assert_not_called()
def test_storage_callback_create(self):
"""Test storage callback puts create task onto queue."""
local_source = Sources(**self.aws_local_source, pending_update=True)
local_source.save()
with patch("sources.kafka_listener.execute_process_queue"):
storage_callback("", local_source)
_, msg = PROCESS_QUEUE.get_nowait()
self.assertEqual(msg.get("operation"), "create")
def test_storage_callback_update(self):
"""Test storage callback puts update task onto queue."""
uuid = self.aws_local_source.get("source_uuid")
local_source = Sources(**self.aws_local_source, koku_uuid=uuid, pending_update=True)
local_source.save()
with patch("sources.kafka_listener.execute_process_queue"), patch(
"sources.storage.screen_and_build_provider_sync_create_event", return_value=False
):
storage_callback("", local_source)
_, msg = PROCESS_QUEUE.get_nowait()
self.assertEqual(msg.get("operation"), "update")
def test_storage_callback_update_and_delete(self):
"""Test storage callback only deletes on pending update and delete."""
uuid = self.aws_local_source.get("source_uuid")
local_source = Sources(**self.aws_local_source, koku_uuid=uuid, pending_update=True, pending_delete=True)
local_source.save()
with patch("sources.kafka_listener.execute_process_queue"), patch(
"sources.storage.screen_and_build_provider_sync_create_event", return_value=False
):
storage_callback("", local_source)
_, msg = PROCESS_QUEUE.get_nowait()
self.assertEqual(msg.get("operation"), "destroy")
| 1.578125
| 2
|
images.py
|
ahrnbom/guts
| 0
|
12781489
|
"""
Copyright (C) 2022 <NAME>
Released under MIT License. See the file LICENSE for details.
Module for some classes that describe sequences of images.
If your custom dataset stores images in some other way,
create a subclass of ImageSequence and use it.
"""
from typing import List
import numpy as np
from pathlib import Path
import imageio as iio
class ImageSequence:
def load(self, im_num:int) -> np.ndarray:
pass
def number_of_frames(self) -> int:
pass
def start_frame(self) -> int:
pass
class FolderSequence(ImageSequence):
def __init__(self, folder:Path):
self.images = folder.glob('*.jpg')
self.images = list(self.images)
self.images.sort()
def load(self, im_num:int) -> np.ndarray:
return iio.imread(self.images[im_num])
def number_of_frames(self) -> int:
return len(self.images)
def start_frame(self) -> int:
return 0
class VideoSequence(ImageSequence):
def __init__(self, vid_file:Path):
assert vid_file.is_file()
self.vid = iio.get_reader(vid_file)
self.frame_count = None
def __del__(self):
# Attempt to clean up
self.vid.close()
def load(self, im_num:int) -> np.ndarray:
return self.vid.get_data(im_num)
def number_of_frames(self) -> int:
if self.frame_count is None:
self.frame_count = self.vid.count_frames()
return self.frame_count
def start_frame(self) -> int:
return 0
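# Illustrative sketch (not part of the original module): a custom subclass for
# a dataset that stores frames as numbered PNG files, as suggested by the
# module docstring above.
class PNGFolderSequence(ImageSequence):
    def __init__(self, folder: Path, start: int = 0):
        self.images = sorted(folder.glob('*.png'))
        self.start = start

    def load(self, im_num: int) -> np.ndarray:
        return iio.imread(self.images[im_num])

    def number_of_frames(self) -> int:
        return len(self.images)

    def start_frame(self) -> int:
        return self.start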
| 2.96875
| 3
|
python/shellchain.py
|
sharmavins23/Shellchain
| 0
|
12781490
|
from time import time
import hashlib
import json
from urllib.parse import urlparse
import requests
# Class definition of our shellchain (Blockchain-like) object
class Shellchain:
def __init__(self): # constructor
self.current_transactions = []
self.chain = []
self.rivers = set()
# the first seashell has to be created
self.dig_shell(previous_hash='1', proof=100) # first seashell in chain
# mine a new block
def dig_shell(self, proof, previous_hash):
shell = {
# point in chain that shell occurs at
'index': len(self.chain) + 1,
'timestamp': time(), # current timestamp using date/time library
'transactions': self.current_transactions,
'proof': proof,
'previous_hash': previous_hash or self.hash(self.chain[-1])
}
# reset current list of transactions
self.current_transactions = []
# add new shell to shellchain
self.chain.append(shell)
return shell
# sale of fish based on certain garbage amount removed
def fish_sale(self, sender, amount, garbageAmount):
self.current_transactions.append({
'type': 'fish_sale',
'sender': sender,
'amount': amount,
'garbageAmount': garbageAmount
})
# return index of new transaction
return self.last_shell['index'] + 1
# trades a number of fish between rivers
def fish_trade(self, sender, recipient, amount):
self.current_transactions.append({
'type': 'fish_trade',
'sender': sender,
'recipient': recipient,
'amount': amount
})
# return index of new transaction
return self.last_shell['index'] + 1
# a crab is caught thieving
def crab_catch(self, sender, mafia, amount, garbageAmount):
self.current_transactions.append({
'type': 'crab_catch',
'sender': sender,
'mafia': mafia,
'amount': amount,
'garbageAmount': garbageAmount
})
# return index of new transaction
return self.last_shell['index'] + 1
@property
def last_shell(self):
return self.chain[-1]
@staticmethod
def hash(shell):
shell_string = json.dumps(shell, sort_keys=True).encode()
return hashlib.sha256(shell_string).hexdigest()
def proof_of_work(self, last_shell):
# find a number p' such that hash(pp') contains 4 leading zeroes
# p is previous proof, p' is new proof
last_proof = last_shell['proof']
last_hash = self.hash(last_shell) # hashes last shell's proof of work
proof = 0
# checks every proof value until true
while self.valid_proof(last_proof, proof, last_hash) is False:
proof += 1
return proof
@staticmethod
def valid_proof(last_proof, proof, last_hash):
guess = f'{last_proof}{proof}{last_hash}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:4] == "0000"
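# Illustrative sketch (not part of the original module): recording a transaction
# and digging up (mining) a new shell on top of the genesis shell.
def _example_mining():
    chain = Shellchain()
    chain.fish_sale(sender='river-1', amount=3, garbageAmount=12)
    last = chain.last_shell
    proof = chain.proof_of_work(last)  # brute-force until the hash has 4 leading zeroes
    shell = chain.dig_shell(proof, chain.hash(last))
    return shell  # 'index' is 2: the genesis shell plus this one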
| 3.234375
| 3
|
LineByLineEqualEvaluator.py
|
revygabor/HWTester
| 0
|
12781491
|
<reponame>revygabor/HWTester
import Evaluator
class LineByLineEqualEvaluator(Evaluator.Evaluator):
def __init__(self, details):
pass
def evaluate(self, input, target_output, output, log):
if output != target_output:
output_lines = output.split("\n")
target_output_lines = target_output.split("\n")
goodlines = 0
errmsg = None
for i in range(len(target_output_lines)):
if len(output_lines) <= i:
if errmsg:
return (float(goodlines) / len(target_output_lines), errmsg)
return (float(goodlines) / len(target_output_lines), "Missing line %d in output: \n%s\n\nfor input: \n%s\n" % (i+1, log.truncate(output), input))
if output_lines[i].strip() != target_output_lines[i].strip() and not errmsg:
errmsg = "Wrong answer in line %d of %d in output: \n%s\n\nfor input: \n%s\n" % (i+1, len(output_lines), log.truncate(output), input) # + "total output was:"+''.join(output_lines)
if output_lines[i].strip() == target_output_lines[i].strip():
goodlines += 1
#print 'goodlines +=1',output_lines[i],target_output_lines[i]
return (float(goodlines) / len(target_output_lines), errmsg)
return (1.0, "")
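# Illustrative sketch (not part of the original file): the evaluator awards
# partial credit per matching line. _FakeLog stands in for the real log object,
# which is assumed to expose a truncate() method.
class _FakeLog:
    def truncate(self, text):
        return text


def _example_partial_credit():
    evaluator = LineByLineEqualEvaluator(details=None)
    score, error = evaluator.evaluate("in", "1\n2\n3", "1\nX\n3", _FakeLog())
    return score, error  # score == 2/3, error points at the mismatch in line 2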
| 2.78125
| 3
|
acora/__init__.py
|
scoder/acora
| 209
|
12781492
|
"""\
Acora - a multi-keyword search engine based on Aho-Corasick trees.
Usage::
>>> from acora import AcoraBuilder
Collect some keywords::
>>> builder = AcoraBuilder('ab', 'bc', 'de')
>>> builder.add('a', 'b')
Generate the Acora search engine::
>>> ac = builder.build()
Search a string for all occurrences::
>>> ac.findall('abc')
[('a', 0), ('ab', 0), ('b', 1), ('bc', 1)]
>>> ac.findall('abde')
[('a', 0), ('ab', 0), ('b', 1), ('de', 2)]
"""
from __future__ import absolute_import
import sys
IS_PY3 = sys.version_info[0] >= 3
if IS_PY3:
unicode = str
FILE_BUFFER_SIZE = 32 * 1024
class PyAcora(object):
"""A simple (and very slow) Python implementation of the Acora
search engine.
"""
transitions = None
def __init__(self, machine, transitions=None):
if transitions is not None:
# old style format
start_state = machine
self.transitions = dict([
((state.id, char), (target_state.id, target_state.matches))
for ((state, char), target_state) in transitions.items()])
else:
# new style Machine format
start_state = machine.start_state
ignore_case = machine.ignore_case
self.transitions = transitions = {}
child_states = machine.child_states
child_targets = {}
state_matches = {}
needs_bytes_conversion = None
for state in child_states:
state_id = state.id
child_targets[state_id], state_matches[state_id] = (
_merge_targets(state, ignore_case))
if needs_bytes_conversion is None and state_matches[state_id]:
if IS_PY3:
needs_bytes_conversion = any(
isinstance(s, bytes) for s in state_matches[state_id])
elif any(isinstance(s, unicode) for s in state_matches[state_id]):
# in Py2, some keywords might be str even though we're processing unicode
needs_bytes_conversion = False
if needs_bytes_conversion is None and not IS_PY3:
needs_bytes_conversion = True
if needs_bytes_conversion:
if IS_PY3:
convert = ord
else:
from codecs import latin_1_encode
def convert(s):
return latin_1_encode(s)[0]
else:
convert = None
get_child_targets = child_targets.get
get_matches = state_matches.get
state_id = start_state.id
for ch, child in _merge_targets(start_state, ignore_case)[0].items():
child_id = child.id
if convert is not None:
ch = convert(ch)
transitions[(state_id, ch)] = (child_id, get_matches(child_id))
for state in child_states:
state_id = state.id
for ch, child in get_child_targets(state_id).items():
child_id = child.id
if convert is not None:
ch = convert(ch)
transitions[(state_id, ch)] = (child_id, get_matches(child_id))
self.start_state = start_state.id
def finditer(self, s):
"""Iterate over all occurrences of any keyword in the string.
Returns (keyword, offset) pairs.
"""
state = self.start_state
start_state = (state, [])
next_state = self.transitions.get
pos = 0
for char in s:
pos += 1
state, matches = next_state((state, char), start_state)
if matches:
for match in matches:
yield (match, pos-len(match))
def findall(self, s):
"""Find all occurrences of any keyword in the string.
Returns a list of (keyword, offset) pairs.
"""
return list(self.finditer(s))
def filefind(self, f):
"""Iterate over all occurrences of any keyword in a file.
Returns (keyword, offset) pairs.
"""
opened = False
if not hasattr(f, 'read'):
f = open(f, 'rb')
opened = True
try:
state = self.start_state
start_state = (state, ())
next_state = self.transitions.get
pos = 0
while 1:
data = f.read(FILE_BUFFER_SIZE)
if not data:
break
for char in data:
pos += 1
state, matches = next_state((state, char), start_state)
if matches:
for match in matches:
yield (match, pos-len(match))
finally:
if opened:
f.close()
def filefindall(self, f):
"""Find all occurrences of any keyword in a file.
Returns a list of (keyword, offset) pairs.
"""
return list(self.filefind(f))
# import from shared Python/Cython module
from acora._acora import (
insert_bytes_keyword, insert_unicode_keyword,
build_trie as _build_trie, build_MachineState as _MachineState, merge_targets as _merge_targets)
# import from Cython module if available
try:
from acora._cacora import (
UnicodeAcora, BytesAcora, insert_bytes_keyword, insert_unicode_keyword)
except ImportError:
# C module not there ...
UnicodeAcora = BytesAcora = PyAcora
class AcoraBuilder(object):
"""The main builder class for an Acora search engine.
Add keywords by calling ``.add(*keywords)`` or by passing them
into the constructor. Then build the search engine by calling
``.build()``.
Builds a case insensitive search engine when passing
``ignore_case=True``, and a case sensitive engine otherwise.
"""
ignore_case = False
def __init__(self, *keywords, **kwargs):
if kwargs:
self.ignore_case = kwargs.pop('ignore_case', False)
if kwargs:
raise TypeError(
"%s() got unexpected keyword argument %s" % (
self.__class__.__name__, next(iter(kwargs))))
if len(keywords) == 1 and isinstance(keywords[0], (list, tuple)):
keywords = keywords[0]
self.for_unicode = None
self.state_counter = 1
self.keywords = set()
self.tree = _MachineState(0)
if keywords:
self.update(keywords)
def __update(self, keywords):
"""Add more keywords to the search engine builder.
Adding keywords does not impact previously built search
engines.
"""
if not keywords:
return
self.tree = None
self.keywords.update(keywords)
if self.for_unicode is None:
for keyword in keywords:
if isinstance(keyword, unicode):
self.for_unicode = True
elif isinstance(keyword, bytes):
self.for_unicode = False
else:
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
break
# validate input string types
marker = object()
if self.for_unicode:
for keyword in keywords:
if not isinstance(keyword, unicode):
break
else:
keyword = marker
else:
for keyword in keywords:
if not isinstance(keyword, bytes):
break
else:
keyword = marker
if keyword is not marker:
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
def add(self, *keywords):
"""Add more keywords to the search engine builder.
Adding keywords does not impact previously built search
engines.
"""
if keywords:
self.update(keywords)
def build(self, ignore_case=None, acora=None):
"""Build a search engine from the aggregated keywords.
Builds a case insensitive search engine when passing
``ignore_case=True``, and a case sensitive engine otherwise.
"""
if acora is None:
if self.for_unicode:
acora = UnicodeAcora
else:
acora = BytesAcora
if self.for_unicode == False and ignore_case:
import sys
if sys.version_info[0] >= 3:
raise ValueError(
"Case insensitive search is not supported for byte strings in Python 3")
if ignore_case is not None and ignore_case != self.ignore_case:
# must rebuild tree
builder = type(self)(ignore_case=ignore_case)
builder.update(self.keywords)
return builder.build(acora=acora)
return acora(_build_trie(self.tree, ignore_case=self.ignore_case))
def update(self, keywords):
for_unicode = self.for_unicode
ignore_case = self.ignore_case
insert_keyword = insert_unicode_keyword if for_unicode else insert_bytes_keyword
for keyword in keywords:
if for_unicode is None:
for_unicode = self.for_unicode = isinstance(keyword, unicode)
insert_keyword = (
insert_unicode_keyword if for_unicode else insert_bytes_keyword)
elif for_unicode != isinstance(keyword, unicode):
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
self.state_counter = insert_keyword(
self.tree, keyword, self.state_counter, ignore_case)
self.keywords.update(keywords)
### convenience functions
def search(s, *keywords):
"""Convenience function to search a string for keywords.
"""
acora = AcoraBuilder(keywords).build()
return acora.findall(s)
def search_ignore_case(s, *keywords):
"""Convenience function to search a string for keywords. Case
insensitive version.
"""
acora = AcoraBuilder(keywords, ignore_case=True).build()
return acora.findall(s)
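# Illustrative sketch (not part of the original module): the convenience helper
# above reports overlapping keyword hits; sorting makes the order deterministic
# across the pure-Python and compiled engines.
def _example_search():
    hits = sorted(search('abcabc', 'ab', 'bc'))
    # expected: [('ab', 0), ('ab', 3), ('bc', 1), ('bc', 4)]
    return hits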
| 2.609375
| 3
|
python/01/led_pwm_hw.py
|
matsujirushi/raspi_parts_kouryaku
| 6
|
12781493
|
<filename>python/01/led_pwm_hw.py
import sys
import pigpio
LED_PIN = 12
LED_PWM_FREQUENCY = 8000
pwm_duty = float(sys.argv[1])
pi = pigpio.pi()
pi.set_mode(LED_PIN, pigpio.OUTPUT)
pi.hardware_PWM(LED_PIN, int(LED_PWM_FREQUENCY), int(pwm_duty * 1e6))
| 3.046875
| 3
|
src/app.py
|
arashshams/dsci532-group3
| 0
|
12781494
|
import os
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
import altair as alt
import geopandas as gpd
import pandas as pd
from datetime import datetime, date
import json
import plotly.express as px
from enum import Enum
class SelectionMode(Enum):
World = 1
Continents = 2
Countries = 3
# As there is only 1 callback function allowed to map to each output,
# we use this to check which value got updated and update the plots
# accordingly
def is_updated(key, new_value):
return ((prev_vals[key] is None and new_value is not None)
or (prev_vals[key] is not None and prev_vals[key] != new_value))
def is_perCapita(key):
"""
Returns true if per capita is selected, false otherwise
Parameters
----------
key : str
        value of option selected (absolute or per capita)
Returns
-------
boolean
true if key is "Per Capita", else false
"""
return key == "Per Capita"
def calculate_continent_daywise(countries_daywise_df):
"""
Returns the output of calculate_continent_statistics()
Parameters
----------
countries_daywise_df : df
dataframe of daily observations
Returns
-------
continents_df
output of calculate_continent_statistics()
"""
return calculate_continent_statistics(countries_daywise_df, 'Date')
def calculate_continent_statistics(countries_df, group_col):
"""
Returns dataframe based on a column to group the data by
Parameters
----------
countries_df : df
dataframe of countries
group_col : str
the column to group the data
Returns
-------
continents_df
grouped dataframe
"""
continents_df = countries_df.drop(drop_cols, axis=1).groupby([group_col, 'WHO Region']).agg('mean').reset_index()
continents_df['Country/Region'] = continents_df['WHO Region']
continents_df['Population'] = population_data['Population'].sum()
return continents_df
def calculate_world_daywise(countries_daywise_df):
"""
Returns the output of calculate_world_statistics()
Parameters
----------
countries_daywise_df : df
dataframe of daily observations
Returns
-------
world_df
output of calculate_world_statistics()
"""
return calculate_world_statistics(countries_daywise_df, 'Date')
def calculate_world_statistics(countries_df, group_col):
"""
Returns dataframe based on a column to group the data by
Parameters
----------
countries_df : df
dataframe of countries
group_col : str
the column to group the data
Returns
-------
world_df
grouped dataframe
"""
world_df = countries_df.drop(drop_cols, axis=1).groupby(group_col).agg('mean').reset_index()
world_df['Country/Region'] = 'World'
world_df['WHO Region'] = 'World'
world_df['Population'] = population_data['Population'].sum()
return world_df
def generate_map(chart_data):
"""
Plots interactive world map
Parameters
----------
chart_data : df
dataframe of filtered data to plot
Returns
-------
plot
map of the world colored by confirmed cases
"""
fig = px.choropleth(chart_data, locations="country_code",
color="Confirmed",
hover_name="Country/Region",
color_continuous_scale='Reds',
projection= 'equirectangular',
labels= {'Confirmed':'Confirmed Cases'},
width = 700,
height = 300
)
fig.update_layout(
geo=dict(
showframe=False,
showcoastlines=False,
projection_type='equirectangular'),
margin={"r":0,"t":20,"l":0,"b":0})
return fig
def load_daily_data():
"""
Reads in data for COVID daily observations
Parameters
----------
None
Returns
-------
dataframe
data for COVID daily observations
"""
return pd.read_csv(os.path.join('data', 'raw', 'full_grouped.csv'))
def load_population_data():
"""
Reads in data for countries and populations
Parameters
----------
None
Returns
-------
dataframe
data for countries and populations
"""
return pd.read_csv(os.path.join('data', 'processed', 'worldometer_data.csv'),
usecols = ['Country/Region','Population'])
def load_country_code_data():
"""
    Reads in country boundary shapefile data and normalizes country names to match the COVID dataset
Parameters
----------
None
Returns
-------
gdf
        GeoDataFrame with country, country_code and geometry columns
"""
name_conversion = {
'East Timor': 'Timor-Leste',
'Republic of the Congo': 'Congo (Kinshasa)',
'Ivory Coast': 'Cote d\'Ivoire',
'Macedonia': 'North Macedonia',
'Myanmar': 'Burma',
'Republic of Serbia': 'Serbia',
'Taiwan': 'Taiwan*',
'The Bahamas': 'Bahamas',
'United Republic of Tanzania': 'Tanzania',
'United States of America': 'US'
}
shapefile = os.path.join('data', 'ne_110m_admin_0_countries.shp')
gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]
gdf.columns = ['country', 'country_code', 'geometry']
gdf.loc[gdf['country'].isin(name_conversion.keys()), 'country'] = gdf['country'].map(name_conversion)
return gdf
def join_population_data(daily_data, population_data):
"""
Merges daily_data and population_data dataframes
Parameters
----------
daily_data : df
dataframe of daily observation
population_data : df
dataframe of population
Returns
-------
merged df
merged dataframe from daily_data and population_data
"""
return daily_data.merge(population_data, how = 'left', on = 'Country/Region')
def join_country_code_data(daily_data, country_code_data):
"""
Merges daily_data and country_code_data dataframes
Parameters
----------
daily_data : df
dataframe of daily observation
country_code_data : df
dataframe of country codes
Returns
-------
merged df
merged dataframe from daily_data and country_code_data
"""
#new columns: country, country_code, geometry
return country_code_data.merge(daily_data, left_on = 'country', right_on = 'Country/Region').drop(['country'], axis=1)
alt.data_transformers.disable_max_rows()
prev_vals = {'country': None, 'continent': None}
drop_cols = ['Country/Region', 'Population', 'country_code', 'geometry']
metrics = {
'Population': 'first',
'Confirmed': 'mean',
'Deaths': 'mean',
'Recovered': 'mean',
'Active': 'mean',
'New cases': 'mean',
'New deaths': 'mean',
'New recovered': 'mean',
'WHO Region': 'first'
}
month_data = load_daily_data()
population_data = load_population_data()
country_code_data = load_country_code_data()
countries_daywise_df = join_population_data(month_data, population_data)
countries_daywise_df = join_country_code_data(countries_daywise_df, country_code_data)
continents_daywise_df = calculate_continent_daywise(countries_daywise_df)
world_daywise_df = calculate_world_daywise(countries_daywise_df)
countries = list(set(countries_daywise_df['Country/Region'].tolist()))
continents = list(set(countries_daywise_df['WHO Region'].tolist()))
countries.sort()
continents.sort()
date_range_selection = html.Label([
'Date range selection',
dcc.DatePickerRange(
id='date_selection_range',
min_date_allowed=date(2020, 1, 22),
max_date_allowed=date(2020, 7, 27),
initial_visible_month=date(2020, 1, 22),
start_date=date(2020, 1, 22),
end_date=date(2020, 7, 27),
stay_open_on_select=True,
updatemode='bothdates'
)
])
options_selection = html.Label([
'Display Data',
dcc.RadioItems(
id='select_options',
options=[
{'label': 'Absolute', 'value': 'Absolute'},
{'label': 'Per Capita', 'value': 'Per Capita'},
],
value='Absolute',
labelStyle={'margin-right': '25px'},
inputStyle={'margin-right': '5px'}
) # default option is absolute
])
region_selection = html.Label([
'Selection Mode',
dcc.RadioItems(
id='region_selection',
options=[
{'label': item.name, 'value': item.value} for item in SelectionMode
],
value=SelectionMode.World.value,
labelStyle={'margin-right': '25px'},
inputStyle={'margin-right': '5px'}
)
])
blank_div = html.Div([], id='blank_div')
continent_filter = dcc.Dropdown(
id='continent_filter',
value='Africa',
options=[{'label': continent, 'value': continent} for continent in continents],
multi=True
)
country_filter = dcc.Dropdown(
id='country_filter',
value='Afghanistan',
options=[{'label': country, 'value': country} for country in countries],
multi=True
)
total_cases_linechart = html.Iframe(
id='line_totalcases',
style={'border-width': '0', 'width': '100%', 'height': '300px'}
)
total_death_linechart = html.Iframe(
id='line_totaldeaths',
style={'border-width': '0', 'width': '100%', 'height': '300px'}
)
total_recovered_linechart = html.Iframe(
id='line_totalrecovered',
style={'border-width': '0', 'width': '100%', 'height': '300px'}
)
map = dcc.Graph(
id='world_map',
figure=generate_map(countries_daywise_df)
)
# Setup app and layout/frontend
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
app.layout = dbc.Container([
html.H3('WHO Coronavirus Disease (COVID-19) Dashboard'),
dbc.Row([
dbc.Col([
dbc.Row([
dbc.Col([
region_selection
])]),
dbc.Row([
dbc.Col([
blank_div
])]),
dbc.Row([
dbc.Col([
continent_filter
])]),
dbc.Row([
dbc.Col([
country_filter
])]),
dbc.Row([
dbc.Col([
date_range_selection
])]),
dbc.Row([
dbc.Col([
options_selection
])
])],
md=4),
dbc.Col([
dbc.Col([
map
])
])
]),
dbc.Row([
dbc.Col([
total_cases_linechart
]),
dbc.Col([
total_death_linechart
]),
dbc.Col([
total_recovered_linechart
])
])
])
# Set up callbacks/backend
@app.callback(
Output('line_totalcases', 'srcDoc'),
Output('line_totaldeaths', 'srcDoc'),
Output('line_totalrecovered', 'srcDoc'),
Output('world_map', 'figure'),
Input('region_selection', 'value'),
Input('country_filter', 'value'),
Input('continent_filter', 'value'),
Input('date_selection_range', 'start_date'),
Input('date_selection_range', 'end_date'),
Input('select_options', 'value'))
def filter_plot(mode, country, continent, start_date, end_date, options):
"""
Plots interactive line charts and world map based on filtering features
Parameters
----------
mode : str
mode to filter the plots/map (default is World mode)
country : str
country to filter the plots/map
continent : str
continent to filter the plots/map
start_date : datetime
starting date to filter the plots/map
end_date : datetime
ending date to filter the plots/map
options : str
option to filter the plots/map
Returns
-------
plots & map
filtered plots and map based on filtering features
"""
# Default is World mode
chart_data = world_daywise_df
map_data = countries_daywise_df
print(country, continent)
if mode == SelectionMode.Continents.value:
#Continents mode
if not isinstance(continent, list):
continent = [continent]
chart_data = continents_daywise_df[continents_daywise_df['WHO Region'].isin(continent)]
map_data = map_data[map_data['WHO Region'].isin(continent)]
elif mode == SelectionMode.Countries.value:
# Countries mode
if not isinstance(country, list):
country = [country]
chart_data = countries_daywise_df[countries_daywise_df['Country/Region'].isin(country)]
map_data = chart_data
chart_data = chart_data.query('Date >= @start_date & Date <= @end_date')
map_data = map_data.query('Date >= @start_date & Date <= @end_date')
# fix error when groupby geometry or put it in the aggregate column
temp = map_data.drop(['geometry', 'country_code', 'Date'], axis=1).groupby(['Country/Region']).agg(metrics).reset_index()
map_data = join_country_code_data(temp, country_code_data)
if is_perCapita(options):
for metric in ['Confirmed', 'Deaths', 'Recovered']:
chart_data[metric + '_per_capita'] = chart_data[metric] / chart_data['Population']
map_data[metric + '_per_capita'] = map_data[metric] / map_data['Population']
if is_perCapita(options):
return plot(chart_data, 'Confirmed_per_capita', 'Confirmed Cases Per Capita'), \
plot(chart_data, 'Deaths_per_capita', 'Confirmed Deaths Per Capita'), \
plot(chart_data, 'Recovered_per_capita', 'Confirmed Recoveries Per Capita'), \
generate_map(map_data)
return plot(chart_data, 'Confirmed', 'Confirmed Cases'), \
plot(chart_data, 'Deaths', 'Confirmed Deaths'), \
plot(chart_data, 'Recovered', 'Confirmed Recoveries'), \
generate_map(map_data)
def plot(chart_data, metric, metric_name):
"""
Plots an interactive line chart with the time period on the x axis and selected metric on the y axis
Parameters
----------
chart_data : df
dataframe being plotted
metric : str
        metric being examined as specified in the metrics dictionary
metric_name : str
title of plot that should be outputted
Returns
-------
plot
a line plot with single or multiple lines depending on dataframe selection
"""
chart = (alt.Chart(chart_data).mark_line().encode(
x=alt.X('month(Date):T', title="Month"),
y=alt.Y(f'mean({metric}):Q', title=f'Average {metric_name}', axis=alt.Axis(tickCount=5)),
color=alt.Color('Country/Region', title='Region'))
.properties(title=[f'{metric_name} Over Time'], width=180, height=180))
return (chart + chart.mark_point()).interactive(bind_x=True).to_html()
# will display / show the dropdown list for continents / countries based
# on whether the user selects World / Continents or Countries
@app.callback(
Output('blank_div', 'style'),
Output('continent_filter', 'style'),
Output('country_filter', 'style'),
Input('region_selection', 'value'))
def get_region_dropdown(mode):
"""
Returns dropdown menu values
Parameters
----------
mode : str
mode to filter the plots/map (default is World mode)
Returns
-------
    dropdown values
"""
print(mode)
if mode == SelectionMode.Continents.value:
return {'display': 'none'}, {'display': 'block'}, {'display': 'none'}
elif mode == SelectionMode.Countries.value:
return {'display': 'none'}, {'display': 'none'}, {'display': 'block'}
return {'height': '35px'}, {'display': 'none'}, {'display': 'none'}
if __name__ == '__main__':
app.run_server(debug=True)
| 2.75
| 3
|
config/api_router.py
|
sonlinux/itez
| 0
|
12781495
|
<gh_stars>0
from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from itez.users.api.views import UserViewSet
from itez.beneficiary.api.views import (
ProvinceAPIView,
DistrictAPIView,
ServiceAreaAPIView,
WorkDetailAPIView
)
if settings.DEBUG:
router = DefaultRouter()
else:
router = SimpleRouter()
router.register("users", UserViewSet)
router.register("provinces", ProvinceAPIView)
router.register("districts", DistrictAPIView)
router.register('service_area', ServiceAreaAPIView)
router.register('work_detail', WorkDetailAPIView)
app_name = "api"
urlpatterns = router.urls
| 1.765625
| 2
|
iss/south_migrations/0002_auto__del_countrycode.py
|
rerb/iss
| 0
|
12781496
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'CountryCode'
db.delete_table('iss_countrycode')
def backwards(self, orm):
# Adding model 'CountryCode'
db.create_table('iss_countrycode', (
('country_name', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True)),
('iso_country_code', self.gf('django.db.models.fields.CharField')(max_length=2, unique=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('iss', ['CountryCode'])
models = {
'iss.membership': {
'Meta': {'object_name': 'Membership'},
'current_dues_amount': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'join_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'last_modified_date': ('django.db.models.fields.DateField', [], {}),
'membership_directory_opt_out': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['iss.Organization']", 'null': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['iss.MembershipProduct']"}),
'receives_membership_benefits': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'renewal_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'termination_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'iss.membershipproduct': {
'Meta': {'object_name': 'MembershipProduct'},
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'iss.organization': {
'Meta': {'object_name': 'Organization'},
'account_num': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'business_member_level': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'carnegie_class': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'class_profile': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country_iso': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'enrollment_fte': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'exclude_from_website': ('django.db.models.fields.IntegerField', [], {}),
'is_defunct': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_member': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_signatory': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'member_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'membersuite_account_num': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'membersuite_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'org_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'org_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['iss.OrganizationType']", 'null': 'True'}),
'picklist_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pilot_participant': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'primary_email': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'salesforce_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sector': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setting': ('django.db.models.fields.CharField', [], {'max_length': '33', 'null': 'True', 'blank': 'True'}),
'stars_participant_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'street1': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'street2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sustainability_website': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'iss.organizationtype': {
'Meta': {'object_name': 'OrganizationType'},
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['iss']
| 2.171875
| 2
|
tests/__init__.py
|
MassDynamics/protein-inference
| 4
|
12781497
|
import sys, os
path = os.path.dirname(__file__)
path = os.path.join(path, '..', 'protein_inference')
if path not in sys.path:
sys.path.append(path)
| 2.359375
| 2
|
tests/test_optimization.py
|
nachovizzo/pyLiDAR-SLAM
| 0
|
12781498
|
<filename>tests/test_optimization.py
import unittest
import torch
from pylidar_slam.common.optimization import PointToPlaneCost, GaussNewton
from pylidar_slam.common.pose import Pose
class MyTestCase(unittest.TestCase):
def test_gauss_newton(self):
tgt_points = torch.randn(2, 100, 3, dtype=torch.float64)
ref_normals = torch.randn(2, 100, 3, dtype=torch.float64)
ref_normals /= ref_normals.norm(dim=-1, keepdim=True)
pose_params = torch.randn(2, 6, dtype=torch.float64) * torch.tensor([[0.01, 0.01, 0.01, 0.001, 0.001, 0.001]],
dtype=torch.float64)
pose = Pose("euler")
ref_points = pose.apply_transformation(tgt_points, pose_params)
gauss_newton = GaussNewton(max_iters=100, norm_stop_criterion=1.e-10, scheme="huber", sigma=0.0001)
cost = PointToPlaneCost()
residual_fun = cost.get_residual_fun(tgt_points, ref_points, ref_normals, pose)
jac_fun = cost.get_residual_jac_fun(tgt_points, ref_points, ref_normals, pose)
est_params, loss = gauss_newton.compute(torch.zeros_like(pose_params), residual_fun, jac_fun=jac_fun)
diff_params = (est_params - pose_params).abs().max()
est_points = pose.apply_transformation(tgt_points, est_params)
diff_points = (ref_points - est_points).abs().max()
self.assertLessEqual(diff_points, 1.e-7)
self.assertLessEqual(diff_params, 1.e-7)
self.assertLessEqual(loss.abs().sum().item(), 1.e-7)
if __name__ == '__main__':
unittest.main()
| 2.3125
| 2
|
indico_toolkit/association/__init__.py
|
IndicoDataSolutions/Indico-Solutions-Toolkit
| 6
|
12781499
|
from .line_items import LineItems
from .extracted_tokens import ExtractedTokens
from .split_merged_values import split_prediction_into_many
from .positioning import Positioning
| 0.992188
| 1
|
daraja_api/utils.py
|
jakhax/python_daraja_api
| 4
|
12781500
|
<filename>daraja_api/utils.py<gh_stars>1-10
import phonenumbers
from daraja_api.conf import AbstractConfig
from daraja_api.exceptions import DarajaException
def format_phone_number(phone_number: str, format='E164'):
    """Validate a Kenyan phone number and return it in E164 (without the leading '+') or national format."""
    try:
        parsed_number = phonenumbers.parse(phone_number, "KE")
        if not phonenumbers.is_valid_number(parsed_number):
            raise DarajaException("Invalid Phone Number: " + phone_number)
    except DarajaException:
        raise
    except Exception as e:
        raise DarajaException(str(e))
    if format == "E164":
        # strip the leading '+' from the E164 output
        p = phonenumbers.format_number(parsed_number,
                                       phonenumbers.PhoneNumberFormat.E164)
        return p[1:]
    elif format == "national":
        return phonenumbers.format_number(parsed_number,
                                          phonenumbers.PhoneNumberFormat.NATIONAL)
def base_url(config:AbstractConfig):
environment = config.get_environment()
urls = {"sandbox":"https://sandbox.safaricom.co.ke",
"production":"https://api.safaricom.co.ke"}
return urls[environment]
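# Hedged usage sketch (the example number is made up): for a valid Kenyan mobile
# number the E164 branch strips the leading '+', e.g.
#
#   format_phone_number("0712345678")  # -> "254712345678"
#
# while format='national' returns the library's national formatting of the same number.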
| 2.71875
| 3
|
py_hardway/ex7_s.py
|
missulmer/Pythonstudy
| 0
|
12781501
|
<reponame>missulmer/Pythonstudy
# study drills
# the next three lines print the quoted strings.
# where the %s appears it prints the value given in quotes at the
# end of the line
print "Mary had a little lamb."
print "Its fleece was white as %s." 'snow'
print "And everywhere that Mary went."
# what did that do?
print "." * 10
# this printed 10 of the quoted char at the end of the line.
# this defines variables as char, but really they are very short strings.
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
# watch the comma at the end. try removing it and see what happens.
print end1 + end2 + end3 + end4 + end5 + end6,
print end7 + end8 + end9 + end10 + end11 + end12
# These added together with (+) cause this to print Cheese Burger.
# It is the comma that creates the space between the words
# take it away and they run together.
| 4.15625
| 4
|
GDSII_Library.py
|
UUhy/PythonGDSII
| 6
|
12781502
|
#!/usr/bin/env ipython
import numpy as np
import re
import datetime as dt
import copy
from GDSII import GDSII
from GDSII_Structure import GDSII_Structure
class GDSII_Library(GDSII):
'''
GDSII_Library class : subclass of GDSII
GDSII Stream file format release 6.0
Library record
The library record is a container for all structure records. The library
record must contain at least 1 structure record. Structure records are
containers for element record(s) such as:
boundary
structure reference
array of structure reference
text
box
path
node
The GDSII_Library class supports the following functions:
setLibrary = Set the library name and units
addStructure = Adds a structure to the library
addBoundary = Adds a boundary to a structure
addARef = Adds an array of structure reference to a
structure
addSRef = Adds a structure reference to a structure
addBox = Adds a box to a structure
addPath = Adds a path to a structure
addText = Adds a text to a structure
addNode = Adds a node to a structure
readFile = Reads a *.gds file into the library
writeFile = Writes the library into a *.gds file
genHierarchy = Creates a hierarchy tree
<NAME>, UH, May 2013
'''
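    # Hedged usage sketch (structure names and coordinates are illustrative only):
    #
    #   lib = GDSII_Library('MyLibrary')
    #   lib.addStructure('unit')
    #   lib.addStructure('top')
    #   lib.addBoundary('unit', [0, 0, 0, 10, 10, 10, 10, 0], layer=1, datatype=0)
    #   lib.addSRef('top', 'unit', [0, 0])
    #   lib.writeFile('my_library')   # writes my_library.gds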
def __init__(self, libraryName = 'UHNano'):
super(GDSII_Library,self).__init__()
self._version = 600
self._dom = self.getDate()
self._doa = self.getDate()
self._libraryName = libraryName
self._userUnit = 0.000001
self._dbUnit = 1000
self._unit = 0.000000001 #userUnit/dbUnit
self._structureName = []
self._structure = []
self._cVersion = 0x0002
self._cLibrary = 0x0102 #Library begin
self._cLibraryName = 0x0206 #Library name
self._cUnit = 0x0305 #Library unit
self._cLibraryEnd = 0x0400 #Library end
self._cStructure = 0x0502 #Structure start
self._cStructureEnd = 0x0700 #Structure end
def __repr__(self):
print 'Library record'
print 'libraryName: ' , self.libraryName
print 'structure: ' , len(self.structure)
print 'structureName: ' , self.structureName
return ''
@property
def version(self):
'''
version : integer (constant)
GDSII version number
0 Version 0
3 Version 3
4 Version 4
5 Version 5
600 Version 6
'''
return self._version
@version.setter
def version(self, val):
if not val in [0,3,4,5,600]:
raise ValueError('GDSII_Library.version : This parameter must be in the set [0,3,4,5,600]')
self._version = val
@property
def dom(self):
'''
dom : list of 6 integers
Date of modification
'''
return self._dom
@dom.setter
def dom(self, val):
if not isinstance(val,list):
raise TypeError('GDSII_Library.dom : This parameter must be a list of integers')
if not len(val) == 6:
raise TypeError('GDSII_Library.dom : This parameter must have 6 elements')
self._dom = val
@property
def doa(self):
'''
doa : list of 6 integers
Date of access
'''
return self._doa
@doa.setter
def doa(self, val):
if not isinstance(val,list):
raise TypeError('GDSII_Library.doa : This parameter must be a list of integers')
if not len(val) == 6:
raise TypeError('GDSII_Library.doa : This parameter must have 6 elements')
self._doa = val
@property
def libraryName(self):
'''
libraryName : string
Name of the library
Up to 32 characters
Characters must be from the set [A-Z,a-z,0-9,_,?,$]
'''
return self._libraryName
@libraryName.setter
def libraryName(self, val):
if not isinstance(val,str):
raise TypeError('GDSII_Library.libraryName : This parameter must be of type str')
if len(val) > 32:
raise ValueError('GDSII_Library.libraryName : This parameter cannot be longer than 32 characters')
regex = re.compile('[\W^?^$]')
if not regex.search(val) == None:
raise ValueError('GDSII_Library.libraryName : This parameter must contain only the following characters: A-Z, a-z, 0-9, _, ? and $')
self._libraryName = val
@property
def userUnit(self):
'''
userUnit : float
Specify user units in [m]
'''
return self._userUnit
@userUnit.setter
def userUnit(self, val):
self._userUnit = float(val)
@property
def dbUnit(self):
'''
dbUnit : integer
Specify number of database units per user unit
'''
return self._dbUnit
@dbUnit.setter
def dbUnit(self, val):
self._dbUnit = val
@property
def unit(self):
'''
unit : float
'''
return self._unit
@property
def structure(self):
'''
structure : GDSII_Structure
A list of GDSII_Structure
'''
return self._structure
@structure.setter
def structure(self, val):
if not isinstance(val, GDSII_Structure):
raise TypeError('GDSII_Library.structure : This parameter must be a GDSII_Structure object')
self._structure.append(val)
@property
def structureName(self):
'''
structureName : list of string
Each list element is the name of a structure
The name may contain up to 32 characters
Characters must be from the set [A-Z,a-z,0-9,_,?,$]
'''
return self._structureName
@structureName.setter
def structureName(self, val):
if not isinstance(val,str):
raise TypeError('GDSII_Library.structureName : This parameter must be of type str')
if len(val) > 32:
raise ValueError('GDSII_Library.structureName : This parameter cannot be longer than 32 characters')
regex = re.compile('[\W^?^$]')
if not regex.search(val) == None:
raise ValueError('GDSII_Library.structureName : This parameter must contain only the following characters: A-Z, a-z, 0-9, _, ? and $')
if val in self._structureName:
raise ValueError('GDSII_Library.structureName : A structure with the same name already exist in the library')
self._structureName.append(val)
@property
def cVersion(self):
'''
cVersion : 0x0002
Command code for version
'''
return self._cVersion
@property
def cLibrary(self):
'''
cLibrary : 0x0102
Command code for library begin
'''
return self._cLibrary
@property
def cLibraryEnd(self):
'''
cLibraryEnd : 0x0400
Command code for library end
'''
return self._cLibraryEnd
@property
def cLibraryName(self):
'''
cLibraryName : 0x0206
Command code for library name
'''
return self._cLibraryName
@property
def cUnit(self):
'''
cUnit : 0x0305
Command code for units
'''
return self._cUnit
@property
def cStructure(self):
'''
cStructure : 0x0502
Command code for structure begin
'''
return self._cStructure
@property
def cStructureEnd(self):
'''
cStructureEnd : 0x0700
Command code for structure end
'''
return self._cStructureEnd
def getDate(self):
'''
getDate()
Returns the time and date as a list
Returns
-------
out : list of integers
The current date and time in the form:
[year month day hour minute second]
'''
tmp = dt.datetime.now()
return [tmp.year,tmp.month,tmp.day,tmp.hour,tmp.minute,tmp.second]
def setLibrary(self, libraryName, userUnit = 0.000001, dbUnit = 1000):
'''
setLibrary(libraryName, userUnit = 0.000001, dbUnit = 1000)
Set the library parameters
Parameters
----------
libraryName : string
Name of the library
Up to 32 characters
Characters must be from the set [A-Z,a-z,0-9,_,?,$]
userUnit : float
Specify user units in units of [m]
dbUnit : integer
Specify database units
'''
self.libraryName = libraryName
self._userUnit = userUnit
self._dbUnit = dbUnit
def addStructure(self, val):
'''
addStructure(val)
Adds a structure to the library
Parameters
----------
val : GDSII_Structure or string
A structure object or structure name
'''
if isinstance(val,GDSII_Structure):
self.structureName = val.structureName
self.structure = val
elif isinstance(val,str):
self.structureName = val
self.structure = GDSII_Structure(val)
else:
raise TypeError('GDSII_Library.addStructure : The input parameter must be either a string or a GDSII_Structure')
def addBoundary(self, structureName, xy, layer=0, datatype=0):
'''
addBoundary(structureName, xy, layer=0, datatype=0)
Adds a boundary element to a structure
Parameters
----------
structureName : string
Name of structure
xy : numpy.ndarray of type numpy.int32 or a list of integers
An array containing the vertices of a polygon in the form
[x1 y1 x2 y2 ... xn yn x1 y1]
layer : integer from 0 to 255
The layer number
datatype : integer from 0 to 255
The datatype number
'''
try:
self.structure[self.structureName.index(structureName)].addBoundary(xy,layer,datatype)
except:
raise ValueError('GDSII_Library.addBoundary() : The structureName does not exist in the library')
def addSRef(self, structureName, referenceName, xy, reflection = 0, mag = 1, angle = 0):
'''
addSRef(structureName, referenceName, xy, reflection = 0, mag = 1, angle = 0)
Adds a structure reference element to a structure
Parameters
----------
structureName : string
Name of structure
referenceName : string
Name of the cell to reference
Up to 32 characters
Characters must be from the set [A-Z,a-z,0-9,_,?,$]
xy : numpy.ndarray of type numpy.int32 with 2 elements or list of 2 integer elements
The origin, [x y], of the structure reference
reflection : integer from [0,1]
Reflection enable for reflection about the X axis
mag : float
Magnification factor used to scale the referenced structure
angle : float
Angle in units of [degrees] used to rotate the referenced structure
counterclockwise about the origin
'''
try:
self.structure[self.structureName.index(structureName)].addSRef(referenceName, xy, reflection, mag, angle)
except:
raise ValueError('GDSII_Library.addSRef() : The structureName does not exist in the library')
def addARef(self, structureName, referenceName, xy, pitchX, pitchY, nX, nY, xRot = 0, yRot = 0, reflection = 0, mag = 1, angle = 0):
'''
addARef(structureName, referenceName, xy, pitchX, pitchY, nX, nY, xRot = 0, yRot = 0, reflection = 0, mag = 1, angle = 0)
Adds an array reference element to a structure
Parameters
----------
structureName : string
Name of structure
referenceName : string
Name of the cell to reference
Up to 32 characters
Characters must be from the set [A-Z,a-z,0-9,_,?,$]
xy : numpy.ndarray of type numpy.int32 with 2 elements or list of 2 integer elements
The origin, [x y], of the array reference
pitchX : integer
Array pitch or step along X
pitchY : integer
Array pitch or step along Y
nX : integer
Array repeats along X
nY : integer
Array repeats along Y
xRot : float
Array x angle in units of [degrees]
yRot : float
Array y angle in units of [degrees]
reflection : integer from [0,1]
Reflection enable for reflection about the X axis
mag : float
Magnification factor used to scale the referenced structure
angle : float
Angle in units of [degrees] used to rotate the referenced structure
counterclockwise about the origin
'''
try:
self.structure[self.structureName.index(structureName)].addARef(referenceName, xy, pitchX, pitchY, nX, nY, xRot, yRot, reflection, mag, angle)
except:
raise ValueError('GDSII_Library.addARef() : The structureName does not exist in the library')
def addPath(self, structureName, xy, layer=0, datatype=0, width=None, pathtype=None):
'''
addPath(structureName, xy, layer=0, datatype=0, width=None, pathtype=None)
Adds a path element to a structure
Parameters
----------
structureName : string
Name of structure
xy : numpy.ndarray of type numpy.int32 or a list of integers
An array containing the vertices of a polygon in the form
[x1 y1 x2 y2 ... xn yn x1 y1]
layer : integer from 0 to 255
The layer number
datatype : integer from 0 to 255
The datatype number
width : integer (nonzero)
Width of the path
pathtype : integer from the set [0,1,2]
Describe the nature of the path segment ends
0 Square ends at path terminal
1 Rounded ends at path terminal
2 Square ends that overlap terminals by one-half the width
'''
try:
self.structure[self.structureName.index(structureName)].addPath(xy, layer, datatype, width, pathtype)
except:
raise ValueError('GDSII_Library.addPath() : The structureName does not exist in the library')
def addBox(self, structureName, xy, layer=0, boxtype=0):
'''
addBox(structureName, xy, layer=0, boxtype=0)
Adds a box element to a structure
Parameters
----------
structureName : string
Name of structure
xy : numpy.ndarray of type numpy.int32 or a list of integers
An array containing the vertices of a box in the form
[x1 y1 x2 y2 x3 y3 x4 y4 x1 y1]
layer : integer from 0 to 255
The layer number
boxtype : integer from 0 to 255
The boxtype number
'''
try:
self.structure[self.structureName.index(structureName)].addBox(xy, layer, boxtype)
except:
raise ValueError('GDSII_Library.addBox() : The structureName does not exist in the library')
def addText(self, structureName, text, xy, layer = 0, texttype = 0, presentation = None, pathtype = None, width = None, reflection = 0, mag = 1, angle = 0):
'''
addText(structureName, text, xy, layer = 0, texttype = 0, presentation = None, pathtype = None, width = None, reflection = 0, mag = 1, angle = 0)
Adds a text element to a structure
Parameters
----------
structureName : string
Name of structure
text : string
A text string
xy : numpy.ndarray of type numpy.int32 or a list of integers
An array containing the vertices of a polygon in the form
[x1 y1 x2 y2 ... xn yn x1 y1]
layer : integer from 0 to 255
The layer number
texttype : integer from 0 to 255
The texttype number
presentation : integer
Specifies the font in bits
Bit Number (0-15)
10-11 Specify Font
12-13 Vertical presentation
0 Top
1 Middle
2 Bottom
14-15 Horizontal presentation
0 Top
1 Middle
2 Bottom
pathtype : integer from the set [0,1,2]
Describe the nature of the text segment ends
0 Square ends at text terminal
1 Rounded ends at text terminal
2 Square ends that overlap terminals by one-half the width
width : integer
Defines the width of the text. If width is negative, it will be
independent of any structure scaling
reflection : integer from [0,1]
Reflection enable for reflection about the X axis
mag : float
Magnification factor used to scale the referenced structure
angle : float
Angle in degrees counterclockwise used to rotate the referenced
structure about the origin
'''
try:
self.structure[self.structureName.index(structureName)].addText(text, xy, layer, texttype, presentation, pathtype, width, reflection, mag, angle)
except:
raise ValueError('GDSII_Library.addText() : The structureName does not exist in the library')
def addNode(self, structureName, xy, layer = 0, nodetype = 0):
'''
addNode(structureName, xy, layer = 0, nodetype = 0)
Adds a node element to a structure
Parameters
----------
structureName : string
Name of structure
xy : numpy.ndarray of type numpy.int32 or a list of integers
An array containing the vertices of an electrical net in the form
[x1 y1 x2 y2 ... x50 y50]
layer : integer from 0 to 255
The layer number
nodetype : integer from 0 to 255
The nodetype number
'''
try:
self.structure[self.structureName.index(structureName)].addNode(xy, layer, nodetype)
except:
raise ValueError('GDSII_Library.addNode() : The structureName does not exist in the library')
def genRecord(self):
'''
genRecord()
Generates the structure record binary
Description
-----------
The structure record is specified by records in the following order:
Library
LibraryName
Units
StructureRecords
Boundary Element (optional)
SRef element (optional)
ARef element (optional)
Path element (optional)
Text element (optional)
Box element (optional)
Node element (optional)
'''
self.recordClear()
#Version
self.record = self.dec2byte(6)
self.record = self.dec2byte(self.cVersion)
self.record = self.dec2byte(self.version)
#Library start
self.record = self.dec2byte(28)
self.record = self.dec2byte(self.cLibrary)
for i in self.dom:
self.record = self.dec2byte(i)
for i in self.doa:
self.record = self.dec2byte(i)
#Define library name
if len(self.libraryName)%2 == 1:
self.record = self.dec2byte(len(self.libraryName)+5)
else:
self.record = self.dec2byte(len(self.libraryName)+4)
self.record = self.dec2byte(self.cLibraryName)
self.record = np.array([ord(i) for i in self.libraryName],dtype=np.uint8)
if len(self.libraryName)%2 == 1:
self.record = np.zeros(1,dtype=np.uint8)
#Define units
self.record = self.dec2byte(20)
self.record = self.dec2byte(self.cUnit)
self.record = self.dec2fbin(self.userUnit*self.dbUnit)
self.record = self.dec2fbin(self.userUnit/self.dbUnit)
#Add structure records
for i in self.structure:
i.genRecord()
self.record = i.record
#Library end
self.record = self.dec2byte(4)
self.record = self.dec2byte(self.cLibraryEnd)
self.recordClip()
def readRecord(self, record):
'''
readRecord(record)
Reads the boundary record and updates the boundary element parameters
'''
self.pointer = 0
#Version
if self.byte2dec(record[self.opCodePointer]) == self.cVersion:
self.version = self.byte2dec(record[self.pointer+4:self.pointer+6])
self.pointer += 6
else:
raise ValueError('GDSII_Library.readRecord() : The GDSII version is not defined')
#Library record
if self.byte2dec(record[self.opCodePointer]) == self.cLibrary:
self.dom[0] = self.byte2dec(record[self.pointer+4:self.pointer+6])
self.dom[1] = self.byte2dec(record[self.pointer+6:self.pointer+8])
self.dom[2] = self.byte2dec(record[self.pointer+8:self.pointer+10])
self.dom[3] = self.byte2dec(record[self.pointer+10:self.pointer+12])
self.dom[4] = self.byte2dec(record[self.pointer+12:self.pointer+14])
self.dom[5] = self.byte2dec(record[self.pointer+14:self.pointer+16])
self.doa[0] = self.byte2dec(record[self.pointer+16:self.pointer+18])
self.doa[1] = self.byte2dec(record[self.pointer+18:self.pointer+20])
self.doa[2] = self.byte2dec(record[self.pointer+20:self.pointer+22])
self.doa[3] = self.byte2dec(record[self.pointer+22:self.pointer+24])
self.doa[4] = self.byte2dec(record[self.pointer+24:self.pointer+26])
self.doa[5] = self.byte2dec(record[self.pointer+26:self.pointer+28])
self.pointer += 28
else:
raise ValueError('GDSII_Library.readRecord() : The record is not a library record')
#Library name
if self.byte2dec(record[self.opCodePointer]) == self.cLibraryName:
length = self.byte2dec(record[self.pointer:self.pointer+2])
if record[self.pointer+length-1] == 0:
self.libraryName = ''.join([chr(i) for i in record[self.pointer+4:self.pointer+length-1]])
else:
self.libraryName = ''.join([chr(i) for i in record[self.pointer+4:self.pointer+length]])
self.pointer += length
else:
raise ValueError('GDSII_Library.readRecord() : The library name is not defined')
#Units
if self.byte2dec(record[self.opCodePointer]) == self.cUnit:
umd = self.fbin2dec(record[self.pointer+4:self.pointer+12])
udd = self.fbin2dec(record[self.pointer+12:self.pointer+20])
self.userUnit = np.sqrt(umd*udd)
self.dbUnit = int(np.sqrt(umd/udd))
self.pointer += 20
else:
raise ValueError('GDSII_Library.readRecord() : The GDSII units are not defined')
#Structures
while not self.byte2dec(record[self.opCodePointer]) == self.cLibraryEnd:
#Retrieve one structure record
tp = self.pointer
tc = [tp+2,tp+3]
while not self.byte2dec(record[tc]) == self.cStructureEnd:
tp += self.byte2dec(record[tp:tp+2])
tc = [tp+2,tp+3]
tp += 4
#Read structure record
S = GDSII_Structure()
S.readRecord(record[self.pointer:tp])
self.addStructure(S)
#Point to next structure
self.pointer = tp
def readFile(self, filename):
'''
readFile(filename)
Reads a GDSII file and populate the library object
Parameters
----------
filename : string
Name of GDSII file
'''
if not isinstance(filename,str):
raise TypeError('GDSII_Library.readFile() : The filename must be a string')
if filename[-4:].lower() == '.gds':
filename = filename[:-4]
f = open(filename + '.gds','rb')
record = np.fromfile(f, dtype=np.uint8)
f.close()
self.readRecord(record)
def writeFile(self, filename):
'''
writeFile(filename)
Writes the library object to a file
Parameters
----------
filename : string
Name of GDSII file
'''
if not isinstance(filename,str):
raise TypeError('GDSII_Library.readFile() : The filename must be a string')
if filename[-4:].lower() == '.gds':
filename = filename[:-4]
f = open(filename + '.gds','wb')
self.genRecord()
f.write(self.record)
f.close()
def genHierarchyTree(self, structureName):
'''
genHierarchyTree(structureName)
Generates the hierarchy tree for the specified structure in the library
Parameters
----------
structureName : string
name of the structure
Results
-------
hierarchyList : list of list of integer
Each sub-list contains the indices for a branch of the structure
hierarchy
The first index in each list refers to structureName
branchIndexList : list of NxM numpy.ndarray of dtype numpy.int32
Each sub-list contains a set of indices for each branch
The indices are encoded such that
-1,-2,-n refers to an ARef index of 0, 1, n-1
1,2,n refers to an SRef index of 0, 1, n-1
branchRepeatNumber : list of integers
The repeat number of a branch
Note
----
Maximum number of repeat is 256
'''
try:
nameList, indexList = self.recursiveBranching(structureName,self.structureName.index(structureName))
hierarchyList, accessList = self.branchDecode(nameList, indexList)
uniqueHierarchyList = [hierarchyList[0]]
tmpIndexList = [accessList[0]]
branchIndexList = []
branchRepeatNumber = [1]
for i in range(1,len(hierarchyList)):
if hierarchyList[i] == hierarchyList[i-1]:
branchRepeatNumber[-1] += 1
tmpIndexList.append(accessList[i])
else:
uniqueHierarchyList.append(hierarchyList[i])
branchRepeatNumber.append(1)
branchIndexList.append(np.array(tmpIndexList,dtype=np.int32))
tmpIndexList = [accessList[i]]
branchIndexList.append(np.array(tmpIndexList,dtype=np.int32))
return uniqueHierarchyList, branchIndexList, branchRepeatNumber
except:
raise ValueError('GDSII_Library.genHierarchyTree : The specified cell does not have a tree')
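    # Illustrative sketch (a toy three-structure hierarchy, not taken from any real
    # layout): for a library where structure 'a' holds two SRefs to 'b' and 'b'
    # holds one SRef to 'c', genHierarchyTree('a') yields one unique branch that
    # repeats twice, roughly
    #   uniqueHierarchyList -> [[index of 'a', index of 'b', index of 'c']]
    #   branchIndexList     -> [array([[index of 'a', 1, 1], [index of 'a', 2, 1]])]
    #   branchRepeatNumber  -> [2]
    # where the positive access codes 1 and 2 select sref[0] and sref[1].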
def recursiveBranching(self, referenceName, index):
'''
recursiveBranching(referenceName)
Recursively finds the branches of a structure hierarchy
Parameters
----------
referenceName : string
Name of the first structure in the hierarchy
Results
-------
nameList : list of strings
An encoded list of structure names representing the branches of a
hierarchy tree
indexList : list of integers
An encoded list of structure indices representing the branches of a
hierarchy tree
typeList : list of reference type
An encoded list of reference type for the corresponding indexList
Description:
This method recursively steps through the specified structure and
generates a list of all referenced structure. The list is encoded to
optimize the branching algorithm. For example, the tree below
a
b c
c c d
d d
will result in a list:
['a', 'b', 'c', 'd', 'd', 'c', 'c', 'd', 'd', 'c', 'b', 'c', 'd', 'd', 'c', 'a']
A structure name will always encapsulate all referenced structure.
A structure name that encapsulates nothing means that it does not
contain a structure reference.
'''
nameList = [referenceName]
indexList = [index]
#Recursively step through each referenced structure
for i in range(len(self.structure[self.structureName.index(referenceName)].sref)):
nameTmp, indexTmp = self.recursiveBranching(self.structure[self.structureName.index(referenceName)].sref[i].referenceName, i+1)
nameList.extend(nameTmp)
indexList.extend(indexTmp)
for i in range(len(self.structure[self.structureName.index(referenceName)].aref)):
nameTmp, indexTmp = self.recursiveBranching(self.structure[self.structureName.index(referenceName)].aref[i].referenceName, -i-1)
nameList.extend(nameTmp)
indexList.extend(indexTmp)
#Add referenceName to the end of the list
nameList.append(referenceName)
indexList.append(index)
return nameList, indexList
def branchDecode(self, nameList, indexList):
'''
branchDecode(nameList, indexList)
Decodes the name list from the recursiveBranch method
Parameters
----------
nameList : List of strings
An encoded structure reference name list from the recursiveBranch
method
indexList : List of integers
An encoded index list from the recursiveBranch method
Results
-------
hierarchyList : A list of a list of integer indices
Each sub-list is a single branch of the hierarchy tree
accessList : A list of a list of integer indices
Each sub-list is a code for accessing each branch of the tree
The first element is the index of the top structure
The remaining elements are codes that refer to SRef or ARef indices
to access the referenced structure as described by the
hierarchyList
A positive integer codes for SRef indices
A negative integer codes for ARef indices
Since zero cannot be negative, all indices begin with 1
Example: SRef[2] will be coded as 3
ARef[0] will be coded as -1
Note
----
This method supports only 256 levels of hierarchy or structure nests
'''
#Convert a list of structure names to a list of structure indices
structureIndexList = np.array([self.structureName.index(i) for i in nameList],dtype=np.uint32)
#A list that tracks the branch progression
tmpStructureIndexList = [structureIndexList[0]]
tmpAccessIndexList = [indexList[0]]
#Tracks if the branch is progressing forward (deeper) or backwards
forward = True
#A list of completed branches
hierarchyList = []
accessList = []
#Steps through the indexList and extract each branch of the hierarchy
for i in range(1,len(structureIndexList[1:])):
if not structureIndexList[i] == tmpStructureIndexList[-1]:
tmpStructureIndexList.append(structureIndexList[i])
tmpAccessIndexList.append(indexList[i])
forward = True
else:
if forward:
hierarchyList.append(copy.copy(tmpStructureIndexList))
accessList.append(copy.copy(tmpAccessIndexList))
forward = False
tmpStructureIndexList.pop()
tmpAccessIndexList.pop()
return hierarchyList, accessList
def test():
# a = GDSII_Library('libraryTest')
# a.addStructure('a')
# a.addStructure('b')
# a.addStructure('c')
# a.addStructure('d')
# a.addStructure('e')
# a.addStructure('f')
# a.addBoundary('a',[0,0,0,5,5,5,5,0],2,1)
# a.addBoundary('a',[10,0,10,5,15,5,15,0],2,2)
# a.addBoundary('a',[20,0,20,5,25,5,25,0],2,3)
# a.addBoundary('a',[0,10,0,15,5,15,5,10],2,4)
# a.addBoundary('a',[10,10,10,15,15,15,15,10],2,5)
# a.addBoundary('b',[20,10,20,15,25,15,25,10],2,6)
# a.addBoundary('b',[0,20,0,25,5,25,5,20],2,7)
# a.addBoundary('b',[10,20,10,25,15,25,15,20],2,8)
# a.addBoundary('b',[20,20,20,25,25,25,25,20],2,9)
# a.addText('a','UHNano',xy=[0,0])
# a.addPath('a',[0,0,1,1,2,2],0,0)
# a.addBox('a',[0,0,0,10,10,10,10,0,0,0],0,0)
# a.addNode('a',[0,0,20,20],255,255)
# a.addSRef('a','b',[0,0])
# a.addSRef('a','c',[0,0])
# a.addSRef('a','e',[0,0])
# a.addSRef('b','c',[0,0])
# a.addSRef('b','c',[10,10])
# a.addSRef('c','d',[0,0])
# a.addSRef('e','b',[0,0])
# a.addSRef('e','d',[0,0])
# a.addSRef('e','d',[0,0])
# a.addSRef('e','d',[0,0])
# a.addSRef('f','b',[0,0])
# a.addSRef('f','c',[0,0])
# a.addSRef('f','d',[0,0])
# a.genRecord()
# aHList, abrNum = a.genHierarchyTree('a')
# b = GDSII_Library()
# b.readRecord(a.record)
#
# print a
# print aHList
# print abrNum
# print b
c = GDSII_Library('KLayout_Example')
c.readFile('KLayout_Example')
hList, iList, brNum = c.genHierarchyTree('main')
print c
print hList
print brNum
c.writeFile('GDSII_Write.gds')
if __name__ == '__main__':
test()
| 2.5
| 2
|
introduction_frechette/models.py
|
Charlotte-exp/Multichannel-Games
| 0
|
12781503
|
<filename>introduction_frechette/models.py
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
import itertools
author = 'Charlotte'
doc = """
The instructions and consent for the control treatment.
I decided to have them in a separate app so that there can be a waiting page at the beginning of the game app
where participants wait for an opponent to play (online).
So the pairing cannot happen before then. Hence a separate app.
"""
class Constants(BaseConstants):
name_in_url = 'introduction_frechette'
players_per_group = None
num_rounds = 1
min_rounds = 1
proba_next_round = 0.75
session_time = 15
conversion = '20pts = £0.05'
"""
Matrix format payoffs
temptation = betray, sucker = betrayed, reward = both cooperate, punishment = both defect
"""
temptation = c(50)
sucker = c(12)
reward = c(26)
punishment = c(25)
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
pass
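# Illustrative payoff matrix implied by the constants above
# (row player's payoff, column player's payoff):
#
#                 Cooperate    Defect
#   Cooperate     (26, 26)     (12, 50)
#   Defect        (50, 12)     (25, 25)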
| 2.546875
| 3
|
q3/q3/drivers/driverBase.py
|
virtimus/makaronLab
| 2
|
12781504
|
<reponame>virtimus/makaronLab
class Q3DriverBase:
def __init__(self,_self,parent,impl):
self._object = impl
self._impl = impl
self._parent = parent
self._self = _self
#@deprecated - use self
def s(self):
return self._self
def self(self):
return self._self
def impl(self):
return self._impl
def pimpl(self):
return self._parent.impl()
def p(self):
return self._parent
def callAfterInit(self, tImpl):
if hasattr(tImpl,'__afterInit__'):
ai = getattr(tImpl,'__afterInit__')
if callable(ai):
ai(self)
| 2.328125
| 2
|
python_code/data_scraping.py
|
brschneidE3/LegalNetworks
| 2
|
12781505
|
"""
Scrapes case data (clusters and opinions) from the CourtListener REST API and
saves each case as JSON under the local data directory, organised by court.
"""
__author__ = 'brsch'
import helper_functions
import os
import time
import json
proj_cwd = os.path.dirname(os.getcwd())
data_dir = proj_cwd + r'/data'
# Unchanging courtlistener API info
site_dict_url = 'https://www.courtlistener.com/api/rest/v3/'
site_dict = helper_functions.url_to_dict(site_dict_url)
# clusters_url = site_dict['clusters']
starting_page = 2161
clusters_url = 'https://www.courtlistener.com/api/rest/v3/clusters/?page=%s' % starting_page
opinions_url = site_dict['opinions']
def pull_cases_by_court(court_id):
"""
Given a court id, pull_cases_by_court will iterate through every cluster online, check if the case (cluster &
opinion) corresponding to that cluster has been downloaded ( via is_downloaded() ) and will build up a list of
[case_number, parent_directory] tuples that will enable quick iteration and creation of a dictionary of cases
in other functions via Class_Case.json_to_case.
:param court_id: court id by which to filter
:return:
case_numbers_and_parent_directories:
list of the case numbers and the parents directories where they are located
"""
start = time.time()
# Initial info about the online clusters database
clusters_dict = helper_functions.url_to_dict(clusters_url)
num_to_inspect = clusters_dict['count']
inspected = 0.
page = starting_page
# List of case numbers and parent directories that will eventually be returns
case_numbers_and_parent_directories = []
# Make sure we already have subdirectories for this court. Create them if not.
subdirectories = [data_dir + r'/clusters/%s' % court_id, data_dir + r'/opinions/%s' % court_id]
for subdir in subdirectories:
if not os.path.isdir(subdir):
os.makedirs(subdir)
# As long as there are more pages to iterate through
while clusters_dict['next'] is not None:
# Iterate through each cluster on current page
for individual_cluster in clusters_dict['results']:
time.sleep(2) # Make sure we don't overload the server
inspected += 1.
pct_inspected = inspected / num_to_inspect
url_id_number = individual_cluster['resource_uri'].rsplit('/')[-2]
new_cluster_url = site_dict_url + r'clusters/%s' % url_id_number
new_cluster_dict = helper_functions.url_to_dict(new_cluster_url)
new_cluster_docket_url = new_cluster_dict['docket']
new_cluster_docket_dict = helper_functions.url_to_dict(new_cluster_docket_url)
try:
new_cluster_court = new_cluster_docket_dict['court'].rsplit('/')[-2]
if new_cluster_court == court_id:
file_number = new_cluster_dict['citation_id']
parent_directory = court_id
case_numbers_and_parent_directories.append([file_number, parent_directory])
if not is_downloaded(file_number, parent_directory):
download_case(new_cluster_dict, file_number, parent_directory)
# print '%s DOWNLOADED. (%s)' % (file_number, pct_inspected)
else:
pass
# print '%s passed. (%s)' % (new_cluster_dict['citation_id'], pct_inspected)
except KeyError: # If there is no court data
pass
print 'page %s checked. (%s)' % (page, pct_inspected)
page += 1
clusters_dict = helper_functions.url_to_dict(clusters_dict['next'])
# Iterate through each cluster on last page
for individual_cluster in clusters_dict['results']:
time.sleep(2) # Make sure we don't overload the server
new_cluster_url = individual_cluster['cluster']
new_cluster_dict = helper_functions.url_to_dict(new_cluster_url)
new_cluster_docket_url = new_cluster_dict['dockets']
new_cluster_docket_dict = helper_functions.url_to_dict(new_cluster_docket_url)
try:
new_cluster_court = new_cluster_docket_dict['court']
if new_cluster_court == court_id:
file_number = new_cluster_dict['citation_id']
parent_directory = court_id
case_numbers_and_parent_directories.append([file_number, parent_directory])
if not is_downloaded(file_number, parent_directory):
download_case(new_cluster_dict, file_number, parent_directory)
except KeyError: # If there is no court data
pass
finish = time.time()
elapsed = finish - start
print "Download took %s minutes" % elapsed / 60.
return case_numbers_and_parent_directories
def is_downloaded(file_number, parent_directory):
"""
Should check if corresponding cluster & opinion json files have been downloaded to appropriate path:
data_dir + r'/clusters/COURT NAME/FILE NUMBER.json
&
data_dir + r'/opinions/COURT NAME/FILE NUMBER.json
Return True or False.
"""
op_downloaded = os.path.exists(data_dir + r'/opinions/%s/%s.json' % (parent_directory, file_number))
cl_downloaded = os.path.exists(data_dir + r'/clusters/%s/%s.json' % (parent_directory, file_number))
return op_downloaded and cl_downloaded
def download_case(cluster_dict, file_number, parent_directory):
# TODO
"""
Download opinion and cluster files if they are missing.
"""
with open(data_dir + r'/clusters/%s/%s.json' % (parent_directory, file_number), 'w') as fp:
json.dump(cluster_dict, fp)
opinion_url = cluster_dict['resource_uri'].replace('clusters', 'opinions')
opinion_dict = helper_functions.url_to_dict(opinion_url)
with open(data_dir + r'/opinions/%s/%s.json' % (parent_directory, file_number), 'w') as fp:
json.dump(opinion_dict, fp)
#############
# SCRIPTING
#############
ids_and_parents = pull_cases_by_court('scotus')
helper_functions.list_to_csv(data_dir + r'/scotus_ids.csv', ids_and_parents)
| 2.859375
| 3
|
parser/team01/analizador_sintactico.py
|
susanliss/tytus
| 0
|
12781506
|
<gh_stars>0
import ply.yacc as yacc
import tempfile
from analizador_lexico import tokens
from analizador_lexico import analizador
from Nodo import *
from expresiones import *
from instrucciones import *
from graphviz import *
from datetime import datetime  # needed by ejecucion_sintactico below
# Operator associativity and precedence
# precedence = (
# ('left','MAS'),
# ('left','DIVIDIDO'),
# ('right'),
# )
nombres = {}
resultado_gramatica = []
dot = Digraph(comment='The Round Table')
i = 0
def inc():
global i
i += 1
return i
def p_statement(t):
'''statement : insert_statement
| update_statement
'''
t[0] = Nodo("statement", [t[1]], 'N', None)
# *************************************************
# ********************** insert_statement ***********
# *************************************************
# region INSERT
def p_insert_statement(t):
'''insert_statement : INSERT INTO table_name insert_columns_and_source PTCOMA
'''
temp = list()
temp.append(t[3])
temp.append(t[4])
t[0] = Nodo("insert_statement", temp, 'N', None)
def p_table_name(t):
'table_name : ID '
t[0] = Nodo("table_name", [t[1]], 'S', str(t[1]))
# *************************************************
# ********************** insert_columns_and_source ***********
# *************************************************
def p_insert_columns_and_source(t):
'''insert_columns_and_source : PARIZQ insert_column_list PARDER VALUES query_expression_insert
| VALUES query_expression_insert
| insert_default_values
'''
if (len(t) == 6):
# t[0] =InsertColumnsValues(t[2],t[5])
temp = list()
temp.append(t[2])
temp.append(t[5])
t[0] = Nodo("insert_columns_and_source", temp, 'N', None)
elif (len(t) == 3):
# t[0] =InsertValues(t[1],t[2])
t[0] = Nodo("insert_columns_and_source", [t[2]], 'N', None)
elif (len(t) == 2):
# t[0] =InsertValues(t[1],t[2])
t[0] = Nodo("insert_columns_and_source", [t[1]], 'N', None)
def p_insert_defaul_values(t):
'insert_default_values : DEFAULT VALUES'
t[0] = Nodo("column_name", [t[1]], 'S', str(t[1]))
# *************************************************
# ********************** insert_column_list ***********
# *************************************************
def p_insert_column_list(t):
'insert_column_list : insert_column_list COMA column_name'
temp = list()
temp.append(t[1])
temp.append(t[3])
t[0] = Nodo("insert_column_list1", temp, 'N', None)
def p_insert_column_name(t):
'insert_column_list : column_name'
# t[0] = [t[1]]
t[0] = Nodo("insert_column_list2", [t[1]], 'N', None)
def p_column_name(t):
'column_name : ID '
t[0] = Nodo("column_name", [t[1]], 'S', str(t[1]))
# *************************************************
# ********************** query_expression_insert ***********
# *************************************************
def p_query_expression_insert(t):
'''query_expression_insert : insert_list
'''
t[0] = Nodo("query_expression_insert", [t[1]], 'N', None)
def p_insert_list(t):
'''insert_list : insert_list COMA insert_value_list '''
temp = list()
temp.append(t[1])
temp.append(t[3])
t[0] = Nodo("insert_list2", temp, 'N', None)
def p_insert_item(t):
'''insert_list : insert_value_list '''
t[0] = Nodo("insert_list1", [t[1]], 'N', None)
def p_insert_value_list(t):
'''insert_value_list : PARIZQ value_list PARDER '''
t[0] = Nodo("insert_value_list", [t[2]], 'N', None)
def p_value_list(t):
'''value_list : value_list COMA insert_value'''
temp = list()
temp.append(t[1])
temp.append(t[3])
t[0] = Nodo("value_list1", temp, 'N', None)
def p_insert_value(t):
'''value_list : insert_value'''
t[0] = Nodo("value_list1", [t[1]], 'N', None)
def p_valorasign(t):
'''insert_value : ENTERO
| DECIMAL
| CADENACOMSIMPLE
| DEFAULT
'''
t[0] = Nodo("insert_value", [t[1]], 'S', str(t[1]))
# endregion
# *************************************************
# ********************** update_statement ***********
# *************************************************
# region UPDATE
def p_update_statement(t):
'''update_statement : UPDATE table_name SET set_clause_list WHERE search_condition PTCOMA
| UPDATE table_name SET set_clause_list PTCOMA
'''
if (len(t) == 8):
temp = list()
temp.append(t[2])
temp.append(t[4])
temp.append(t[6])
t[0] = Nodo("update_statement", temp, 'N', None)
elif (len(t) == 6):
temp = list()
temp.append(t[2])
temp.append(t[4])
t[0] = Nodo("update_statement", temp, 'N', None)
def p_set_clause_list(t):
'''set_clause_list : set_clause_list COMA set_clause'''
temp = list()
temp.append(t[1])
temp.append(t[3])
t[0] = Nodo("set_clause_list", temp, 'N', None)
def p_p_set_clause_item(t):
'''set_clause_list : set_clause'''
t[0] = Nodo("set_clause_list", [t[1]], 'N', None)
def p_set_clause(t):
'set_clause : column_name IGUAL update_source'
temp = list()
temp.append(t[1])
temp.append(t[3])
t[0] = Nodo("set_clause", temp, 'N', None)
def p_update_source(t):
'''update_source : value_expression
| NULL
'''
t[0] = Nodo("update_source", [t[1]], 'N', None)
# *************************************************
# ********************** search_condition ***********
# *************************************************
def p_search_condition_boolean(t) :
'''search_condition : boolean_term
'''
t[0] = Nodo("search_condition", [t[1]],'N',None)
def p_search_condition(t) :
'''search_condition : search_condition OR boolean_term
'''
temp = list()
temp.append(t[1])
temp.append(t[3])
t[0] = Nodo("search_condition", temp,'N',None)
def p_p_boolean_term_item(t) :
'''boolean_term : boolean_factor
'''
t[0] = Nodo("boolean_term", [t[1]],'N',None)
def p_boolean_term(t) :
'''boolean_term : boolean_term AND boolean_factor
'''
temp = list()
temp.append(t[1])
temp.append(t[3])
t[0] = Nodo("boolean_term", temp,'N',None)
def p_boolean_factor(t) :
'''boolean_factor : NOT boolean_test
| boolean_test
'''
if (len(t) == 3):
temp = list()
temp.append(t[1])
temp.append(t[2])
t[0] = Nodo("boolean_factor", temp,'N',None)
elif (len(t)==2):
temp = list()
temp.append(t[1])
t[0] = Nodo("boolean_factor", temp,'N',None)
def p_boolean_test(t) :
'''boolean_test : boolean_primary
'''
t[0] = Nodo("boolean_test", [t[1]],'N',None)
def p_boolean_primary(t) :
'''boolean_primary : predicate
| PARIZQ search_condition PARDER
'''
if (len(t) == 2):
t[0] = Nodo("boolean_primary", [t[1]],'N',None)
elif (len(t)==4):
t[0] = Nodo("boolean_primary", [t[2]],'N',None)
def p_predicate(t) :
'''predicate : comparison_predicate
'''
t[0] = Nodo("predicate", [t[1]],'N',None)
def p_comparison_predicate(t) :
'''comparison_predicate : row_value_constructor comp_op row_value_constructor
'''
temp = list()
temp.append(t[1])
temp.append(t[2])
temp.append(t[3])
t[0] = Nodo("comparison_predicate", temp,'N',None)
def p_comp_op(t) :
'''comp_op : IGUAL
| MENQUE MAYQUE
| MENQUE
| MAYQUE
| MENORIGU
| MAYORIGU
'''
if (len(t) == 2):
t[0] = Nodo("comp_op", [t[1]],'S',str(t[1]))
elif (len(t)==3):
temp = list()
temp.append(t[1])
temp.append(t[2])
t[0] = Nodo("comp_op", temp,'N',None)
def p_row_value_constructor(t) :
'''row_value_constructor : row_value_constructor_element
| PARIZQ row_value_constructor_list PARDER
'''
if (len(t) == 2):
t[0] = Nodo("row_value_constructor", [t[1]],'N',None)
elif (len(t)==4):
t[0] = Nodo("row_value_constructor", [t[2]],'N',None)
def p_row_value_constructor_list(t) :
'''row_value_constructor_list : row_value_constructor_list COMA row_value_constructor_element
'''
temp = list()
temp.append(t[1])
temp.append(t[3])
t[0] = Nodo("row_value_constructor_list", temp,'N',None)
def p_row_value_constructor_item(t) :
'''row_value_constructor_list : row_value_constructor_element
'''
t[0] = Nodo("row_value_constructor_list", [t[1]],'N',None)
def p_row_value_constructor_element(t) :
'''row_value_constructor_element : value_expression
| NULL
'''
t[0] = Nodo("row_value_constructor_element", [t[1]],'N',None)
def p_value_expression(t):
'''value_expression : ENTERO
| DECIMAL
| CADENACOMSIMPLE
| DEFAULT
| ID
'''
t[0] = Nodo("value_expression", [t[1]],'S',str(t[1]))
# endregion
# *************************************************
# ********************** Error ***********
# *************************************************
def p_error(t):
global resultado_gramatica
if t:
resultado = "Error sintactico de tipo {} en el valor {} ".format( str(t.type),str(t.value))
print(resultado)
else:
resultado = "Error sintactico {}".format(t)
print(resultado)
resultado_gramatica.append(resultado)
# *************************************************
# ********************** ***********
# *************************************************
# Build the parser
parser = yacc.yacc()
def ejecucion_sintactico(data):
global resultado_gramatica
resultado_gramatica.clear()
gram = parser.parse(data)
if gram:
now = datetime.now()
# dd/mm/YY H:M:S
dt_string = now.strftime("%d_%m_%Y %H_%M_%S")
print(dot.source)
dot.attr(splines='false')
dot.node_attr.update(shape='circle',fontname='arial',color='blue4',fontcolor='blue4')
dot.edge_attr.update(color='blue4')
dot.render('.\\tempPDF\\'+dt_string+'.gv', view=False) # doctest: +SKIP
'.\\tempPDF\\'+dt_string+'.gv.pdf'
resultado_gramatica.append(gram)
else: print("vacio")
return(resultado_gramatica)
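# Hedged usage sketch (the SQL text is illustrative and assumes the tokens defined
# in analizador_lexico): the module is normally driven through ejecucion_sintactico,
# which parses the statement, renders the module-level graphviz Digraph into
# ./tempPDF and returns the accumulated results list.
#
#   resultados = ejecucion_sintactico("INSERT INTO clientes (id, nombre) VALUES (1, 'Ana');")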
| 2.578125
| 3
|
htsohm/bin/cube_pore_sweep_setup.py
|
mas828/htsohm
| 2
|
12781507
|
<filename>htsohm/bin/cube_pore_sweep_setup.py
#!/usr/bin/env python3
import click
import numpy as np
from sqlalchemy.orm import joinedload
from htsohm import load_config_file, db
from htsohm.db import Material
@click.command()
@click.argument('config-path', type=click.Path())
def sweep_setup(config_path):
config = load_config_file(config_path)
db.init_database(config["database_connection_string"])
session = db.get_session()
directional_atom_sweep_points = config['directional_atom_sweep_points']
sigma_sweep_points = config['sigma_sweep_points']
epsilon_sweep_points = config['epsilon_sweep_points']
scfg = config['structure_parameters']
eps_d = np.linspace(*scfg['epsilon_limits'], epsilon_sweep_points)
sig_d = np.linspace(*scfg['sigma_limits'], sigma_sweep_points)
atoms_d = np.linspace(*scfg['directional_atom_limits'], directional_atom_sweep_points, dtype=int)
atom_diameter = scfg['atom_diameter']
print("epsilons: ", eps_d)
print("sigmas: ", sig_d)
print("num_atoms: ", atoms_d)
for eps in eps_d:
for sig in sig_d:
for num_atoms in atoms_d:
material = Material.cube_pore_new(sig, eps, num_atoms, atom_diameter)
session.add(material)
session.commit()
if __name__ == '__main__':
sweep_setup()
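# Hedged usage note (the config path is a placeholder): the sweep is launched as
#   python cube_pore_sweep_setup.py path/to/config.yaml
# where the config supplies database_connection_string, the *_sweep_points counts
# and the structure_parameters limits read above.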
| 2.125
| 2
|
bazelrio/dependencies/wpilib/2022_1_1/deps.bzl
|
noamzaks/bazelrio
| 5
|
12781508
|
<filename>bazelrio/dependencies/wpilib/2022_1_1/deps.bzl
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_jar")
load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazelrio//:deps_utils.bzl", "cc_library_headers", "cc_library_shared", "cc_library_sources", "cc_library_static")
def setup_wpilib_2022_1_1_dependencies():
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-linuxathena.zip",
sha256 = "3570c0a2a91e3547aae2234479b6f13528685abb5225c140e291cd73418441d5",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "475e890fd43367b851d013aff5ffcfcefae7db33656aee3c6a1367708eafe170",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "69e3bc8e316e483c728221d90d621acdae4741d027f8638ab375a4b1a9ca21cd",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "b9428d9b41e5c56ebabec8f29739125c39fd07706ac5dc941648ddfa5cac6be3",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-osxx86-64.zip",
sha256 = "04a10cccf42b863dd8c220ba26fc96a0692242453424c103b67eab89181bec3d",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "96abb44cf19fe7703618432bea13b7398f8240c8da334c35af11536dcb15ff26",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "f7dddf967e3aea536bf6c0f907984c2df20f0cc921d1023297a30cf553765868",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "b854025344262eb6a9b35e271d4de3fbb76ec317eb3f30143b6be55c16424963",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-headers.zip",
sha256 = "29d9fa0496bf299ec8dc58347fd772230d51ce4f6a85f10951bcc18b20230dde",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-sources.zip",
sha256 = "06b71a61100ba3822aa2fbfa1f6d3d7e796f2c16df82bc8e0f82818f0422b418",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-linuxathena.zip",
sha256 = "f84fe5162cedce9a938d0bf2a0074aa76887e164be681f0d8ca29ddee1df346f",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "c70b6a599f0f4676c39e14784619f1138b9a4ecdf8a4514ebbb8e56de0d34f47",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "01d7a62ccb613e27a84766116c4821409306821d2e49f26d5a86977b57bb6566",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "7d18a354244ed9b31952389c53e9f3f566e97566f8957d3433b16066018cee06",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-osxx86-64.zip",
sha256 = "741b792ad34367ce3315aec8e1ae8f7a7618a3fa33779fbd2f0e5507b56b7d8b",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "40a76101beadb6d6163f86f1c5ea0f1fd4a2c5de32813a67908f17dc6963fa64",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "ef02ccabb119bcfcdaabd3d443933b5749767e8810b774948387a798d99fa9b1",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "81dc7f890417b43f636df556d86087e8c08a983cf50f8d280ec3b4e0000a2ee8",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-headers.zip",
sha256 = "893734817fcbb8b9758d94a972bb5f22da3da628c5ed5da86490ac2d31756051",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-sources.zip",
sha256 = "273cfdeb7750ec220c5b09d7da46e5b402a1a56edc01af3f5b614f5e20795bf2",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-linuxathena.zip",
sha256 = "5d12b4de347ebffe1c7783b01389c11906f17917bd32539c216c5fa6d1ad63e7",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "501ea6a79d24837292b063293dc2230522484b5eb424406f6efd7443484e2058",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "8ebadc5f2b7c6962ed1d3df945806087404212899bcf81ecfbd8dd4f7d5d1302",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "c50f2b002ce2a19bde9a394683d08daba441cd8bc4ae469186df4cd814de42a6",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-osxx86-64.zip",
sha256 = "4d0e5d86b6d285682971ad1dc0c350350862fd9adb10db26ae910c38e523a18d",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "3331a4fd37e7c05fc409c17992293957de6119fac14a7fae3cf1e0abee3b85cf",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "bf2bc6a7df448c405786ef877e3b8095ebe0677944c4cf2a927fbbbed0c5c3b8",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "8431c1981baf3d20d072ed03624f999f3dc02a0f619dd61dae8c4d05d546207a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-headers.zip",
sha256 = "49f6565ace351fe7cb3e591e8a391d8c515214135ab84f516e17293e0cb28764",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-sources.zip",
sha256 = "9d4c76af759e70cf2063958c4d856e7e1b0ea60551bbc245fe0edb041bc0a37d",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-linuxathena.zip",
sha256 = "7b8dc317151aa66c0f787f09e49acb9d7aa1dd9f66b2021e9aebb79f3b78b507",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "45b64d7f8801dca368b89a025b94fd564bed7def1786ead44042b187060db054",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "d71ef6b5ce0817474e4d0c4ab92f1c44800516d3a6978c5f33b47eaa7e081a5b",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "216bcc5c4e00dae92a6a470ba8920a7b0429a63472277565de7f2b03956f3edd",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-osxx86-64.zip",
sha256 = "aba23c0225830808c359497afd61b650fe2a469420862f7ed275478932374dbb",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "39e62c37d97bf90077eedaf78ca5e18cc62db322695fbf5de9bed0b27c2f370a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "9494d27ca80a6a9bbc892e929da97e5c7d37f3788417ec03907169af28b5e815",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "ce070898bd5bad7f48974bbb9bfc5eccbd4b450d0f6dffb8d42d93e13dbd850d",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-headers.zip",
sha256 = "2df197a2301478c32a37634c61b7dd93676e668939eeeab4820a780b29b559c4",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-sources.zip",
sha256 = "f546244b35875f64c9d37e570f098f9977c5cca6d3d7ccc91615b88c8713fe69",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-linuxathena.zip",
sha256 = "895bbf531fe2bcec0b9ad87311779abbb862c73b74016763990eaea46e207246",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "a39dc62b2f96c3c17c69c44356a4980b8789334640b2a43f0a43bfebd8554c2a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "511945f5988e9c3196f706243e5542f8ef0223878a74878104c44ff73ad5fc9b",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "63378c9ca80ced1c5f69619c75ac6f230d0962a510131cfb0a898dc3ca0cda78",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-osxx86-64.zip",
sha256 = "c9750d902aec05b007e030c50553bec33c5ed70874671f9d7cdb9180890704c5",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "e3276046c92fe815364bab523c4719053bedf1df8eb79f62b89875a4f133490a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "984e6e32fdb80ffbe0b88faa9e88246690309aebf92e27de1d6d4ba7024206f7",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "e96b718fb95b4e14b0b0db4d0ba9de996b95d4c111317237ba4696d8b698e36b",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-headers.zip",
sha256 = "db0cd454cdcfa592248cdfa6703ff5361a84a754e3530e91652aa41078ea0dd0",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-sources.zip",
sha256 = "f120bd9f2e60d1da313017ec72630844fe3cd2232d927caa843053f232b6bd67",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-linuxathena.zip",
sha256 = "2c96983902d1caef823e38f314939d4f192c795541bda0c263af65b0cadd8cb0",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "0129b2e3079852f26b2272a7e91a824194f2f6ba345f0c400611fe4888bff03a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "fda5b67cf934647c493e4ae7ebc76fd1801f1ced107e58771af2d6658527d547",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "e4655a46c792013675d42d11506f86d5f6f5bf1d814db6f2aac43e8aa1fdae68",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-osxx86-64.zip",
sha256 = "afc4b26a4e6f4a7456ac3002f681ecb6179c919e11f22b81cfa0581bb0ffe284",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "bcb92cddf2d66ea0a6d86b9dd75b10bdf7dd9c2b219fc4af106a335211e79cfd",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "5edf5f8bfdc88302ea0fb64127f1d13899f2627c51cff5efc5da8fa65e9d4697",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "404d9819c42e071577d27d6e6775958b630047c5ec3591a18781df2bb42e5ac0",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-headers.zip",
sha256 = "603adee3cf0cb0dde90d530d29cfdb6f4cc239f3c64c93d0d07867bf2b54e672",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-sources.zip",
sha256 = "bb585c808b383eae4ef1b24e4c879ed8b6a0c1a1e6c040f2b4c1bd0e17adf82b",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-linuxathena.zip",
sha256 = "b660c4dafbb5b30255fce165e857ecc84ebf1e8e588fba0135447232b9cbf2e9",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "fc21cd4f72b04f8e6ede40eea2a8bfc5bc0c5c540ffbce4a3370b993fce6fa39",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "857c24ed73c6b11d062e0bd233e8834397ce8ed3986917bb39e90e009a18ff48",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "c2bb50b94194a910c4cadad287dad7656ac1c6a81b2f19c64f6ec0625747ee8d",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-osxx86-64.zip",
sha256 = "8b87c660408614111d6c83f38c8c13d05c362ae8549cd47bc5f562a0ce6c8401",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "3518ef84638323b9c90445b1115eec0b2b856fae2ab029083d647f2dee2097e1",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "d425c9796b20da2f87ff1c2947a05667e1808c49263698f89c02f6f3b5fcbefe",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "1b3ca858c86428c4c6258c4dcec5fb27c5ef9161fe7ab263224aea186205256d",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-headers.zip",
sha256 = "4e91cd9114e0498fcdda69e73aa323f2178fee4c17d78582c220f058f4999905",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-sources.zip",
sha256 = "46740462856ba6e7804ff5dd5353e946910cfdf802ae80bc306c47aa26c6923e",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-linuxathena.zip",
sha256 = "a0875ce8e545c99a809126e79ffdc3faccc5ed08aaf7822dd1b2f4c288d17fcd",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "cf0c6ac0ef86872b7dca198fc1091547d104c8ec853b6569f7677563b97038bb",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "98b640dfb2e1b0282d851022e7119b37686f4c47f951a95ea0854664f5cb45b4",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "653b1ed5802e531377af85bad7e42f5dfc50dcae66350bf853c0498e1cf941f1",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-osxx86-64.zip",
sha256 = "c49e989b3730f6ad8b98d3b30edff0e85dd9da08d47b91d4dbbe0829ef9cd29e",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "e33becf456e85a7c6484f2a8cf86253b9a31bdca9eeb954a522461bc006bdac8",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "c312941aafb9bd74e3f4c629a15bc131b51ba11e5d6fba560eb97355dc6349fd",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "050aec1e42262aa3e6d8e3c7c7580749f8a1eedd594dd013d448828716b3d239",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-headers.zip",
sha256 = "fd11c98f1b9242e80e6fc864f8e2cc41a001ad464498c6cc95ccb26ec84fd6a9",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-sources.zip",
sha256 = "eacf13f19c3940ccd35f24aaaffc378bf41779e9072a4ed416374689b0a8ccf2",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-linuxathena.zip",
sha256 = "f64fbdfdf82f358463eb1d8333d73990d83bec8396a982e6d2d00f9dccee3144",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "64adf9dd211fdcfa092fb350d37a8c673d24dd7ddd20d99482008683ee1c6991",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "bac67e8854fc88fb14d905556ba9cc953d00ebf01c39c4ab517826e8ce5cd4b4",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "2389cecb8b86adf7d4323aa86dc7f64d5c566e6b0d0e099a123618efd3b4c76b",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-osxx86-64.zip",
sha256 = "d56cbcbc6e656b27d8be5a4238a5758d699b24eca7195fca952b38df1db8e099",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "e59495ceb36f6e113dfd8154ac0f7478f59e5b8e1879f912eb9e4423fc7b6f12",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "5132ce51cf48260d82d3bd8fb239fd516675cb1abe3339086ebdb13f1d50beb0",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "86fa73e93a21aa676312875b8483a9f55e12e9c2885a28065ed3429e7b4efbb3",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-headers.zip",
sha256 = "2a36b62c3bd764974d23b507de6ea0356e17f46cd8dc67dd6b33acc7afa32708",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-sources.zip",
sha256 = "61e980c42f0cf7991c4d0578751241cdf7c9a6fea9241b7d5fa631479ae4d719",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1/halsim_ds_socket-2022.1.1-windowsx86-64.zip",
sha256 = "aa53a0537813eb8092b6052adfe8f8b4ed7b237ae3541f29001dcc5bbcad9fcc",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1/halsim_ds_socket-2022.1.1-linuxx86-64.zip",
sha256 = "99ed1a95e84f6fdcfdf9f052c8f303ac096a77865ee0fe4cd651d7b73fafe75e",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1/halsim_ds_socket-2022.1.1-osxx86-64.zip",
sha256 = "13b0aa8a8d579d4983ecdee33d89cdd720d266e5a6edcc57b2c5fef59b0ac943",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1/halsim_gui-2022.1.1-windowsx86-64.zip",
sha256 = "1cbc198e1ed610614ca24449367007178452a1c2073536b307030d69cc7a4f3c",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1/halsim_gui-2022.1.1-linuxx86-64.zip",
sha256 = "d7c233f44c19d6775a42ab9d5ae0be7ad9644d5edffa74fe017ff4356db3a058",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1/halsim_gui-2022.1.1-osxx86-64.zip",
sha256 = "9d8e2a1d71ab7b7e2db75a261665aa994e84e4505caf86ab055d2b39c40c16d3",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1/halsim_ws_client-2022.1.1-windowsx86-64.zip",
sha256 = "3b56a59a07cb44dd169fa986c4c39a4cef8c8d9b69a9b35e0975bd6530fdc067",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1/halsim_ws_client-2022.1.1-linuxx86-64.zip",
sha256 = "317d98639060c5174cedfe9a18b8f81db85ef193a60ecc938ff76e2529be7ee6",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1/halsim_ws_client-2022.1.1-osxx86-64.zip",
sha256 = "a600f4fc60598f42acb52c7b17fcfa42255e48033507cd2ad9e00b985594ca85",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_server_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_server/2022.1.1/halsim_ws_server-2022.1.1-windowsx86-64.zip",
sha256 = "77e2066636643b61fdc2febe643f4dc5cfdd01f71680c17d6268db20c67921c0",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_server_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_server/2022.1.1/halsim_ws_server-2022.1.1-linuxx86-64.zip",
sha256 = "daab49d464cd0e18ab911470abccc7b70d1e77b4afe0874a80e765a99c93b988",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_server_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_server/2022.1.1/halsim_ws_server-2022.1.1-osxx86-64.zip",
sha256 = "848464ca9a3d494de18e8f269d771bda55fa73e62cda6a1563e3d7c33ba07c7e",
build_file_content = cc_library_shared,
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_shuffleboard_api",
artifact = "edu.wpi.first.shuffleboard:api:2022.1.1",
artifact_sha256 = "889c805d97a6be839e95ad21eb113621d3f8d24d9b73dcefaee00b492a507c98",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpilibj_wpilibj-java",
artifact = "edu.wpi.first.wpilibj:wpilibj-java:2022.1.1",
artifact_sha256 = "27b8d98abffcaca0f493cd5bda9a1b4ab64e8fceb513b2241aab08d961951179",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_hal_hal-java",
artifact = "edu.wpi.first.hal:hal-java:2022.1.1",
artifact_sha256 = "cd49ca18066d1eafa8877a3342383127557f594f302bd422f0ea21f850ca94cb",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpiutil_wpiutil-java",
artifact = "edu.wpi.first.wpiutil:wpiutil-java:2022.1.1",
artifact_sha256 = "7b0f5c9d4ef7d98c0d2d735cb1af31461840811c6b8d5fd21d96a61ffaa1e34d",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_ntcore_ntcore-java",
artifact = "edu.wpi.first.ntcore:ntcore-java:2022.1.1",
artifact_sha256 = "b5065d34aaadca8968748df86b8850667d3a8397ec5977316d961de5311ebcc0",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpimath_wpimath-java",
artifact = "edu.wpi.first.wpimath:wpimath-java:2022.1.1",
artifact_sha256 = "e3d2c1e4e46abf98ddaf2e1a77abdd76d63d18b634a212070ac1f350141d06a6",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_cameraserver_cameraserver-java",
artifact = "edu.wpi.first.cameraserver:cameraserver-java:2022.1.1",
artifact_sha256 = "99427cbfd43c933608c7b243509f3f7ad43872fa0057defcadbc09d8b4bdfb46",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_cscore_cscore-java",
artifact = "edu.wpi.first.cscore:cscore-java:2022.1.1",
artifact_sha256 = "ac38684b7ec825c627ca73d18e41cd956205250af16a90688c8d98e7d9a0c187",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-java",
artifact = "edu.wpi.first.wpilibOldCommands:wpilibOldCommands-java:2022.1.1",
artifact_sha256 = "a07ecd41b13bff0649a0feb30c9f62694145cc222ee1cb2c8cc57bca2b6bb0cc",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-java",
artifact = "edu.wpi.first.wpilibNewCommands:wpilibNewCommands-java:2022.1.1",
artifact_sha256 = "d87aab4d067dbb9c06203667bb7a8c9d658d9aa731c91370446f57a75335ffff",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_smartdashboard_linux64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SmartDashboard/2022.1.1/SmartDashboard-2022.1.1-linux64.jar",
sha256 = "6bfeeb5a2f28506565f80636a04d234a52e3789081356dee5de00827983125f5",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_smartdashboard_mac64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SmartDashboard/2022.1.1/SmartDashboard-2022.1.1-mac64.jar",
sha256 = "d7b153f095de363a2d7a391dbbd3708903837032dabbe97943ab9225b1cfe5fd",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_smartdashboard_win64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SmartDashboard/2022.1.1/SmartDashboard-2022.1.1-win64.jar",
sha256 = "ddaaae6bbccc1e1ea67e3e93dfc4227af37026b19bd54b2b53677d65b2189877",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_pathweaver_linux64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/PathWeaver/2022.1.1/PathWeaver-2022.1.1-linux64.jar",
sha256 = "de5630b5f9a049a84a213123add1242feff072ce8ccba09e1fc822c7b497e8f7",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_pathweaver_mac64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/PathWeaver/2022.1.1/PathWeaver-2022.1.1-mac64.jar",
sha256 = "35e6d0f5758cf46b01f446431f7ed4490708bfdb45281038d01c6f781c1229a3",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_pathweaver_win64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/PathWeaver/2022.1.1/PathWeaver-2022.1.1-win64.jar",
sha256 = "56a6cba57c2ce816ee6ebbb8dd34b7b7cd61c549d8863a3f43c504c354a0e979",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_robotbuilder",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/RobotBuilder/2022.1.1/RobotBuilder-2022.1.1.jar",
sha256 = "db8c22764d8c1f7bbe1550f85ed64358197dbe7b7d605607a79f91ef6465eb62",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_shuffleboard_shuffleboard_linux64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/shuffleboard/shuffleboard/2022.1.1/shuffleboard-2022.1.1-linux64.jar",
sha256 = "028ce82032dade135b2a409f87616807c5df0465e31d68ea05c115570c42fd9f",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_shuffleboard_shuffleboard_mac64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/shuffleboard/shuffleboard/2022.1.1/shuffleboard-2022.1.1-mac64.jar",
sha256 = "a2f8dab93fd56d7a16d4aeccadbb0bf72a77a933255ed44c2465f27bcab3527a",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_shuffleboard_shuffleboard_win64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/shuffleboard/shuffleboard/2022.1.1/shuffleboard-2022.1.1-win64.jar",
sha256 = "ada1cf7175f2e2a0fc19179193a4ab79b570f9b79dea603828e43c75e7f70785",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_glass_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/Glass/2022.1.1/Glass-2022.1.1-windowsx86-64.zip",
sha256 = "ffebb339b6db52878d56bc2cdf63efddd36e20a6bc9dd47ca3b326386f715a6f",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_glass_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/Glass/2022.1.1/Glass-2022.1.1-linuxx86-64.zip",
sha256 = "9b5f47ce127d3ed057e2196537c7c2e2551fe0ca2f86b3b8634400a651c65d80",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_glass_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/Glass/2022.1.1/Glass-2022.1.1-osxx86-64.zip",
sha256 = "e323e82116571fee1f58b18445c9eafc4158fafd8bb09fde44ae975987a91eb1",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_outlineviewer_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/OutlineViewer/2022.1.1/OutlineViewer-2022.1.1-windowsx86-64.zip",
sha256 = "76da23b24141864d5024f9fe54133eb4a92e7d5ba13fcbb3de8848f51b84189b",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_outlineviewer_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/OutlineViewer/2022.1.1/OutlineViewer-2022.1.1-linuxx86-64.zip",
sha256 = "df80f8ff7666ab98ab358193f1f7d8f851c6cc0e9ed8d435bc06ccdf9385946b",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_outlineviewer_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/OutlineViewer/2022.1.1/OutlineViewer-2022.1.1-osxx86-64.zip",
sha256 = "b41c91ff3db5fee9181e9ece4234675e7eb4eaa7d7ee9976ad61c599d74b21b7",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_sysid_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SysId/2022.1.1/SysId-2022.1.1-windowsx86-64.zip",
sha256 = "ef09ba600f83ea4afffbc826c54e4d0d80747a50082cea7ed44252061833cc7f",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_sysid_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SysId/2022.1.1/SysId-2022.1.1-linuxx86-64.zip",
sha256 = "29264caec4d730e27a042bdb569333a757189a62c639d10713d62295bc176dc9",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_sysid_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SysId/2022.1.1/SysId-2022.1.1-osxx86-64.zip",
sha256 = "d1f3f2d55154cc0644fba15d8c42a6aae33068f6e698cb6e9c6da0264a817f3c",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
| 1.1875
| 1
|
restler/decorators.py
|
agostodev/restler
| 1
|
12781509
|
<gh_stars>1-10
from restler import UnsupportedTypeError
def unsupported(type_):
"""
    Mark types that aren't supported.
    :param type_: Model type
    :return: a handler that raises an UnsupportedTypeError for that type
"""
def handler(obj):
raise UnsupportedTypeError(type_)
return handler
def wrap_method(cls, method):
""" Helper function to help wrap _restler* methods when more
than on decorator is used on a model.
:param method: method to wrap
"""
from copy import copy
method_name = method.__func__.__name__
method_param = method
if hasattr(cls, method_name):
orig_cls_method = getattr(cls, method_name)
@classmethod
def wrap(cls_):
setattr(cls, method_name, method_param)
method = getattr(cls, method_name)
aggregate = copy(orig_cls_method())
if isinstance(aggregate, list): # _restler_types()
aggregate = set(aggregate)
aggregate.update(method())
aggregate = list(aggregate)
elif isinstance(aggregate, dict): # _restler_property_map
aggregate.update(method())
elif isinstance(aggregate, str):
# Developer shouldn't really do this, but we'll try
# to do the correct thing and use the most recently defined name
aggregate = method() # _restler_serialization_name
return aggregate
setattr(cls, method_name, wrap)
else:
setattr(cls, method_name, method)
def create_type_map(cls, type_map=None):
""" Helper function for creating type maps """
_type_map = None
if type_map:
if callable(type_map):
_type_map = type_map(cls)
else:
_type_map = type_map.copy()
else:
_type_map = {}
return _type_map
def create_serialization_name(cls, serialization_name=None):
""" Helper function for creating a serialization name """
_serialization_name = serialization_name
if serialization_name:
if callable(serialization_name):
_serialization_name = serialization_name(cls)
return _serialization_name
def create_property_map(cls, property_map=None):
""" Helper function for creating property maps """
_property_map = None
if property_map:
if callable(property_map):
_property_map = property_map(cls)
else:
_property_map = property_map.copy()
else:
_property_map = {}
return _property_map
def ae_db_decorator_builder(type_map=None, serialization_name=None, property_map=None):
"""
Creates a decorator for google.appengine.ext.db.Model
:param type_map: a map of types -> callable(value) or a callable(model)
that returns a map.
:param serialization_name: a (string) name used for the tag/key for serialization or
a callable(model) that returns a name
:param property_map: a map of (field) names (string) -> types or a callable(model)
that returns a map.
"""
from google.appengine.ext import blobstore, db
def wrap(cls):
"""
Restler serialization class decorator for google.appengine.ext.db.Model
"""
@classmethod
def _restler_types(cls):
"""
            A map of types to callables that serialize those types.
"""
            _type_map = create_type_map(cls, type_map)
from google.appengine.api import users
from webapp2 import cached_property
_type_map.update(
{
db.Query: lambda query: [obj for obj in query],
db.GeoPt: lambda obj: "%s %s" % (obj.lat, obj.lon),
db.IM: lambda obj: "%s %s" % (obj.protocol, obj.address),
users.User: lambda obj: obj.user_id() or obj.email(),
cached_property: lambda obj: cached_property,
blobstore.BlobInfo: lambda obj: str(obj.key()) # TODO is this correct?
})
return _type_map
@classmethod
def _restler_serialization_name(cls):
"""
The lowercase model classname
"""
_serialization_name = create_serialization_name(cls, serialization_name or cls.kind().lower())
return _serialization_name
@classmethod
def _restler_property_map(cls):
"""
            A map of model property names -> property types. The names are used
            when *include_all_fields=True*. Property types must be from
            **google.appengine.ext.db.Property**
"""
_property_map = create_property_map(cls, property_map)
_property_map.update(cls.properties())
return _property_map
wrap_method(cls, _restler_types)
wrap_method(cls, _restler_property_map)
cls._restler_serialization_name = _restler_serialization_name
return cls
return wrap
ae_db_serializer = ae_db_decorator_builder()
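
# A minimal usage sketch (commented out because it needs the App Engine SDK; the
# model and its fields below are hypothetical, not taken from this codebase):
#
#     from google.appengine.ext import db
#
#     @ae_db_serializer
#     class Greeting(db.Model):
#         author = db.StringProperty()
#         posted = db.DateTimeProperty(auto_now_add=True)
#
# The decorator attaches _restler_types(), _restler_property_map() and
# _restler_serialization_name() classmethods, which restler's serialization
# machinery can then consult when rendering Greeting instances.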
def ae_ndb_decorator_builder(type_map=None, serialization_name=None, property_map=None):
"""
    Creates a decorator for google.appengine.ext.ndb.Model (parameters as in ae_db_decorator_builder)
"""
from google.appengine.ext import ndb
def wrap(cls):
@classmethod
def _restler_types(cls):
"""
            A map of types to callables that serialize those types.
"""
from google.appengine.api import users
from webapp2 import cached_property
            _type_map = create_type_map(cls, type_map)
_type_map.update({
ndb.query.Query: lambda query: [obj for obj in query],
ndb.ComputedProperty: unsupported(ndb.ComputedProperty),
ndb.GenericProperty: unsupported(ndb.GenericProperty),
ndb.GeoPt: lambda obj: "%s %s" % (obj.lat, obj.lon),
ndb.PickleProperty: unsupported(ndb.PickleProperty),
users.User: lambda obj: obj.user_id() or obj.email(),
cached_property: lambda obj: cached_property,
})
return _type_map
@classmethod
def _restler_serialization_name(cls):
"""
The lowercase model classname
"""
_serialization_name = create_serialization_name(cls, serialization_name or cls.__name__.lower())
return _serialization_name
@classmethod
def _restler_property_map(cls):
"""
            A map of model property names -> property types, used when
            *include_all_fields=True*. Properties must be from **google.appengine.ext.ndb.Property**
"""
_property_map = create_property_map(cls, property_map)
_property_map.update(dict((name, prop.__class__) for name, prop in cls._properties.items()))
return _property_map
wrap_method(cls, _restler_types)
wrap_method(cls, _restler_property_map)
cls._restler_serialization_name = _restler_serialization_name
return cls
return wrap
ae_ndb_serializer = ae_ndb_decorator_builder()
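
# Usage mirrors ae_db_serializer, but for ndb models (a sketch; the model is
# hypothetical):
#
#     from google.appengine.ext import ndb
#
#     @ae_ndb_serializer
#     class Greeting(ndb.Model):
#         author = ndb.StringProperty()
#         posted = ndb.DateTimeProperty(auto_now_add=True)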
def django_decorator_builder(type_map=None, serialization_name=None, property_map=None):
"""
Creates a decorator for django.db.Models
:param type_map: a map of types -> callable(value) or a callable(model)
that returns a map.
:param serialization_name: a (string) name used for the tag/key for serialization or
a callable(model) that returns a name
:param property_map: a map of (field) names (string) -> types or a callable(model)
that returns a map.
"""
def wrap(cls):
"""
Restler serialization class decorator for django.db.models
"""
@classmethod
def _restler_types(cls):
"""
            A map of types to callables that serialize those types.
"""
from django.db.models.query import QuerySet
from django.db.models import CommaSeparatedIntegerField, FileField, FilePathField, ImageField
import json
            _type_map = create_type_map(cls, type_map)
_type_map.update({
QuerySet: lambda query: list(query),
CommaSeparatedIntegerField: lambda value: json.loads(value),
ImageField: unsupported(ImageField),
FileField: unsupported(FileField),
FilePathField: unsupported(FilePathField)
})
return _type_map
@classmethod
def _restler_serialization_name(cls):
"""
The lowercase model classname
"""
_serialization_name = create_serialization_name(cls, serialization_name or cls.__name__.lower())
return _serialization_name
@classmethod
def _restler_property_map(cls):
"""
            A map of model property names -> property types. The names are used
            when *include_all_fields=True*. Property types must be from **django.db.models.fields**
"""
from django.db.models.fields.related import ForeignKey, ManyToManyField, OneToOneField, RelatedObject
# Relation fields (and their related objects) need to be handled specifically as there is no single way to
            # handle them -- they should be handled explicitly through callables.
excluded_types = {ForeignKey, ManyToManyField, OneToOneField, RelatedObject}
name_map = cls._meta._name_map
all_field_names = cls._meta.get_all_field_names()
_property_map = create_property_map(cls, property_map)
new_property_map = dict([(name, name_map[name][0].__class__)
for name in all_field_names if name_map[name][0].__class__ not in excluded_types])
_property_map.update(new_property_map)
return _property_map
wrap_method(cls, _restler_types)
wrap_method(cls, _restler_property_map)
cls._restler_serialization_name = _restler_serialization_name
return cls
return wrap
django_serializer = django_decorator_builder()
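
# Analogous usage for Django (a sketch; the model is hypothetical and needs a
# configured Django project):
#
#     from django.db import models
#
#     @django_serializer
#     class Article(models.Model):
#         title = models.CharField(max_length=200)
#         body = models.TextField()
#
# Note that relation fields (ForeignKey, ManyToManyField, OneToOneField) are
# excluded from the default property map above and need explicit handling via
# the property_map argument.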
def sqlalchemy_decorator_builder(type_map=None, serialization_name=None, property_map=None):
"""
Creates a decorator for sqlalchemy models
:param type_map: a map of types -> callable(value) or a callable(model)
that returns a map.
:param serialization_name: a (string) name used for the tag/key for serialization or
a callable(model) that returns a name
:param property_map: a map of (field) names (string) -> types or a callable(model)
that returns a map.
"""
def wrap(cls):
"""
Restler serialization class decorator for SqlAlchemy models
"""
@classmethod
def _restler_types(cls):
"""
            A map of types to callables that serialize those types.
"""
from sqlalchemy.types import Binary, Interval, LargeBinary, PickleType
from sqlalchemy.orm.query import Query
            _type_map = create_type_map(cls, type_map)
_type_map.update({
Query: lambda query: list(query),
Binary: unsupported(Binary),
Interval: unsupported(Interval),
LargeBinary: unsupported(LargeBinary),
PickleType: unsupported(PickleType)
})
return _type_map
@classmethod
def _restler_serialization_name(cls):
"""
The lowercase model classname
"""
_serialization_name = create_serialization_name(cls, serialization_name or cls.__name__.lower())
return _serialization_name
@classmethod
def _restler_property_map(cls):
"""
            A map of model property names -> property types. The names are used
            when *include_all_fields=True*. Property types must be from **sqlalchemy.types**
"""
_property_map = create_property_map(cls, property_map)
columns = cls.__table__.columns
column_map = dict([(name, columns.get(name).type.__class__) for name in columns.keys()])
_property_map.update(column_map)
return _property_map
wrap_method(cls, _restler_types)
wrap_method(cls, _restler_property_map)
cls._restler_serialization_name = _restler_serialization_name
return cls
return wrap
sqlalchemy_serializer = sqlalchemy_decorator_builder()
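
# Analogous usage for SQLAlchemy (a sketch; the declarative base and model are
# hypothetical):
#
#     from sqlalchemy import Column, Integer, String
#     from sqlalchemy.ext.declarative import declarative_base
#
#     Base = declarative_base()
#
#     @sqlalchemy_serializer
#     class User(Base):
#         __tablename__ = "users"
#         id = Column(Integer, primary_key=True)
#         name = Column(String)
#
# _restler_property_map() is built from cls.__table__.columns, so every mapped
# column appears with its SQLAlchemy type class.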
| 2.515625
| 3
|
mllearn/alg_adapt/mlknn.py
|
Lxinyuelxy/multi-label-learn
| 4
|
12781510
|
<filename>mllearn/alg_adapt/mlknn.py
import copy
import numpy as np
from sklearn.neighbors import NearestNeighbors
class MLKNN:
    """Multi-label k-nearest neighbours (ML-kNN).

    For each label, a prior (how common the label is in the training set) is
    combined with the likelihood of observing a given number of neighbours
    carrying that label; the label is predicted when the posterior odds exceed 1.
    """
    def __init__(self, k=6):
        self.k = k
def fit(self, X, y):
self.X = X
self.y = y
self.m = X.shape[0]
self.label_count = y.shape[1]
        self.knn = NearestNeighbors(n_neighbors=self.k)
self.knn.fit(X)
self.s = 1
self.hj = self.countHj()
self.kj = np.zeros(self.label_count)
self.ir_kj = np.zeros(self.label_count)
self.neighbors = self.knn.kneighbors(n_neighbors=self.k, return_distance=False)
for label in range(self.label_count):
for r in range(self.k+1):
self.kj[label] += self.countKj(False, label, r)
self.ir_kj[label] += self.countKj(True, label, r)
return self
def countHj(self):
hj = np.sum(self.y, axis=0)
hj = (self.s + hj) / (self.s*2 + self.m)
hj = hj / (1 - hj)
return hj
def countKj(self, isContrary, label, r):
kj = 0
for sample in range(self.m):
cj = np.sum(self.y[self.neighbors[sample], label])
if isContrary == False:
if self.y[sample, label] == 1 and cj == r:
kj += 1
else:
if self.y[sample, label] == 0 and cj == r:
kj += 1
return kj
def predict(self, X_pre):
result = np.zeros((X_pre.shape[0], self.label_count))
X_pre_neighbors = self.knn.kneighbors(X_pre, n_neighbors=self.k, return_distance=False)
for sample in range(X_pre.shape[0]):
for label in range(self.label_count):
cj = np.sum(self.y[X_pre_neighbors[sample], label])
kj_cj = self.countKj(False, label, cj)
ir_kj_cj = self.countKj(True, label, cj)
cj_hj = (self.s + kj_cj) / (self.s*(self.k+1) + self.kj[label])
cj_ir_hj = (self.s + ir_kj_cj) / (self.s*(self.k+1) + self.ir_kj[label])
if self.hj[label] * cj_hj / cj_ir_hj > 1:
result[sample, label] = 1
return result
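
# A minimal usage sketch with synthetic data (shapes are illustrative only);
# guarded so it runs only when this module is executed directly.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_train = rng.rand(100, 20)                      # 100 samples, 20 features
    y_train = (rng.rand(100, 5) > 0.5).astype(int)   # 5 binary labels per sample
    X_test = rng.rand(10, 20)

    clf = MLKNN(k=6).fit(X_train, y_train)
    print(clf.predict(X_test))                       # (10, 5) array of 0/1 predictions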
| 3.171875
| 3
|
for_interfaces.py
|
cybermetabolism/devoji-to-gitcoin-bounty-oracle
| 0
|
12781511
|
html_code = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>jekyll ui</title>
</head>
<body>
<h2>emoji language gitcoin bounty</h2>
<p>Please enter the emoji code here: </p>
<input type="button" value="🐛" id="button1"/>
<input type="button" value="🌟" id="button2"/>
<input type="button" value="👷" id="button3"/>
<input type="button" value="🏃" id="button4"/>
<form method="POST">
<input name="text" type="text" value="" id="text" autocomplete="off" style="width: 300px; height: 150px" />
<input type="submit">
</form>
<script type="text/javascript">
document.getElementById("button1").addEventListener('click', function () {
var text = document.getElementById('text');
text.value += ' 🐛';
});
</script>
<script type="text/javascript">
document.getElementById("button2").addEventListener('click', function () {
var text = document.getElementById('text');
text.value += ' 🌟';
});
</script>
<script type="text/javascript">
document.getElementById("button3").addEventListener('click', function () {
var text = document.getElementById('text');
text.value += ' 👷';
});
</script>
<script type="text/javascript">
document.getElementById("button4").addEventListener('click', function () {
var text = document.getElementById('text');
text.value += ' 🏃';
});
</script>
</body>
</html>
"""
| 2.25
| 2
|
emf/emf_funks.py
|
markmbaum/emf
| 1
|
12781512
|
from . import os, np, pd
from . import emf_class
def _path_manage(filename_if_needed, extension, **kwargs):
"""This function takes a path string through the kwarg 'path' and
    returns a path string with a file name at its end, to save a file
at that location. If the path string is a directory (ends with a slash
or doesn't but has preceding directory elements) a new path string is
returned with the 'filename_if_needed' and 'extension' arguments
appended. If the path string already has a file name at its end, the
input extension will replace any preexisting one. If no path string is
passed in via the keyword argument 'path', the returned path is simply
the input filename_if_needed with the input extension at its end. If the
last part of the path string is supposed to be the filename, include an
extension.
args:
filename_if_needed - name of file if not in path string
extension - file extension
kwargs:
path - string, destination/filename for saved file(s)
returns:
conditioned path string"""
#remove extensions from filename_if_needed
if('.' in os.path.basename(filename_if_needed)):
filename_if_needed = filename_if_needed[:filename_if_needed.rfind('.')]
#make sure the extension has a period at its beginning
if(extension):
if(extension[0] != '.'):
extension = '.' + extension
#check if there is a path kwarg
if('path' in kwargs):
path = kwargs['path']
#check if the path is empty
if(not path):
return(filename_if_needed + extension)
#check if the input path is a directory
elif(os.path.isdir(path)):
#join the directory string, file string, and extension string
return(os.path.join(path, filename_if_needed + extension))
else:
#path string interpreted as path to a file, not directory
#strip any extensions
if('.' in os.path.basename(path)):
path = path[:path.rfind('.')]
#put intended extension on the end and return
return(path + extension)
else: #otherwise just use filename_if_needed and extension
return(filename_if_needed + extension)
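
# Illustrative behavior of _path_manage (a sketch; 'plots' is assumed to be an
# existing directory in the second example):
#   _path_manage('fields', '.csv')                           -> 'fields.csv'
#   _path_manage('fields', '.csv', path='plots')             -> os.path.join('plots', 'fields.csv')
#   _path_manage('fields', '.csv', path='plots/results.txt') -> 'plots/results.csv'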
def _check_extension(file_path, correct_ext, message):
"""Check that a file path has the desired extension, raising an error if
not and appending the correct extension if no extension is present.
args:
file_path - string, a target file path
correct_ext - string, the correct extension for the target path,
with or without the period
        message - string, error message if the extension is wrong
returns:
file_path - string, valid file path"""
#manage period in correct_ext
if(not (correct_ext[0] == '.')):
correct_ext = '.' + correct_ext
#if it has an extension, check it
if('.' in os.path.basename(file_path)):
#check that the file exists
if(not os.path.isfile(file_path)):
raise(emf_class.EMFError("""
The file path:
"%s"
is not recognized as an existing file.""" % file_path))
#get extension and check it against correct_ext
ext = file_path[file_path.rfind('.'):]
if(ext != correct_ext):
raise(emf_class.EMFError(message))
else:
return(file_path)
#no extension, just append the correct extension and check file exists
else:
file_path += correct_ext
#check that the file exists
if(not os.path.isfile(file_path)):
raise(emf_class.EMFError("""
The file path:
"%s"
is not recognized as an existing file.""" % file_path))
return(file_path)
def _is_number(s):
"""Check if an element can be converted to a float, returning True
if it can and False if it can't"""
if((s is False) or (s is True)):
return(False)
try:
float(s)
except(ValueError, TypeError):
return(False)
else:
return(True)
def _is_int(x):
if(_is_number(x)):
if(float(x) == int(float(x))):
return(True)
return(False)
def _sig_figs(v, figs):
if(v == 0):
return(0)
if(pd.isnull(v)):
return(v)
w = round(v, int(figs - np.ceil(np.log10(np.abs(v)))))
if(w == round(w)):
return(int(w))
return(w)
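
# For example (a sketch of the rounding behavior):
#   _sig_figs(1234.567, 3)  -> 1230     (three significant figures, returned as int)
#   _sig_figs(0.0123456, 3) -> 0.0123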
def _check_intable(f):
"""If a float is a whole number, convert it to an integer"""
if(_is_int(f)):
return(int(float(f)))
else:
return(float(f))
def _check_to_array(x):
"""convert an iterable to a numpy array of type float or put a single
number into an array"""
if(hasattr(x, '__iter__')):
return(np.array(x, dtype=float))
else:
return(np.array([float(x)]))
#Flatten a list of lists. Only works on a list and the first level of
#sublists, not recursively on all levels
_flatten = lambda L: [item for sublist in L for item in sublist]
_to_alphanumeric = lambda s: ''.join(ch for ch in s if ch.isalnum())
def _path_str_condition(s):
s = s.replace(' ', '-')
return(_to_alphanumeric(s))
def _print_str_list(L):
s = [repr(i) for i in L]
if(len(s) > 2):
s = ', '.join(s[:-1]) + ', and ' + s[-1]
else:
s = ' and '.join(s)
return(s)
def _Levenshtein_group(V, W):
"""Match each string in vector V to a string in vector W, using global
least Levenshtein distance for each match.
V - vector of strings
W - vector of strings, can be longer than V but not shorter
returns:
list of matches in the same order as V, comprising elements of W"""
#set up some loop variables
vmask = np.ones((len(V),), dtype = bool)
vrange = np.arange(len(V))
wmask = np.ones((len(W),), dtype = bool)
wrange = np.arange(len(W))
matches = ['']*len(V)
#calculate distance between all pairs of words
M = np.zeros((len(V),len(W)))
for i in vrange:
for j in wrange:
M[i,j] = _Levenshtein_distance(V[i],W[j])
#find a match in W for each word in V with minimum global distances
for n in vrange:
vr = vrange[vmask]
wr = wrange[wmask]
#find the minimum distance of any pairs left
i_min = vr[0]
j_min = wr[0]
min_dist = M[i_min,j_min]
for i in vr:
for j in wr:
if(M[i,j] < min_dist):
min_dist = M[i,j]
i_min = i
j_min = j
#store the match and update the masks
matches[i_min] = W[j_min]
vmask[i_min] = False
wmask[j_min] = False
return(matches)
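#Note on _Levenshtein_group above: it computes all pairwise distances up front, then greedily
#assigns matches in order of the smallest remaining distance, so each element of W is consumed
#at most once (which is why W may not be shorter than V).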
def _Levenshtein_find(s, V):
"""find the string in V that most clostly resembles s, as measured by
the Levenshtein distance
args:
s - string
V - iterable of strings
returns:
string in V most similar to s
index of that string in V"""
d = np.zeros((len(V),))
for (i,t) in enumerate(V):
d[i] = _Levenshtein_distance(s,t)
idx = np.argmin(d)
return(V[idx], idx)
def _Levenshtein_distance(s, t):
"""Calculate string similarity metric with a basic edit distance
routine to find the Levenshtein Distance, without case sensitivity
args:
s - string 1
t - string 2
returns:
d - distance metric, count of edit operations required
information source:
http://web.archive.org/web/20081224234350/http://www.dcs.shef.ac.uk/~sam/stringmetrics.html#Levenshtein"""
#check empty strings
if(len(s) == 0):
return(len(t))
elif(len(t) == 0):
return(len(s))
#lower case
s = s.lower()
t = t.lower()
#initialize grid
ls = len(s)
lt = len(t)
D = np.zeros((ls,lt))
if(s[0] != t[0]):
D[0,0] = 1.0
D[:,0] = np.arange(D[0,0], ls + D[0,0])
D[0,:] = np.arange(D[0,0], lt + D[0,0])
#vector to store edit operation scores
e = np.zeros((3,))
for i in range(1,ls):
for j in range(1,lt):
e[0] = D[i-1,j-1]
if(s[i] != t[j]):
e[0] += 1
e[1] = D[i-1,j] + 1
e[2] = D[i,j-1] + 1
D[i,j] = np.min(e)
return(D[-1,-1])
| 3.40625
| 3
|
pyrobud/custom_modules/debug.py
|
x0x8x/pyrobud
| 0
|
12781513
|
import telethon as tg
from pyrobud.util.bluscream import UserStr, telegram_uid_regex
from .. import command, module
class DebugModuleAddon(module.Module):
name = "Debug Extensions"
@command.desc("Dump all the data of a message to your cloud")
@command.alias("mdp")
async def cmd_mdumpprivate(self, msg: tg.custom.Message):
if not msg.is_reply: return
reply_msg = await msg.get_reply_message()
await msg.delete()
data = f"```{reply_msg.stringify()}```"
await self.bot.client.send_message("me", data)
@command.desc("Convert all tg uids to profile links")
@command.alias("idlink", "linkids", "linkid")
async def cmd_idlinks(self, msg: tg.custom.Message):
if not msg.is_reply: return
reply_msg = await msg.get_reply_message()
        matches = telegram_uid_regex.finditer(reply_msg.text)  # flags belong at compile time, not as a finditer() argument
uids = list()
for matchNum, match in enumerate(matches, start=1):
            if match.group() not in uids:
uids.append(match.group())
if len(uids) < 1: return "No UIDs found in the given message."
ret = f"Found **{len(uids)}** UIDs:\n"
for uid in uids:
try:
user = await self.bot.client.get_entity(int(uid))
ret += f"\n - {UserStr(user, True)}"
            except: ret += f"\n - [{uid}](tg://user?id={uid})"
return ret
| 2.21875
| 2
|
screen_reader.py
|
vivek-kumar9696/Quickdraw_LOGO_Detection
| 0
|
12781514
|
# coding: utf-8
# In[ ]:
import cv2
from keras.models import load_model
import numpy as np
from collections import deque
from keras.preprocessing import image
import keras
import os
# In[ ]:
model1 = load_model('mob_logo_model.h5')
val = ['Adidas','Apple','BMW','Citroen','Fedex','HP','Mcdonalds','Nike','none','Pepsi','Puma']
pred_class = 8
# In[ ]:
def nothing(x):
pass
cap = cv2.VideoCapture(0)
cv2.namedWindow("Trackbars")
cv2.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
# In[ ]:
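# Added overview comment: main() below tracks a coloured marker in the webcam feed using the HSV
# range set by the trackbars, draws its trail on a hidden "blackboard", and once the marker
# disappears it crops the drawing region (the magenta rectangle) and classifies the doodle with
# the MobileNet-based logo model; the most recent prediction's logo is overlaid on every frame.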
def main():
logos = get_logos()
cap = cv2.VideoCapture(0)
Lower_green = np.array([10,130,130])
Upper_green = np.array([40,255,255])
pts = deque(maxlen=512)
blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
value = np.zeros((224,224,3), dtype = np.uint8)
#print(blackboard)
digit = np.zeros((200, 200, 3), dtype=np.uint8)
pred_class = 8
while (cap.isOpened()):
ret, img = cap.read()
img = cv2.flip(img, 1)
cv2.rectangle(img,(400,250),(624,474),(255,0,255),5)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
l_h = cv2.getTrackbarPos("L - H", "Trackbars")
l_s = cv2.getTrackbarPos("L - S", "Trackbars")
l_v = cv2.getTrackbarPos("L - V", "Trackbars")
u_h = cv2.getTrackbarPos("U - H", "Trackbars")
u_s = cv2.getTrackbarPos("U - S", "Trackbars")
u_v = cv2.getTrackbarPos("U - V", "Trackbars")
        Lower_green = np.array([l_h, l_s, l_v])  # use the trackbars to customize the colour to track for making the doodles
        Upper_green = np.array([u_h, u_s, u_v])  # 0,131,157 179,255,255 (orange color settings); upper bound uses u_h for hue
kernel = np.ones((5, 5), np.uint8)
mask = cv2.inRange(hsv, Lower_green, Upper_green)
mask = cv2.erode(mask, kernel, iterations=2)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)
mask = cv2.dilate(mask, kernel, iterations=1)
res = cv2.bitwise_and(img, img, mask=mask)
cnts, heir = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
center = None
if len(cnts) >= 1:
cnt = max(cnts, key=cv2.contourArea)
#print(cnt)
if cv2.contourArea(cnt) > 200:
((x, y), radius) = cv2.minEnclosingCircle(cnt)
                M = cv2.moments(cnt)
                center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
                cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
                cv2.circle(img, center, 5, (0, 0, 255), -1)  # draw the centroid after it has been computed
pts.appendleft(center)
for i in range(1, len(pts)):
if pts[i - 1] is None or pts[i] is None:
continue
cv2.line(blackboard, pts[i - 1], pts[i], (255, 255, 255), 7)
cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), 2)
elif len(cnts) == 0:
            if len(pts) != 0:
blackboard_gray = cv2.cvtColor(blackboard, cv2.COLOR_BGR2GRAY)
blur1 = cv2.medianBlur(blackboard_gray, 15)
blur1 = cv2.GaussianBlur(blur1, (5, 5), 0)
thresh1 = cv2.threshold(blur1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
                blackboard_cnts = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2]  # [-2] works on both OpenCV 3 and 4
if len(blackboard_cnts) >= 1:
cnt = max(blackboard_cnts, key=cv2.contourArea)
#print(cv2.contourArea(cnt))
if cv2.contourArea(cnt) > 2000:
value = blackboard[250:474, 400:624]
pred_probab, pred_class = keras_predict(model1, value)
print(val[pred_class], pred_probab)
pts = deque(maxlen=512)
blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
img = overlay(img, logos[pred_class])
cv2.imshow("Frame", img)
cv2.imshow("Res", res)
cv2.imshow("mask", mask)
k = cv2.waitKey(10)
if k == 27:
break
# In[ ]:
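# Added note: keras_predict runs the loaded logo classifier on a single 224x224 crop and returns
# (confidence, predicted class index); keras_process_image below applies the MobileNet
# preprocessing to that crop.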
def keras_predict(model, image):
processed = keras_process_image(image)
print("processed: " + str(processed.shape))
    pred_probab = model.predict(processed)[0]
pred_class = list(pred_probab).index(max(pred_probab))
return max(pred_probab), pred_class
# In[ ]:
def keras_process_image(img):
img_array = image.img_to_array(img)
img_array_expanded_dims = np.expand_dims(img_array, axis = 0)
return keras.applications.mobilenet.preprocess_input(img_array_expanded_dims)
# In[ ]:
def get_logos():
logos_folder = "../logo/"
logos = []
for logo in range(len(os.listdir(logos_folder))):
logos.append(cv2.imread(logos_folder + str(logo) + '.png', cv2.IMREAD_UNCHANGED))
print(logos)
return logos
# In[ ]:
def overlay(image, logo):
x,y,z = logo.shape
#try:
image[0:x, 0:y] = blend_transparent(image[0:x, 0:y ], logo)
#except:
#pass
return image
# In[ ]:
def blend_transparent(face_img, overlay_t_img):
# Split out the transparency mask from the colour info
overlay_img = overlay_t_img[:, :, :3] # Grab the BRG planes
overlay_mask = overlay_t_img[:, :, 3:] # And the alpha plane
# Again calculate the inverse mask
background_mask = 255 - overlay_mask
# Turn the masks into three channel, so we can use them as weights
overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)
# Create a masked out face image, and masked out overlay
# We convert the images to floating point in range 0.0 - 1.0
face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))
# And finally just add them together, and rescale it back to an 8bit integer image
return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
# In[ ]:
keras_predict(model1, np.zeros((224, 224, 3), dtype=np.uint8))
main()
| 2.828125
| 3
|
BaseType/Const.py
|
dimiroul/BacktestFramePublic
| 0
|
12781515
|
<filename>BaseType/Const.py
import pandas
import os
class const(object):
"""
const(object):用于保存回测框架使用的各种常量的类
"""
def __init__(self):
self.constants = dict()
def __getitem__(self, item):
if item not in self.constants:
raise RuntimeError("const {:s} not defined".format(str(item)))
else:
return self.constants[item]
def __setitem__(self, key, value):
self.constants[key] = value
# Define CONST, an instance of the const class, as a global variable
CONST = const()
# CURRENCY (default currency): Chinese yuan (CNY)
CONST["CURRENCY"] = "CNY"
# START_TIME (default start time): 1900/01/01 00:00:00
# END_TIME (default end time): 2099/12/31 23:59:59
CONST["START_TIME"] = pandas.to_datetime("1900/01/01 00:00:00", format="%Y%m%d %H:%M:%S")
CONST["END_TIME"] = pandas.to_datetime("2099/12/31 23:59:59", format="%Y%m%d %H:%M:%S")
# DEFAULT_QUEUE_SIZE (default queue length): 16
CONST["DEFAULT_QUEUE_SIZE"] = 16
# ENCODING (default file encoding): GB2312
# RESAMPLE (default period for aggregating ticks): 1 minute
CONST["ENCODING"] = "GB2312"
CONST["RESAMPLE"] = "1min"
# THIS_PATH (local project path)
# QUEUE_PATH (event queue log path): /log/QueueLog.csv
# DEFAULT_PATH (default event log path): /log/DefaultLog.csv
# BAR_PATH (Bar event log path): /log/BarLog.csv
# PRICE_PATH (Price event log path): /log/PriceLog.csv
# SIGNAL_PATH (Signal event log path): /log/SignalLog.csv
# ORDER_PATH (Order event log path): /log/OrderLog.csv
# CANCEL_PATH (Cancel event log path): /log/CancelLog.csv
# FILL_PATH (Fill event log path): /log/FillLog.csv
# PORTFOLIO_PATH (Portfolio info log path): /log/PortfolioLog.csv
# STRATEGY_PATH (Strategy info log path): /log/StrategyLog.csv
CONST["THIS_PATH"] = os.getcwd()
CONST["QUEUE_PATH"] = CONST["THIS_PATH"] + "/log/QueueLog.csv"
CONST["DEFAULT_PATH"] = CONST["THIS_PATH"] + "/log/DefaultLog.csv"
CONST["BAR_PATH"] = CONST["THIS_PATH"] + "/log/BarLog.csv"
CONST["PRICE_PATH"] = CONST["THIS_PATH"] + "/log/PriceLog.csv"
CONST["SIGNAL_PATH"] = CONST["THIS_PATH"] + "/log/SignalLog.csv"
CONST["ORDER_PATH"] = CONST["THIS_PATH"] + "/log/OrderLog.csv"
CONST["CANCEL_PATH"] = CONST["THIS_PATH"] + "/log/CancelLog.csv"
CONST["FILL_PATH"] = CONST["THIS_PATH"] + "/log/FillLog.csv"
CONST["PORTFOLIO_PATH"] = CONST["THIS_PATH"] + "/log/PortfolioLog.csv"
CONST["STRATEGY_PATH"] = CONST["THIS_PATH"] + "/log/StrategyLog.csv"
# SYMBOL (default instrument code): NULL
# EXCHANGE (default exchange): NULL
# PER_HAND (default lot size): 100
# PER_PRICE (default price tick): 0.01
CONST["SYMBOL"] = "NULL"
CONST["EXCHANGE"] = "NULL"
CONST["PER_HAND"] = 100
CONST["PER_PRICE"] = 0.01
# BID_COMMISSION (default fixed buy commission): 0.0
# BID_COMMISSION_RATE (default buy commission rate): 0.00015
# BID_TAX (default fixed buy tax): 0.0
# BID_TAX_RATE (default buy tax rate): 0.0
CONST["BID_COMMISSION"] = 0.0
CONST["BID_COMMISSION_RATE"] = 0.00015
CONST["BID_TAX"] = 0.0
CONST["BID_TAX_RATE"] = 0.0
# ASK_COMMISSION (default fixed sell commission): 0.0
# ASK_COMMISSION_RATE (default sell commission rate): 0.00015
# ASK_TAX (default fixed sell tax): 0.0
# ASK_TAX_RATE (default sell tax rate): 0.001
CONST["ASK_COMMISSION"] = 0.0
CONST["ASK_COMMISSION_RATE"] = 0.00015
CONST["ASK_TAX"] = 0.0
CONST["ASK_TAX_RATE"] = 0.001
# CRT_PRICE (default current unit price): 0.0
# NET_PRICE (default unit net value): 0.0
# BOOK_VALUE (default unit face value): 1.0
# VOLUME (default quantity): 0.0
CONST["CRT_PRICE"] = 0.0
CONST["NET_PRICE"] = 0.0
CONST["BOOK_VALUE"] = 1.0
CONST["VOLUME"] = 0.0
# MULTIPLIER (default contract multiplier): 1
# MARGIN_RATE (default margin rate): 1
CONST["MULTIPLIER"] = 1
CONST["MARGIN_RATE"] = 1
# TIME_OFFSET (default unit of time advancement): seconds
# TIME_OFFSET_TIMES (default amount of time advancement): 1
CONST["TIME_OFFSET"] = "seconds"
CONST["TIME_OFFSET_TIMES"] = 1
| 2.75
| 3
|
tests/model_test.py
|
gaganchhabra/appkernel
| 156
|
12781516
|
import json
from decimal import Decimal
from pymongo import MongoClient
from appkernel import PropertyRequiredException
from appkernel.configuration import config
from appkernel.repository import mongo_type_converter_to_dict, mongo_type_converter_from_dict
from .utils import *
import pytest
from jsonschema import validate
def setup_module(module):
config.mongo_database = MongoClient(host='localhost')['appkernel']
def setup_function(function):
""" executed before each method call
"""
print('\n\nSETUP ==> ')
Project.delete_all()
User.delete_all()
def test_required_field():
project = Project()
with pytest.raises(PropertyRequiredException):
project.finalise_and_validate()
with pytest.raises(PropertyRequiredException):
project.update(name=None)
project.finalise_and_validate()
project.update(name='some_name')
project.finalise_and_validate()
def test_append_to_non_existing_non_defined_element():
project = Project().update(name='strange project')
project.append_to(users=Task().update(name='some_task', description='some description'))
project.finalise_and_validate()
assert 'users' in project.__dict__
assert len(project.users) == 1
assert isinstance(project.users[0], Task)
print(('{}'.format(project)))
def test_append_to_non_existing_element():
project = Project().update(name='strange project')
project.append_to(tasks=Task().update(name='some_task', description='some description'))
project.finalise_and_validate()
assert 'tasks' in project.__dict__
assert len(project.tasks) == 1
assert isinstance(project.tasks[0], Task)
print(('{}'.format(project)))
def test_remove_non_existing_element():
with pytest.raises(AttributeError):
project = Project().update(name='strange project')
project.remove_from(tasks=Task())
with pytest.raises(AttributeError):
project = Project().update(name='strange project')
project.remove_from(tasks=None)
with pytest.raises(AttributeError):
project = Project().update(name='strange project')
project.remove_from(somehtings=Task())
def test_remove_existing_defined_element():
task1 = Task().update(name='some_task', description='some description')
task2 = Task().update(name='some_other_task', description='some other description')
task3 = Task().update(name='a third task', description='some third description')
project = Project().update(name='strange project')
project.append_to(tasks=[task1, task2])
project.finalise_and_validate()
assert len(project.tasks) == 2
project.append_to(tasks=task3)
project.finalise_and_validate()
assert len(project.tasks) == 3
print(('{}'.format(project)))
project.remove_from(tasks=task1)
assert len(project.tasks) == 2
print(('{}'.format(project)))
def test_generator():
task = Task()
task.name = 'some task name'
task.description = 'some task description'
task.finalise_and_validate()
print(('\nTask:\n {}'.format(task)))
assert task.id is not None and task.id.startswith('U')
def test_converter():
user = create_and_save_a_user('test user', 'test password', 'test description')
print(('\n{}'.format(user.dumps(pretty_print=True))))
assert user.password.startswith('<PASSWORD>')
hash1 = user.password
user.save()
assert user.password.startswith('<PASSWORD>')
assert hash1 == user.password
def test_nested_object_serialisation():
portfolio = create_a_portfolion_with_owner()
print((portfolio.dumps(pretty_print=True)))
check_portfolio(portfolio)
def test_describe_model():
user_spec = User.get_parameter_spec()
print(User.get_paramater_spec_as_json())
assert 'name' in user_spec
assert user_spec.get('name').get('required')
assert user_spec.get('name').get('type') == 'str'
assert len(user_spec.get('name').get('validators')) == 2
for validator in user_spec.get('name').get('validators'):
if validator.get('type') == 'Regexp':
assert validator.get('value') == '[A-Za-z0-9-_]'
assert user_spec.get('roles').get('sub_type') == 'str'
def test_describe_rich_model():
project_spec = Project.get_parameter_spec()
print(Project.get_paramater_spec_as_json())
assert project_spec.get('created').get('required')
assert project_spec.get('created').get('type') == 'datetime'
assert project_spec.get('name').get('required')
assert project_spec.get('name').get('type') == 'str'
name_validators = project_spec.get('name').get('validators')
assert len(name_validators) == 1
assert name_validators[0].get('type') == 'NotEmpty'
    assert name_validators[0].get('value') in (None, 'null')
tasks = project_spec.get('tasks')
assert not tasks.get('required')
assert 'sub_type' in tasks
assert tasks.get('type') == 'list'
task = tasks.get('sub_type')
assert task.get('type') == 'Task'
assert 'props' in task
props = task.get('props')
assert not props.get('closed_date').get('required')
assert props.get('closed_date').get('type') == 'datetime'
assert props.get('closed_date').get('validators')[0].get('type') == 'Past'
def test_json_schema():
json_schema = Project.get_json_schema()
print('\n{}'.format(json.dumps(json_schema, indent=2)))
print('===========')
project = create_rich_project()
print(project.dumps(pretty_print=True))
assert json_schema.get('title') == 'Project Schema'
assert 'title' in json_schema
assert json_schema.get('type') == 'object'
assert 'name' in json_schema.get('required')
assert 'created' in json_schema.get('required')
assert 'definitions' in json_schema
assert json_schema.get('additionalProperties')
definitions = json_schema.get('definitions')
assert 'Task' in definitions
assert len(definitions.get('Task').get('required')) == 6
assert 'id' in definitions.get('Task').get('properties')
closed_date = definitions.get('Task').get('properties').get('closed_date')
assert 'string' in closed_date.get('type')
assert len(closed_date.get('type')) == 2
assert closed_date.get('format') == 'date-time'
completed = definitions.get('Task').get('properties').get('completed')
assert 'boolean' in completed.get('type')
assert len(completed.get('type')) == 1
validate(json.loads(project.dumps()), json_schema)
# todo: check the enum / make a negative test
# validator = Draft4Validator(json_schema)
# errors = sorted(validator.iter_errors(project.dumps()), key=lambda e: e.path)
# for error in errors:
# print('{}'.format(error.message, list(error.path)))
def test_json_schema_primitives_types():
json_schema = Stock.get_json_schema()
print(json.dumps(json_schema, indent=2))
props = json_schema.get('properties')
opentypes = props.get('open').get('type')
assert 'number' in opentypes
assert len(opentypes) == 1
item_types = props.get('history').get('items').get('type')
assert 'number' in item_types
    assert len(item_types) == 1
stock = create_a_stock()
validate(json.loads(stock.dumps()), json_schema)
def test_json_schema_complex():
# print json.dumps(Portfolio.get_parameter_spec(True), indent=2)
json_schema = Portfolio.get_json_schema()
print(json.dumps(json_schema, indent=2))
stock_definition = json_schema.get('definitions').get('Stock')
assert stock_definition.get('properties').get('updated').get('format') == 'date-time'
assert stock_definition.get('properties').get('code').get('pattern') == '[A-Za-z0-9-_]'
assert stock_definition.get('properties').get('code').get('maxLength') == 4
assert stock_definition.get('properties').get('open').get('minimum') == 0
open_types = stock_definition.get('properties').get('open').get('type')
assert 'number' in open_types
assert len(open_types) == 1
sequence_types = stock_definition.get('properties').get('sequence').get('type')
assert 'number' in sequence_types
assert len(sequence_types) == 2
assert stock_definition.get('properties').get('sequence').get('minimum') == 1
assert stock_definition.get('properties').get('sequence').get('maximum') == 100
assert stock_definition.get('properties').get('sequence').get('multipleOf') == 1.0
history_types = stock_definition.get('properties').get('history').get('type')
assert 'array' in history_types
assert len(history_types) == 2
portfolio = create_portfolio('My Portfolio')
validate(json.loads(portfolio.dumps()), json_schema)
def test_json_schema_in_mongo_compat_mode():
json_schema = Project.get_json_schema(mongo_compatibility=True)
print('\n\n{}'.format(json.dumps(json_schema, indent=2)))
print('===========')
task_spec = json_schema.get('properties').get('tasks')
assert len(task_spec.get('items').get('required')) == 5
priority_spec = task_spec.get('items').get('properties').get('priority')
assert len(priority_spec.get('enum')) == 3
closed_date_spec = task_spec.get('items').get('properties').get('closed_date')
assert len(closed_date_spec.get('bsonType')) == 2
assert 'bsonType' in json_schema
assert 'id' not in json_schema
assert '$schema' not in json_schema
assert 'definitions' not in json_schema
for prop in json_schema.get('properties').items():
assert 'format' not in prop[1]
assert 'bsonType' in prop[1]
for prop in task_spec.get('items').get('properties').items():
assert 'format' not in prop[1]
assert 'bsonType' or 'enum' in prop[1]
project = create_rich_project()
print(project.dumps(pretty_print=True))
validate(json.loads(project.dumps()), json_schema)
def __assert_product_dict(product_dict: dict):
assert 'name' in product_dict
assert 'description' in product_dict
assert 'size' in product_dict
assert product_dict.get('size') == 'M'
assert 'price' in product_dict
assert isinstance(product_dict.get('price'), dict)
price_dict = product_dict.get('price')
assert '_type' in price_dict
assert price_dict.get('_type') == 'money.money.Money'
assert price_dict.get('currency') == 'EUR'
def test_custom_object_marshalling():
product = Product(code='TRX', name='White T-Shirt', description='a stylish white shirt', size=ProductSize.M,
price=Money(10.50, 'EUR'))
product_dict = Model.to_dict(product)
__assert_product_dict(product_dict)
amount = product_dict.get('price').get('amount')
assert isinstance(amount, Decimal)
assert amount == 10.5
product_json = product.dumps(pretty_print=True)
print('JSON: \n{}'.format(product_json))
reloaded_product = Product.loads(product_json)
assert reloaded_product is not None and isinstance(reloaded_product, Product)
assert reloaded_product.name == product.name
assert reloaded_product.description == product.description
assert reloaded_product.size == product.size
assert isinstance(reloaded_product.price, Money)
assert reloaded_product.price == product.price
def test_custom_converter_function():
product = Product(code='TRX', name='White T-Shirt', description='a stylish white shirt', size=ProductSize.M,
price=Money(10.50, 'EUR'))
product_dict = Model.to_dict(product, converter_func=mongo_type_converter_to_dict)
__assert_product_dict(product_dict)
amount = product_dict.get('price').get('amount')
assert isinstance(amount, float)
product_json = product.dumps(pretty_print=True)
print('JSON: \n{}'.format(product_json))
reloaded_product = Model.from_dict(product_dict, Product, converter_func=mongo_type_converter_from_dict)
assert isinstance(reloaded_product.price, Money)
assert isinstance(reloaded_product.price.amount, Decimal)
| 2.296875
| 2
|
mailchimp_marketing_asyncio/models/gallery_folder.py
|
john-parton/mailchimp-asyncio
| 0
|
12781517
|
<filename>mailchimp_marketing_asyncio/models/gallery_folder.py
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GalleryFolder(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'name': 'str',
'file_count': 'int',
'created_at': 'datetime',
'created_by': 'str',
'links': 'list[ResourceLink]'
}
attribute_map = {
'id': 'id',
'name': 'name',
'file_count': 'file_count',
'created_at': 'created_at',
'created_by': 'created_by',
'links': '_links'
}
def __init__(self, id=None, name=None, file_count=None, created_at=None, created_by=None, links=None): # noqa: E501
"""GalleryFolder - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._file_count = None
self._created_at = None
self._created_by = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if file_count is not None:
self.file_count = file_count
if created_at is not None:
self.created_at = created_at
if created_by is not None:
self.created_by = created_by
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this GalleryFolder. # noqa: E501
The unique id for the folder. # noqa: E501
:return: The id of this GalleryFolder. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this GalleryFolder.
The unique id for the folder. # noqa: E501
:param id: The id of this GalleryFolder. # noqa: E501
:type: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this GalleryFolder. # noqa: E501
The name of the folder. # noqa: E501
:return: The name of this GalleryFolder. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this GalleryFolder.
The name of the folder. # noqa: E501
:param name: The name of this GalleryFolder. # noqa: E501
:type: str
"""
self._name = name
@property
def file_count(self):
"""Gets the file_count of this GalleryFolder. # noqa: E501
The number of files in the folder. # noqa: E501
:return: The file_count of this GalleryFolder. # noqa: E501
:rtype: int
"""
return self._file_count
@file_count.setter
def file_count(self, file_count):
"""Sets the file_count of this GalleryFolder.
The number of files in the folder. # noqa: E501
:param file_count: The file_count of this GalleryFolder. # noqa: E501
:type: int
"""
self._file_count = file_count
@property
def created_at(self):
"""Gets the created_at of this GalleryFolder. # noqa: E501
The date and time a file was added to the File Manager in ISO 8601 format. # noqa: E501
:return: The created_at of this GalleryFolder. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this GalleryFolder.
The date and time a file was added to the File Manager in ISO 8601 format. # noqa: E501
:param created_at: The created_at of this GalleryFolder. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def created_by(self):
"""Gets the created_by of this GalleryFolder. # noqa: E501
The username of the profile that created the folder. # noqa: E501
:return: The created_by of this GalleryFolder. # noqa: E501
:rtype: str
"""
return self._created_by
@created_by.setter
def created_by(self, created_by):
"""Sets the created_by of this GalleryFolder.
The username of the profile that created the folder. # noqa: E501
:param created_by: The created_by of this GalleryFolder. # noqa: E501
:type: str
"""
self._created_by = created_by
@property
def links(self):
"""Gets the links of this GalleryFolder. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this GalleryFolder. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this GalleryFolder.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this GalleryFolder. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GalleryFolder, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GalleryFolder):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 2.015625
| 2
|
tests/test_processing_rt.py
|
jdowlingmedley/pyomeca
| 0
|
12781518
|
from itertools import permutations
import numpy as np
import pytest
from pyomeca import Angles, Rototrans, Markers
SEQ = (
["".join(p) for i in range(1, 4) for p in permutations("xyz", i)]
+ ["zyzz"]
+ ["zxz"]
)
SEQ = [s for s in SEQ if s not in ["yxz"]]
EPSILON = 1e-12
ANGLES = Angles(np.random.rand(4, 1, 100))
@pytest.mark.parametrize("seq", SEQ)
def test_euler2rot_rot2euleur(seq, angles=ANGLES, epsilon=EPSILON):
if seq == "zyzz":
angles_to_test = angles[:3, ...]
else:
angles_to_test = angles[: len(seq), ...]
r = Rototrans.from_euler_angles(angles=angles_to_test, angle_sequence=seq)
a = Angles.from_rototrans(rototrans=r, angle_sequence=seq)
np.testing.assert_array_less((a - angles_to_test).meca.abs().sum(), epsilon)
def test_construct_rt():
eye = Rototrans()
np.testing.assert_equal(eye.time.size, 1)
np.testing.assert_equal(eye.sel(time=0), np.eye(4))
eye = Rototrans.from_euler_angles()
np.testing.assert_equal(eye.time.size, 1)
np.testing.assert_equal(eye.sel(time=0), np.eye(4))
    # Test the ways to create a rt, but not when providing both angles and sequence
nb_frames = 10
random_vector = Angles(np.random.rand(3, 1, nb_frames))
# with angles
rt_random_angles = Rototrans.from_euler_angles(
angles=random_vector, angle_sequence="xyz"
)
np.testing.assert_equal(rt_random_angles.time.size, nb_frames)
np.testing.assert_equal(
rt_random_angles[:-1, -1:, :], np.zeros((3, 1, nb_frames))
) # Translation is 0
# with translation
rt_random_translation = Rototrans.from_euler_angles(translations=random_vector)
np.testing.assert_equal(rt_random_translation.time.size, nb_frames)
np.testing.assert_equal(
rt_random_translation[:3, :3, :],
np.repeat(np.eye(3)[:, :, np.newaxis], nb_frames, axis=2),
) # rotation is eye3
np.arange(0, rt_random_angles.time.size / 0.5, 1 / 0.5)
rt_with_time = Rototrans(
rt_random_angles, time=np.arange(0, rt_random_angles.time.size / 100, 1 / 100),
)
assert rt_with_time.time[-1] == 0.09
with pytest.raises(IndexError):
Rototrans(data=np.zeros(1))
with pytest.raises(IndexError):
Rototrans.from_euler_angles(
angles=random_vector[..., :5],
translations=random_vector,
angle_sequence="x",
)
with pytest.raises(IndexError):
Rototrans.from_euler_angles(angles=random_vector, angle_sequence="x")
with pytest.raises(ValueError):
Rototrans.from_euler_angles(angles=random_vector, angle_sequence="nop")
def test_rt_from_markers():
all_m = Markers.from_random_data()
rt_xy = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="xy",
axis_to_recalculate="y",
)
rt_yx = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 2]),
axis_2=all_m.isel(channel=[0, 1]),
axes_name="yx",
axis_to_recalculate="y",
)
rt_xy_x_recalc = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="yx",
axis_to_recalculate="x",
)
rt_xy_x_recalc = rt_xy_x_recalc.isel(col=[1, 0, 2, 3])
rt_xy_x_recalc[:, 2, :] = -rt_xy_x_recalc[:, 2, :]
rt_yz = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="yz",
axis_to_recalculate="z",
)
rt_zy = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 2]),
axis_2=all_m.isel(channel=[0, 1]),
axes_name="zy",
axis_to_recalculate="z",
)
rt_xy_from_yz = rt_yz.isel(col=[1, 2, 0, 3])
rt_xz = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="xz",
axis_to_recalculate="z",
)
rt_zx = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 2]),
axis_2=all_m.isel(channel=[0, 1]),
axes_name="zx",
axis_to_recalculate="z",
)
rt_xy_from_zx = rt_xz.isel(col=[0, 2, 1, 3])
rt_xy_from_zx[:, 2, :] = -rt_xy_from_zx[:, 2, :]
np.testing.assert_array_equal(rt_xy, rt_xy_x_recalc)
np.testing.assert_array_equal(rt_xy, rt_yx)
np.testing.assert_array_equal(rt_yz, rt_zy)
np.testing.assert_array_equal(rt_xz, rt_zx)
np.testing.assert_array_equal(rt_xy, rt_xy_from_yz)
np.testing.assert_array_equal(rt_xy, rt_xy_from_zx)
# Produce one that we know the solution
ref_m = Markers(np.array(((1, 2, 3), (4, 5, 6), (6, 5, 4))).T[:, :, np.newaxis])
rt_xy_from_known_m = Rototrans.from_markers(
origin=ref_m.isel(channel=[0]),
axis_1=ref_m.isel(channel=[0, 1]),
axis_2=ref_m.isel(channel=[0, 2]),
axes_name="xy",
axis_to_recalculate="y",
)
rt_xy_expected = Rototrans(
np.array(
[
[0.5773502691896257, 0.7071067811865475, -0.408248290463863, 1.0],
[0.5773502691896257, 0.0, 0.816496580927726, 2.0],
[0.5773502691896257, -0.7071067811865475, -0.408248290463863, 3.0],
[0, 0, 0, 1.0],
]
)
)
np.testing.assert_array_equal(rt_xy_from_known_m, rt_xy_expected)
exception_default_params = dict(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="xy",
axis_to_recalculate="y",
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(origin=all_m.isel(channel=[0, 1]))}
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(axis_1=all_m.isel(channel=[0]))}
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(axis_2=all_m.isel(channel=[0]))}
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{
**exception_default_params,
**dict(axis_1=all_m.isel(channel=[0, 1], time=slice(None, 50))),
}
)
with pytest.raises(ValueError):
Rototrans.from_markers(**{**exception_default_params, **dict(axes_name="yyz")})
with pytest.raises(ValueError):
Rototrans.from_markers(**{**exception_default_params, **dict(axes_name="xxz")})
with pytest.raises(ValueError):
Rototrans.from_markers(**{**exception_default_params, **dict(axes_name="zzz")})
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(axis_to_recalculate="h")}
)
def test_rt_transpose():
n_frames = 10
angles = Angles.from_random_data(size=(3, 1, n_frames))
rt = Rototrans.from_euler_angles(angles, angle_sequence="xyz")
rt_t = Rototrans.from_transposed_rototrans(rt)
rt_t_expected = np.zeros((4, 4, n_frames))
rt_t_expected[3, 3, :] = 1
for row in range(rt.row.size):
for col in range(rt.col.size):
for frame in range(rt.time.size):
rt_t_expected[col, row, frame] = rt[row, col, frame]
for frame in range(rt.time.size):
rt_t_expected[:3, 3, frame] = -rt_t_expected[:3, :3, frame].dot(
rt[:3, 3, frame]
)
np.testing.assert_array_almost_equal(rt_t, rt_t_expected, decimal=10)
def test_average_rt():
# TODO: investigate why this does not work
# angles = Angles.from_random_data(size=(3, 1, 100))
# or
# angles = Angles(np.arange(300).reshape((3, 1, 100)))
angles = Angles(np.random.rand(3, 1, 100))
seq = "xyz"
rt = Rototrans.from_euler_angles(angles, seq)
rt_mean = Rototrans.from_averaged_rototrans(rt)
angles_mean = Angles.from_rototrans(rt_mean, seq).isel(time=0)
angles_mean_ref = Angles.from_rototrans(rt, seq).mean(dim="time")
np.testing.assert_array_almost_equal(angles_mean, angles_mean_ref, decimal=2)
| 2.234375
| 2
|
applications/popart/bert/tests/unit/tensorflow/transformer_position_encoding_test.py
|
xihuaiwen/chinese_bert
| 0
|
12781519
|
<filename>applications/popart/bert/tests/unit/tensorflow/transformer_position_encoding_test.py
# Copyright 2019 Graphcore Ltd.
import math
import pytest
import numpy as np
import tensorflow as tf
import popart
from bert_model import BertConfig, Bert
# Extracted from the official Transformer repo:
# https://github.com/tensorflow/models/blob/638ba7a407b455de91be7cf85a1ff3a7f86cc958/official/transformer/model/model_utils.py#L32
def get_position_encoding_tf(length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
position = tf.cast(tf.range(length), tf.float32)
num_timescales = hidden_size // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.cast(num_timescales, tf.float32) - 1))
inv_timescales = min_timescale * tf.exp(
tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * \
tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
return signal
@pytest.mark.parametrize("position_length, hidden_size", [(512, 768), (128, 768), (384, 768)])
def test_positional_encoding_data(position_length, hidden_size):
if not tf.executing_eagerly():
tf.enable_eager_execution()
assert(tf.executing_eagerly())
builder = popart.Builder(opsets={
"ai.onnx": 9,
"ai.onnx.ml": 1,
"ai.graphcore": 1
})
config = BertConfig(vocab_length=9728,
batch_size=1,
hidden_size=hidden_size,
max_positional_length=position_length,
sequence_length=128,
activation_type='relu',
popart_dtype="FLOAT",
no_dropout=True,
positional_embedding_init_fn="TRANSFORMER",
inference=True)
popart_model = Bert(config, builder=builder)
shape = (config.max_positional_length, config.hidden_size)
pos_pa = popart_model.generate_transformer_periodic_pos_data(
config.dtype, shape)
pos_tf = get_position_encoding_tf(shape[0], shape[1]).numpy()
# Tensorflow broadcast multiplication seems to produce slightly different results
# to numpy, hence the higher than expected error. The embeddings do correlate well
# between the two despite this.
assert(np.all(np.abs(pos_tf - pos_pa) < 5e-5))
def simplified_generator(pos_length, hidden_size):
"""
Taken verbatim from the Enigma data generator implementation
"""
scale = 4
def value(x, y, siz):
return .02/.707*np.cos(2*scale*np.pi*x*y/siz)
data = []
for x in range(pos_length):
data.append([])
for y in range(hidden_size):
data[x].append(value(x, y, hidden_size))
return np.asarray(data)
@pytest.mark.parametrize("position_length, hidden_size", [(512, 768), (128, 768), (384, 768)])
def test_simplified_position_encoding(position_length, hidden_size):
builder = popart.Builder(opsets={
"ai.onnx": 9,
"ai.onnx.ml": 1,
"ai.graphcore": 1
})
config = BertConfig(vocab_length=9728,
batch_size=1,
hidden_size=hidden_size,
max_positional_length=position_length,
sequence_length=128,
activation_type='relu',
popart_dtype="FLOAT",
no_dropout=True,
positional_embedding_init_fn="SIMPLIFIED",
inference=True)
popart_model = Bert(config, builder=builder)
shape = (config.max_positional_length, config.hidden_size)
pa_data = popart_model.generate_simplified_periodic_pos_data(
config.dtype, shape)
bb_data = simplified_generator(position_length, hidden_size)
assert(np.all(np.abs(bb_data - pa_data) < 1e-8))
| 2.171875
| 2
|
vision/tests/unit/test_helpers.py
|
DaveCheez/google-cloud-python
| 2
|
12781520
|
<filename>vision/tests/unit/test_helpers.py
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import io
import unittest
import mock
from google.auth.credentials import Credentials
from google.cloud.vision_v1 import ImageAnnotatorClient
from google.cloud.vision_v1 import types
class TestSingleImageHelper(unittest.TestCase):
def setUp(self):
credentials = mock.Mock(spec=Credentials)
self.client = ImageAnnotatorClient(credentials=credentials)
@mock.patch.object(ImageAnnotatorClient, "batch_annotate_images")
def test_all_features_default(self, batch_annotate):
# Set up an image annotation request with no features.
image = types.Image(source={"image_uri": "http://foo.com/img.jpg"})
request = types.AnnotateImageRequest(image=image)
assert not request.features
# Perform the single image request.
self.client.annotate_image(request)
        # Evaluate the argument sent to batch_annotate_images.
assert batch_annotate.call_count == 1
_, args, kwargs = batch_annotate.mock_calls[0]
# Only a single request object should be sent.
assert len(args[0]) == 1
        # Evaluate the request object to ensure it looks correct.
request_sent = args[0][0]
all_features = self.client._get_all_features()
assert request_sent.image is request.image
assert len(request_sent.features) == len(all_features)
@mock.patch.object(ImageAnnotatorClient, "batch_annotate_images")
def test_explicit_features(self, batch_annotate):
# Set up an image annotation request with no features.
image = types.Image(source={"image_uri": "http://foo.com/img.jpg"})
request = types.AnnotateImageRequest(
image=image,
features=[
types.Feature(type=1),
types.Feature(type=2),
types.Feature(type=3),
],
)
# Perform the single image request.
self.client.annotate_image(request)
        # Evaluate the argument sent to batch_annotate_images.
assert batch_annotate.call_count == 1
_, args, kwargs = batch_annotate.mock_calls[0]
# Only a single request object should be sent.
assert len(args[0]) == 1
        # Evaluate the request object to ensure it looks correct.
request_sent = args[0][0]
assert request_sent.image is request.image
assert len(request_sent.features) == 3
for feature, i in zip(request_sent.features, range(1, 4)):
assert feature.type == i
assert feature.max_results == 0
@mock.patch.object(ImageAnnotatorClient, "batch_annotate_images")
def test_image_file_handler(self, batch_annotate):
# Set up a file handler.
file_ = io.BytesIO(b"bogus==")
# Perform the single image request.
self.client.annotate_image({"image": file_})
# Evaluate the argument sent to batch_annotate_images.
assert batch_annotate.call_count == 1
_, args, kwargs = batch_annotate.mock_calls[0]
# Only a single request object should be sent.
assert len(args[0]) == 1
        # Evaluate the request object to ensure it looks correct.
request_sent = args[0][0]
assert request_sent["image"]["content"] == b"bogus=="
@mock.patch.object(ImageAnnotatorClient, "batch_annotate_images")
@mock.patch.object(io, "open")
def test_image_filename(self, io_open, batch_annotate):
# Make io.open send back a mock with a read method.
file_ = mock.MagicMock(spec=io.BytesIO)
io_open.return_value = file_
file_.__enter__.return_value = file_
file_.read.return_value = b"imagefile=="
# Perform the single image request using a filename.
self.client.annotate_image({"image": {"source": {"filename": "image.jpeg"}}})
# Establish that my file was opened.
io_open.assert_called_once_with("image.jpeg", "rb")
        # Evaluate the argument sent to batch_annotate_images.
assert batch_annotate.call_count == 1
_, args, kwargs = batch_annotate.mock_calls[0]
# Only a single request object should be sent.
assert len(args[0]) == 1
        # Evaluate the request object to ensure it looks correct.
request_sent = args[0][0]
assert request_sent["image"]["content"] == b"imagefile=="
| 2.390625
| 2
|
screen-pass/models.py
|
jmatune/screen-pass
| 0
|
12781521
|
<filename>screen-pass/models.py
import numpy as np
import pandas as pd
from keras.layers import Conv2D, Dense, Dropout
| 1.414063
| 1
|
messages/GetUserIdMessage.py
|
zadjii/nebula
| 2
|
12781522
|
# last generated 2016-10-14 13:51:52.122000
from messages import BaseMessage
from msg_codes import GET_USER_ID as GET_USER_ID
__author__ = 'Mike'
class GetUserIdMessage(BaseMessage):
def __init__(self, username=None):
super(GetUserIdMessage, self).__init__()
self.type = GET_USER_ID
self.username = username
@staticmethod
def deserialize(json_dict):
msg = GetUserIdMessage()
msg.username = json_dict['username']
return msg
| 2.46875
| 2
|
users/models.py
|
thiagoferreiraw/mixapp
| 4
|
12781523
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from events.models import Category, Language, City
import uuid
class UserCategory(models.Model):
user_id = models.ForeignKey(User)
category_id = models.ForeignKey(Category)
class Meta:
unique_together = (("user_id", "category_id"),)
class SignupInvitation(models.Model):
email_invited = models.EmailField()
invited_by = models.ForeignKey(User, null=True)
user_has_signed_up = models.BooleanField(default=False)
created_at = models.DateField(auto_now_add=True, blank=True, editable=True)
hash = models.UUIDField(default=uuid.uuid4, unique=True, editable=True)
def __str__(self):
return self.email_invited + " - " + str(self.hash)
class SignupWaitingList(models.Model):
email = models.EmailField(unique=True)
created_at = models.DateField(auto_now_add=True, blank=True, editable=True)
def __str__(self):
return self.email
class UserLanguage(models.Model):
user_id = models.ForeignKey(User)
language_id = models.ForeignKey(Language)
class Meta:
unique_together = (("user_id", "language_id"),)
class Profile(models.Model):
user = models.OneToOneField(User, related_name='profile', on_delete=models.CASCADE)
birth_city = models.ForeignKey(City, related_name="birth_city", null=True)
current_city = models.ForeignKey(City, related_name="current_city", null=True)
birth_date = models.DateField(null=True, blank=True)
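# The two post_save receivers below keep a Profile row in sync with every User: the first creates
# the profile when a user is first created, the second saves it whenever the user is saved.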
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
| 2.21875
| 2
|
basicprograms/StudentDetails.py
|
poojavaibhavsahu/Pooja_Python
| 0
|
12781524
|
<reponame>poojavaibhavsahu/Pooja_Python
stuname="pooja"
rollno=102
age=25
marks=98.7
print("Student nameis:",stuname,"\n","student roll no is",rollno,"\n",
"student age is",age,"\n","student marks is",marks)
| 2.984375
| 3
|
Curso em video/005.py
|
wandersoncamp/python
| 0
|
12781525
|
<reponame>wandersoncamp/python
n=int(input("Informe um numero: "))
print("Analisando o valor", n, ", seu antecessor é", n-1, ", e seu sucessor é", n+1)
| 3.703125
| 4
|
pedigree/woah2.py
|
careforsometeasir/workexperience2018
| 0
|
12781526
|
<reponame>careforsometeasir/workexperience2018<filename>pedigree/woah2.py
# By <NAME> 2018
import csv
# CSV module is used for various delimiter separated files
# ~~ the comments following PEP8 - comments should start with '# ' - are actual comments
# the rest is code that's been commented out ~~
# Here's the main function for processing a file
def pedigree(filepath: str, *, delimiter='\t') -> dict:
file = open(filepath)
read = csv.reader(file, delimiter=delimiter)
data = list(read)
file.close()
#print(data)
# This is a nice key to swap the various symbols for one that mean the same thing
key = {
'x': 'x',
'*': ' x ',
'/': ' x ',
'(': '(',
'[': '(',
'{': '(',
']': ')',
')': ')',
'}': ')'
}
# This dictionary will contain the formatted data
stuff = {}
# If you're going to run this on a pedigree with 3 columns, the first being line numbers, uncomment the next 2 lines, or comment them to run a pedigree with 2.
#for row in data:
#del row[0]
#print(data)
#print('---------------')
# This for loop replaces substrings in the data using the key
for row in data:
for k, v in key.items():
row[1] = row[1].replace(k, v)
#print(row[1])
# Here an unknown cross is replaced with an empty string
row[1] = row[1].replace('Unknown cross', '')
# This is the mess that replaces substrings for other strings, for the eval() function
pre = '[\'' + row[1].replace(' x ', '\', \'') + '\']'
#print(pre)
nxt = pre.replace(')\'', '\']').replace("\'(", '[\'').replace("\'(", '[\'').replace(') \'', '\']').replace(')\'', '\']')
#if nxt.count('[') > nxt.count(']'):
#nxt = nxt + ']'
#elif nxt.count(']') > nxt.count('['):
#nxt = '[' + nxt
#print(nxt)
# Here the program TRIES to evaluate ( eval() ) the string
try:
stuff[row[0]] = eval(nxt)
# if it's unable to then it sets it to UNABLE TO PARSE
except SyntaxError:
stuff[row[0]] = ['UNABLE TO PARSE']
#print(stuff[row[0]])
#print('---------------')
# final product is returned
return stuff
# I couldn't think of a name for this function
# It is a recurring function that changes the parents so that all of the children have parents like 'name, name'
def badoodle(d: dict, counter=0) -> dict:
# Making a copy of the dictionary to modify, so that I don't modify a dictionary while looping through it
stuff2 = d.copy()
for k in d.keys():
val = d[k]
for i in val:
if type(i) is list:
counter += 1
index = d[k].index(i)
stuff2[k][index] = 'x' + str(counter)
stuff2['x' + str(counter)] = i
# This is quite inefficient but it loops through the list to see if it contains a list
# and then sends it off to itself
for x in i:
                    if type(x) is list:
stuff2 = badoodle(stuff2, counter)
# returns the result
return stuff2
# COPIED FROM woah.py
def helium(d: dict) -> str:
temp = ""
for key in d.keys():
temp2 = ""
for x in d[key]:
temp2 = temp2 + '\t' + x
temp = temp + key + temp2 + '\n'
return temp
p = badoodle(pedigree('pedigrees2.txt'))
print(p)
# Helium:
#print(helium(p))
# from Ryszard :)
| 3.828125
| 4
|
openpipe/utils/action_loader.py
|
OpenPipe/openpipe
| 2
|
12781527
|
<gh_stars>1-10
"""
This module provides action load and instantiation functions
"""
import traceback
import re
import sys
from sys import stderr
from os import getenv, getcwd
from os.path import splitext, join, dirname, expanduser, abspath
from importlib import import_module
from glob import glob
from wasabi import TracebackPrinter
from .action_config_schema import validate_config_schema
from .action_config import validate_provided_config
def action_module_name(action_name):
""" returns the module name mapped from the action name """
action_words = action_name.split(" ")
package_name = action_words[:-1]
module_name = action_words[-1] + "_"
return "openpipe.actions." + (".".join(package_name + [module_name]))
def load_action_module(action_name, action_label):
""" Load the python module associated with an action name """
action_path = action_module_name(action_name)
tb = TracebackPrinter(tb_base="openpipe", tb_exclude=("core.py",))
try:
action_module = import_module(action_path)
except ModuleNotFoundError as error:
print("Error loading module", action_path, file=stderr)
print("Required for action:", action_label, file=stderr)
error = tb("Module not found:", error.name, tb=traceback.extract_stack())
raise ModuleNotFoundError(error) from None
except ImportError as error:
print("Error loading module", action_path, file=stderr)
print("Required for action:", action_label, file=stderr)
error = tb("ImportError", error.msg, tb=traceback.extract_stack())
raise ImportError(error) from None
if not hasattr(action_module, "Action"):
print(
"Module {} does not provide a Action class!".format(action_module),
file=stderr,
)
print("Required for action:", action_label, file=stderr)
raise NotImplementedError
validate_config_schema(action_module.Action, action_label)
return action_module
def yaml_attribute(text):
text = text.strip("\n")
text = text.rstrip("\n ")
text = re.sub("^ ", " ", text, flags=re.MULTILINE)
return text
def create_action_instance(action_name, action_config, action_label):
""" Create an instance for a module, after validating the provided config"""
action_module = load_action_module(action_name, action_label)
action_class = action_module.Action
action_config = validate_provided_config(action_class, action_label, action_config)
instance = action_class(action_config, action_label)
return instance
def get_action_metadata(action_name, action_label):
""" Get the metadata from an action module """
action_module = load_action_module(action_name, action_label)
action_class = action_module.Action
filename, extension = splitext(action_module.__file__)
filename = filename.strip("_")
test_filename = filename + "_test.yaml"
meta = {
"name": action_name,
"module_name": action_module_name(action_name).replace(".", "/") + ".py",
"purpose": action_module.__doc__.splitlines()[1],
"test_filename": test_filename,
}
for attribute_name in dir(action_class):
if attribute_name[0] == "_": # Skip internal attributes
continue
attribute_value = getattr(action_class, attribute_name) # Skip non strings
if not isinstance(attribute_value, str):
continue
meta[attribute_name] = yaml_attribute(attribute_value)
return meta
def load_actions_paths():
"""
Return a list of paths that should be used too search for action modules.
The following order will be used:
"""
action_search_list = []
# ../actions/
actions_dir = join(dirname(__file__), "..", "actions")
action_search_list.append(actions_dir)
# current directory/openpipe/actions
cwd = getcwd()
actions_dir = join(cwd, "openpipe", "actions")
action_search_list.append(actions_dir)
running_on_tox = getenv("TOX_ENV_NAME", False)
if not running_on_tox:
        # ~/.openpipe/libraries_cache/*/*/openpipe/actions
search_pattern = join(
expanduser("~"),
".openpipe",
"libraries_cache",
"*",
"*",
"openpipe",
"actions",
)
for search_dir in glob(search_pattern):
action_search_list.append(search_dir)
# Add them to sys.path for action loading
for action_path in action_search_list:
base_lib_path = abspath(join(action_path, "..", ".."))
sys.path.insert(0, base_lib_path) # We will need to load the module
action_search_list = [abspath(path) for path in action_search_list]
return action_search_list
# We want action paths inserted on PYTHON path for any posterior loading
load_actions_paths()
| 2.390625
| 2
|
dataset/prepare_SIG17.py
|
Susmit-A/FSHDR
| 6
|
12781528
|
import shutil, os, glob
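# Added summary comment: this script copies the raw 'train' and 'test' folders into a numbered
# SIG17/ layout -- each train scene gets dynamic/, static/, hdr_gt.hdr and input_exp.txt, while
# each val scene gets dynamic/, hdr_gt.hdr and input_exp.txt.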
src = 'train'
dst = 'SIG17/train'
if not os.path.exists('SIG17'):
os.mkdir('SIG17')
os.mkdir(dst)
elif not os.path.exists(dst):
os.mkdir(dst)
count = 1
for s in glob.glob(src + '/*'):
d = dst + '/{:03d}'.format(count)
if not os.path.exists(d):
os.mkdir(d)
os.mkdir(d + '/dynamic')
os.mkdir(d + '/static')
shutil.copyfile(s + '/input_1_aligned.tif', d + '/dynamic/le.tif')
shutil.copyfile(s + '/input_2_aligned.tif', d + '/dynamic/me.tif')
shutil.copyfile(s + '/input_3_aligned.tif', d + '/dynamic/he.tif')
shutil.copyfile(s + '/ref_1_aligned.tif', d + '/static/le.tif')
shutil.copyfile(s + '/ref_2_aligned.tif', d + '/static/me.tif')
shutil.copyfile(s + '/ref_3_aligned.tif', d + '/static/he.tif')
shutil.copyfile(s + '/ref_hdr_aligned.hdr', d + '/hdr_gt.hdr')
shutil.copyfile(s + '/input_exp.txt', d + '/input_exp.txt')
print(str(count) + ' folders transferred')
count += 1
src = 'test'
dst = 'SIG17/val'
if not os.path.exists(dst):
os.mkdir(dst)
count = 1
for s in glob.glob(src + '/*'):
d = dst + '/{:03d}'.format(count)
if not os.path.exists(d):
os.mkdir(d)
os.mkdir(d + '/dynamic')
shutil.copyfile(s + '/input_1_aligned.tif', d + '/dynamic/le.tif')
shutil.copyfile(s + '/input_2_aligned.tif', d + '/dynamic/me.tif')
shutil.copyfile(s + '/input_3_aligned.tif', d + '/dynamic/he.tif')
shutil.copyfile(s + '/ref_hdr_aligned.hdr', d + '/hdr_gt.hdr')
shutil.copyfile(s + '/input_exp.txt', d + '/input_exp.txt')
print(str(count) + ' folders transferred')
count += 1
| 2.5
| 2
|
emka_trans/accounts/forms.py
|
mmamica/ERP-system
| 0
|
12781529
|
<reponame>mmamica/ERP-system
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserChangeForm
from accounts.models import UserProfileInfo
from django.contrib.auth import authenticate
import requests
from admin_app.models import Magazine
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta():
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password')
class UserProfileInfoForm(forms.ModelForm):
city = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'id': 'city'}))
street = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'id': 'street'}))
house_number = forms.IntegerField(
widget=forms.TextInput(attrs={'id': 'house_number', 'onchange': 'geocode(platform)'}))
def clean(self):
# Geocode the address with the HERE API, then verify that the driving
# distance to Magazine #1 is within that magazine's radius.
city = self.cleaned_data.get('city')
street = self.cleaned_data.get('street')
house_number = self.cleaned_data.get('house_number')
search_text = city + ' ' + street + ' ' + str(house_number)
message = requests.get('https://geocoder.api.here.com/6.2/geocode.json',
{'app_id': 'Z7uukAiQbHvHZ43KIBKW', 'app_code': 'nadFSh5EHBHkTdUQ3YnTEg',
'searchtext': search_text})
data = message.json()
if (data['Response']['View']==[]):
raise forms.ValidationError("Nieprawidłowy adres")
latitude = data['Response']['View'][0]['Result'][0]['Location']['NavigationPosition'][0]['Latitude']
longitude = data['Response']['View'][0]['Result'][0]['Location']['NavigationPosition'][0]['Longitude']
waypoint0 = 'geo!' + str(latitude) + ',' + str(longitude)
waypoint1 = 'geo!' + str(Magazine.objects.get(id_magazine=1).latitude) + ',' + str(
Magazine.objects.get(id_magazine=1).longitude)
message2 = requests.get('https://route.api.here.com/routing/7.2/calculateroute.json',
{'app_id': 'Z7uukAiQbHvHZ43KIBKW', 'app_code': 'nadFSh5EHBHkTdUQ3YnTEg',
'waypoint0': waypoint0, 'waypoint1': waypoint1,
'mode': 'fastest;car;traffic:disabled'})
data2 = message2.json()
distance = data2['response']['route'][0]['summary']['distance']
magazine_distance=Magazine.objects.get(id_magazine=1).radius*1000
if distance > magazine_distance:
raise forms.ValidationError("Firma jest za daleko")
return self.cleaned_data
class Meta():
model = UserProfileInfo
fields = ('company_name', 'phone_number', 'city', 'street', 'house_number', 'profile_pic',
'is_client')
class EditUserForm(forms.ModelForm):
class Meta():
model = User
fields = ('username', 'first_name', 'last_name', 'email')
class EditProfileForm(forms.ModelForm):
class Meta():
model=UserProfileInfo
fields = ('company_name','phone_number','profile_pic')
class LoginForm(forms.Form):
username = forms.CharField(max_length=255, required=True)
password = forms.CharField(widget=forms.PasswordInput, required=True)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if not user or not user.is_active:
raise forms.ValidationError("Incorrect login or password")
return self.cleaned_data
def login(self, request):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
user = authenticate(username=username, password=password)
return user
| 2.078125
| 2
|
col and pos.py
|
01ritvik/whatsapp-chatbox
| 0
|
12781530
|
import pyautogui as pt
from time import sleep
while True:
posXY = pt.position()
print(posXY, pt.pixel(posXY[0],posXY[1]))
sleep(1)
if posXY[0] == 0:
break
# This program prints the cursor position and the pixel color under it once per second.
# The loop exits when the cursor's x coordinate reaches 0.
| 2.90625
| 3
|
zim_to_md.py
|
xuyuan/zim_to
| 0
|
12781531
|
'''
command for zim custom tool: python path/to/zim_to_md.py -T %T -f %f -D %D
* Markdown format: https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet
'''
import argparse
import pyperclip
from zim.formats import get_parser, StubLinker
from zim.formats.markdown import Dumper as TextDumper
class Dumper(TextDumper):
'''Inherit from the Markdown format Dumper class, only overload things that are different'''
def dump_object(self, tag, attrib, strings=None):
if 'type' in attrib:
t = attrib['type']
if t == 'code':
c = attrib.get('lang', "")
return ['```%s\n' % c] + strings + ['```\n']
return super(Dumper, self).dump_object(tag, attrib, strings)
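# Illustrative example (not from the original plugin): a code object with lang set
# to "python" and body "print(1)\n" would be dumped as "```python\nprint(1)\n```\n".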
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-T', dest='wiki_text', help='the selected text including wiki formatting')
parser.add_argument('-f', dest='file', help='the page source as temporary file')
parser.add_argument('-D', dest='source_dir', help='the document root')
args = parser.parse_args()
zim_parser = get_parser('wiki')
if args.wiki_text:
wiki_text = args.wiki_text
else:
wiki_text = open(args.file).read()
tree = zim_parser.parse(wiki_text)
try:
linker = StubLinker(source_dir=args.source_dir)
dumper = Dumper(linker=linker)
lines = dumper.dump(tree)
textile_text = ''.join(lines).encode('utf-8')
pyperclip.copy(textile_text)
except Exception as e:
pyperclip.copy(str(e))
| 3.03125
| 3
|
simplecv/core/config.py
|
Bobholamovic/SimpleCV
| 44
|
12781532
|
<reponame>Bobholamovic/SimpleCV
from ast import literal_eval
import warnings
class AttrDict(dict):
def __init__(self, **kwargs):
super(AttrDict, self).__init__(**kwargs)
self.update(kwargs)
@staticmethod
def from_dict(d):
ad = AttrDict()
ad.update(d)
return ad
def __setitem__(self, key: str, value):
super(AttrDict, self).__setitem__(key, value)
super(AttrDict, self).__setattr__(key, value)
def update(self, config: dict):
for k, v in config.items():
if k not in self:
self[k] = AttrDict()
if isinstance(v, dict):
self[k].update(v)
else:
self[k] = v
def update_from_list(self, str_list: list):
assert len(str_list) % 2 == 0
for key, value in zip(str_list[0::2], str_list[1::2]):
key_list = key.split('.')
item = None
last_key = key_list.pop()
for sub_key in key_list:
item = self[sub_key] if item is None else item[sub_key]
try:
item[last_key] = literal_eval(value)
except ValueError:
item[last_key] = value
warnings.warn('a string value is set to {}'.format(key))
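# A minimal usage sketch (not part of the original module), assuming a config laid
# out as nested dicts: update_from_list applies dotted command-line overrides, with
# literal_eval turning "0.01" into a float and keeping non-literal strings as text.
if __name__ == '__main__':
    cfg = AttrDict.from_dict({'train': {'lr': 0.1}, 'model': {'name': 'base'}})
    cfg.update_from_list(['train.lr', '0.01', 'model.name', 'resnet'])
    print(cfg.train.lr, cfg.model.name)  # -> 0.01 resnet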
| 2.765625
| 3
|
modelling/scripts/percentdiscrep_marconvert_splitintozones.py
|
bcgov-c/wally
| 0
|
12781533
|
<reponame>bcgov-c/wally<filename>modelling/scripts/percentdiscrep_marconvert_splitintozones.py
import pandas as pd
import csv
# import data
df = pd.read_csv("../data/2_scrape_results/watersheds_quantiles_out_7_16_2021.csv")
# 20 percent discrepancy
indexNames = df[((df['drainage_area'] / df['drainage_area_gross']) - 1).abs() <= 0.2].index
df = df.iloc[indexNames]
# Mar adjustment
df['mean'] = (df['mean'] / df['drainage_area']) * 1000
print(df[['station_number','latitude', 'longitude', 'drainage_area', 'drainage_area_gross']])
# export to files
output_directory = "july16"
df.to_csv(f'../data/4_training/{output_directory}/all_data.csv', index=False, header=True)
# for zone, rows in df.groupby('hydrological_zone'):
# rows.to_csv(f'../data/4_training/{output_directory}/{round(zone)}.csv', index=False, header=True)
| 2.546875
| 3
|
setup.py
|
TommyStarK/apyml
| 1
|
12781534
|
# -*- coding: utf-8 -*-
from setuptools import find_packages
from cx_Freeze import setup, Executable
import apyml
install_requires = [
'cx_Freeze',
'pandas'
]
setup(
name='apyml',
version=apyml.__version__,
packages=find_packages(),
author=apyml.__author__,
author_email='<EMAIL>',
description='Apyml - a Machine learning model building tool for humans.',
long_description=open('README.md').read(),
install_requires=install_requires,
include_package_data=True,
url='https://github.com/TommyStarK/apyml',
classifiers=[
'Programming Language :: Python :: 3',
'Natural Language :: English',
'Operating System :: OS Independent',
],
entry_points={
'console_scripts': [
'apyml = apyml.__main__:main',
],
},
executables=[Executable('app.py')]
)
| 1.460938
| 1
|
neon_utils/skills/common_message_skill.py
|
NeonMariia/neon-skill-utils
| 0
|
12781535
|
<reponame>NeonMariia/neon-skill-utils
# NEON AI (TM) SOFTWARE, Software Development Kit & Application Framework
# All trademark and other rights reserved by their respective owners
# Copyright 2008-2022 Neongecko.com Inc.
# Contributors: <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>
# BSD-3 License
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABC, abstractmethod
from enum import IntEnum
from neon_utils.skills.neon_skill import NeonSkill
from neon_utils.logger import LOG
class CMSMatchLevel(IntEnum):
EXACT = 1 # Skill was requested specifically
MEDIA = 2 # Skill can handle the requested type of message (text, email, call, etc)
LOOSE = 3 # Skill can handle a generic message request
class CommonMessageSkill(NeonSkill, ABC):
"""
Skills that handle calls or messaging should be based on this class.
Skills must implement 'CMS_match_message_phrase', 'CMS_handle_send_message', 'CMS_match_call_phrase' and 'CMS_handle_place_call'.
This class works with the Communication skill, which handles intent matching and skill selection.
"""
def __init__(self, name=None, bus=None):
super().__init__(name, bus)
def bind(self, bus):
if bus:
super().bind(bus)
self.add_event("communication:request.message", self.__handle_send_message_request)
self.add_event("communication:send.message", self.__handle_send_message)
self.add_event("communication:request.call", self.__handle_place_call_request)
self.add_event("communication:place.call", self.__handle_place_call)
def __handle_place_call_request(self, message):
"""
Handler for a place call request. This is the entry point for this skill to handle a call request.
:param message: message generated by the Communication skill
"""
request_string = message.data["request"]
# notify the Communication skill that this skill is working on it
self.bus.emit(message.response({"request": request_string,
"skill_id": self.skill_id,
"searching": True}))
# Calculate match for this skill and respond
result: dict = self.CMS_match_call_phrase(request_string, message.context)
if result:
confidence = self.__calc_confidence(result, result.pop("conf"), message.context)
LOG.debug(f"DM: Response from {self.skill_id} ({message.msg_type})")
self.bus.emit(message.response({"request": request_string,
"skill_id": self.skill_id,
"conf": confidence,
"skill_data": result}))
else:
# Notify not handling request
self.bus.emit(message.response({"request": request_string,
"skill_id": self.skill_id,
"searching": False}))
def __handle_send_message_request(self, message):
"""
Handler for a send message request. This is the entry point for this skill to handle a request.
:param message: message generated by the Communication skill
"""
request_string = message.data["request"]
# notify the Communication skill that this skill is working on it
self.bus.emit(message.response({"request": request_string,
"skill_id": self.skill_id,
"searching": True}))
# Calculate match for this skill and respond
result: dict = self.CMS_match_message_phrase(request_string, message.context)
if result:
confidence = self.__calc_confidence(result, result.pop("conf"), message.context)
LOG.debug(f"DM: Response from {self.skill_id} ({message.msg_type})")
self.bus.emit(message.response({"request": request_string,
"skill_id": self.skill_id,
"conf": confidence,
"skill_data": result}))
else:
# Notify not handling request
self.bus.emit(message.response({"request": request_string,
"skill_id": self.skill_id,
"searching": False}))
def __handle_place_call(self, message):
"""
Callback method that handles when this skill has been selected to respond
:param message: message object associated with request
:return:
"""
if message.data["skill_id"] != self.skill_id:
# Not for this skill!
return
else:
self.CMS_handle_place_call(message)
def __handle_send_message(self, message):
"""
Callback method that handles when this skill has been selected to respond
:param message: message object associated with request
:return:
"""
if message.data["skill_id"] != self.skill_id:
# Not for this skill!
return
else:
self.CMS_handle_send_message(message)
@staticmethod
def __calc_confidence(result_data, level, context):
"""
Translates a confidence level to a 0-1 value
:param result_data (dict): data returned by skill
:param level (CMSMatchLevel): match level
:param context (dict): message context
:return (float): confidence level
"""
# Add 0.1 for each matched element (message, recipient, subject, etc.)
bonus = (len(result_data.keys()) - 1) * 0.1
# Lower confidence for sms requests that can't be handled
if result_data.get("kind") == "sms" and not context.get("mobile"):
bonus = -0.2
# Modify confidences for phone call if mobile can handle request
elif result_data.get("kind") == "call":
if context.get("mobile"):
bonus = 0.2
else:
bonus = -0.2
if level == CMSMatchLevel.EXACT:
return 1.0
elif level == CMSMatchLevel.MEDIA:
return 0.6 + bonus
elif level == CMSMatchLevel.LOOSE:
return 0.3 + bonus
else:
return 0.0
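# Worked example (illustrative, not from the original code): a MEDIA-level match
# whose skill_data holds three keys such as kind, recipient and message gets a
# bonus of (3 - 1) * 0.1 = 0.2, so the reported confidence is 0.6 + 0.2 = 0.8;
# an "sms" match in a non-mobile context is instead reported as 0.6 - 0.2 = 0.4.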
@abstractmethod
def CMS_match_message_phrase(self, request, context):
"""
Checks if this skill can handle the given message request
:param request: (str) user literal request to handle
:param context: (dict) message context
:return: (dict) confidence, data to be used in the callback if skill is selected
"""
return {}
@abstractmethod
def CMS_handle_send_message(self, message):
"""
Handles when this skill has been selected to respond
:param message: message associated with this skill's match submission
"""
pass
@abstractmethod
def CMS_match_call_phrase(self, contact, context):
"""
Checks if this skill can handle the given call request
:param contact: (str) requested contact (name or address)
:param context: (dict) message context
:return: (dict) confidence, data to be used in the callback if skill is selected
"""
return {}
@abstractmethod
def CMS_handle_place_call(self, message):
"""
Handles when this skill has been selected to respond
:param message: message associated with this skill's match submission
"""
pass
@staticmethod
def _extract_message_content(text):
"""
Generic method that attempts to extract the message content from a request to send a message
:param text: Input string to evaluate
:return: recipient_to_append(prefix), message
"""
if "that says " in text:
recipient_to_append, message = text.split("that says ", 1)
elif "saying " in text:
recipient_to_append, message = text.split("saying ", 1)
elif len(text) <= 1:
message = None
recipient_to_append = text
else:
recipient_to_append = None
message = text
return recipient_to_append, message
| 0.894531
| 1
|
study/vanilla/models.py
|
NunoEdgarGFlowHub/wavetorch
| 470
|
12781536
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class CustomRNN(nn.Module):
def __init__(self, input_size, output_size, hidden_size, batch_first=True, W_scale=1e-1, f_hidden=None):
super(CustomRNN, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.f_hidden = f_hidden
self.W1 = nn.Parameter((torch.rand(hidden_size, input_size)-0.5)*W_scale)
self.W2 = nn.Parameter((torch.rand(hidden_size, hidden_size)-0.5)*W_scale)
self.W3 = nn.Parameter((torch.rand(output_size, hidden_size)-0.5)*W_scale)
self.b_h = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
# Plain Elman-style recurrence over time: h_t = f(W2 h_{t-1} + W1 x_t + b_h)
# (f optional, set via f_hidden), with readout y_t = W3 h_t.
h1 = torch.zeros(x.shape[0], self.hidden_size)
ys = []
for i, xi in enumerate(x.chunk(x.size(1), dim=1)):
h1 = (torch.matmul(self.W2, h1.t()) + torch.matmul(self.W1, xi.t())).t() + self.b_h
if self.f_hidden is not None:
h1 = getattr(F, self.f_hidden)(h1)
y = torch.matmul(self.W3, h1.t()).t()
ys.append(y)
ys = torch.stack(ys, dim=1)
return ys
class CustomRes(nn.Module):
def __init__(self, input_size, output_size, hidden_size, batch_first=True, W_scale=1e-1, f_hidden=None):
super(CustomRes, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.f_hidden = f_hidden
self.W1 = torch.nn.Parameter((torch.rand(hidden_size, input_size)-0.5)*W_scale)
self.W2 = torch.nn.Parameter((torch.rand(hidden_size, hidden_size)-0.5)*W_scale)
self.W3 = torch.nn.Parameter((torch.rand(output_size, hidden_size)-0.5)*W_scale)
self.b_h = torch.nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
h1 = torch.zeros(x.shape[0], self.hidden_size)
ys = []
for i, xi in enumerate(x.chunk(x.size(1), dim=1)):
hprev = h1
h1 = (torch.matmul(self.W2, h1.t()) + torch.matmul(self.W1, xi.t())).t() + self.b_h
if self.f_hidden is not None:
h1 = getattr(F, self.f_hidden)(h1)
y = torch.matmul(self.W3, h1.t()).t()
ys.append(y)
h1 = h1 + hprev  # residual connection: add the previous hidden state before the next step
ys = torch.stack(ys, dim=1)
return ys
class CustomLSTM(nn.Module):
def __init__(self, input_size, output_size, hidden_size, batch_first=True, W_scale=1e-1):
super(CustomLSTM, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.lstm = nn.LSTM(input_size, hidden_size, batch_first=batch_first)
self.W3 = torch.nn.Parameter((torch.rand(output_size, hidden_size)-0.5))
def forward(self, x):
# out should have size [N_batch, T, N_hidden]
out, hidden = self.lstm(x.unsqueeze(2))
# print(torch.max(x, 1))
# print(x[:, 100])
# print(out[:, 100, 0].detach())
# ys should have size [N_batch, T, N_classes]
ys = torch.matmul(out, self.W3.t())
return ys
| 2.546875
| 3
|
src/larval_gonad/plotting/literature.py
|
jfear/larval_gonad
| 1
|
12781537
|
<reponame>jfear/larval_gonad
from typing import List
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
from larval_gonad.io import feather_to_cluster_matrix
from larval_gonad.plotting.common import get_fbgn2symbol
from larval_gonad.plotting.biomarkers import _cleanup_xaxis as _cleanup_xaxis_rep
def plot_lit_evidence_profile(
gene_metadata: str,
lit_evidence: str,
tpm_by_cluster: str,
germ_clusters: list,
axes: list = None,
):
"""Plot heatmap of evidence and expression patterns from the literature.
Most of the evidence patterns are protein based.
Example
-------
>>> from larval_gonad.config import read_config
>>> config = read_config("config/common.yaml")
>>> gene_metadata = f"references/gene_annotation_dmel_{config['tag']}.feather"
>>> lit_evidence = "data/external/miriam/lit_gene_dummy_vars.tsv"
>>> tpm_by_cluster = "output/seurat3-cluster-wf/tpm_by_cluster.feather"
>>> germ_clusters = config["germ"]
>>> plot_lit_evidence_profile(gene_metadata, lit_evidence, tpm_by_cluster, germ_clusters)
"""
fbgn2symbol = get_fbgn2symbol(gene_metadata)
germ_evidence = _get_lit_evidence(lit_evidence)[["SP", "ES", "MS", "LS"]]
this_study = list(map(lambda x: fbgn2symbol[x], _genes_w_ptraps(lit_evidence)))
binned_expression = _get_binned_expression(tpm_by_cluster)[germ_clusters]
df = germ_evidence.join(binned_expression).rename(fbgn2symbol)
if axes is None:
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(4, 8))
else:
ax1, ax2 = axes
defaults = dict(square=True, linewidths=0.01, linecolor="k", yticklabels=True, cbar=False)
sns.heatmap(data=df.iloc[:, :4], cmap=["#d3d3d3", "#450457", "#f8e621"], ax=ax1, **defaults)
sns.heatmap(data=df.iloc[:, 4:], cmap=["#450457", "#ff7800", "#f8e621"], ax=ax2, **defaults)
_cleanup_xaxis(ax1), _cleanup_yaxis(ax1, this_study)
_cleanup_xaxis(ax2), _cleanup_yaxis(ax2, this_study)
_add_legend(ax2)
def plot_lit_evidence_zscore_profile(
gene_metadata: str,
lit_evidence: str,
zscore_by_cluster_rep: str,
germ_clusters: list,
axes: list = None,
):
"""Plot heatmap of evidence and expression patterns from the literature.
Most of the evidence patterns are protein based.
Example
-------
>>> from larval_gonad.config import read_config
>>> config = read_config("config/common.yaml")
>>> gene_metadata = f"references/gene_annotation_dmel_{config['tag']}.feather"
>>> lit_evidence = "data/external/miriam/lit_gene_dummy_vars.tsv"
>>> zscore_by_cluster_rep = "output/seurat3-cluster-wf/zscore_by_cluster_rep.feather"
>>> germ_clusters = config["germ"]
>>> plot_lit_evidence_zscore_profile(gene_metadata, lit_evidence, zscore_by_cluster_rep, germ_clusters)
"""
fbgn2symbol = get_fbgn2symbol(gene_metadata)
germ_evidence = _get_lit_evidence(lit_evidence)[["SP", "ES", "MS", "LS"]]
this_study = list(map(lambda x: fbgn2symbol[x], _genes_w_ptraps(lit_evidence)))
zscore_expression = _get_zscore_expression(zscore_by_cluster_rep).loc[:, (germ_clusters, slice(None))]
df = germ_evidence.join(zscore_expression).rename(fbgn2symbol)
if axes is None:
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(4, 8))
else:
ax1, ax2 = axes
defaults = dict(square=True, linewidths=0.01, linecolor="k", yticklabels=True, xticklabels=True, cbar=False)
sns.heatmap(data=df.iloc[:, 4:], cmap='viridis', vmin=-3, vmax=3, ax=ax1, **defaults)
sns.heatmap(data=df.iloc[:, :4], cmap=["#d3d3d3", "#450457", "#f8e621"], ax=ax2, **defaults)
_cleanup_xaxis_rep(ax1, germ_clusters), _cleanup_yaxis(ax1, this_study)
_cleanup_xaxis(ax2), _cleanup_yaxis(ax2, this_study)
_add_legend(ax2)
def plot_lit_evidence_soma_profile(
gene_metadata: str,
lit_evidence: str,
tpm_by_cluster: str,
soma_clusters: list,
axes: list = None,
):
"""Plot heatmap of evidence and expression patterns from the literature.
Most of the evidence patterns are protein based.
Example
-------
>>> from larval_gonad.config import read_config
>>> config = read_config("config/common.yaml")
>>> gene_metadata = f"references/gene_annotation_dmel_{config['tag']}.feather"
>>> lit_evidence = "data/external/miriam/lit_gene_dummy_vars.tsv"
>>> tpm_by_cluster = "output/seurat3-cluster-wf/tpm_by_cluster.feather"
>>> soma_clusters = config["soma"]
>>> plot_lit_evidence_soma_profile(gene_metadata, lit_evidence, tpm_by_cluster, soma_clusters)
"""
fbgn2symbol = get_fbgn2symbol(gene_metadata)
soma_evidence = _get_lit_evidence(lit_evidence)[["C", "EC", "MC", "LC", "PC", "TE"]]
this_study = list(map(lambda x: fbgn2symbol[x], _genes_w_ptraps(lit_evidence)))
binned_expression = _get_binned_expression(tpm_by_cluster)[soma_clusters]
df = soma_evidence.join(binned_expression).rename(fbgn2symbol)
if axes is None:
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(4, 8))
else:
ax1, ax2 = axes
defaults = dict(square=True, linewidths=0.01, linecolor="k", yticklabels=True, cbar=False)
sns.heatmap(data=df.iloc[:, :7], cmap=["#d3d3d3", "#450457", "#f8e621"], ax=ax1, **defaults)
sns.heatmap(data=df.iloc[:, 7:], cmap=["#450457", "#ff7800", "#f8e621"], ax=ax2, **defaults)
_cleanup_xaxis(ax1), _cleanup_yaxis(ax1, this_study)
_cleanup_xaxis(ax2), _cleanup_yaxis(ax2, this_study)
_add_legend(ax2)
def plot_lit_evidence_zscore_soma_profile(
gene_metadata: str,
lit_evidence: str,
zscore_by_cluster_rep: str,
soma_clusters: list,
axes: list = None,
):
"""Plot heatmap of evidence and expression patterns from the literature.
Most of the evidence patterns are protein based.
Example
-------
>>> from larval_gonad.config import read_config
>>> config = read_config("config/common.yaml")
>>> gene_metadata = f"references/gene_annotation_dmel_{config['tag']}.feather"
>>> lit_evidence = "data/external/miriam/lit_gene_dummy_vars.tsv"
>>> zscore_by_cluster_rep = "output/seurat3-cluster-wf/zscore_by_cluster_rep.feather"
>>> soma_clusters = config["soma"]
>>> plot_lit_evidence_zscore_soma_profile(gene_metadata, lit_evidence, zscore_by_cluster_rep, soma_clusters)
"""
fbgn2symbol = get_fbgn2symbol(gene_metadata)
soma_evidence = _get_lit_evidence(lit_evidence)[["C", "EC", "MC", "LC", "PC", "TE"]]
this_study = list(map(lambda x: fbgn2symbol[x], _genes_w_ptraps(lit_evidence)))
zscore_expression = _get_zscore_expression(zscore_by_cluster_rep).loc[:, (soma_clusters, slice(None))]
df = soma_evidence.join(zscore_expression).rename(fbgn2symbol)
if axes is None:
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(4, 8))
else:
ax1, ax2 = axes
defaults = dict(square=True, linewidths=0.01, linecolor="k", yticklabels=True, xticklabels=True, cbar=False)
sns.heatmap(data=df.iloc[:, 6:], cmap='viridis', vmin=-3, vmax=3, ax=ax1, **defaults)
sns.heatmap(data=df.iloc[:, :6], cmap=["#d3d3d3", "#450457", "#f8e621"], ax=ax2, **defaults)
_cleanup_xaxis_rep(ax1, soma_clusters), _cleanup_yaxis(ax1, this_study)
_cleanup_xaxis(ax2), _cleanup_yaxis(ax2, this_study)
_add_legend(ax2)
def _get_lit_evidence(lit_evidence):
return pd.read_csv(lit_evidence, sep="\t", index_col=0).drop("References", axis=1)
def _genes_w_ptraps(lit_evidence):
return (
pd.read_csv(lit_evidence, sep="\t", index_col=0)
.query("References == 'This study'")
.index.tolist()
)
def _get_binned_expression(tpm_by_cluster):
"""Bin expression.
{
0 not expressed: TPM < 1,
1 low expression: 1 ≤ TPM < 5,
2 expressed: 5 ≤ TPM < Inf
}
"""
return feather_to_cluster_matrix(tpm_by_cluster).apply(
lambda x: pd.cut(x, [0, 1, 5, np.inf], labels=[0, 1, 2], right=False, include_lowest=True),
axis=1,
)
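# Illustrative sketch (not in the original file): the same pd.cut binning applied
# to a plain Series, showing how TPM values map onto the 0/1/2 codes above.
# >>> import pandas as pd, numpy as np
# >>> pd.cut(pd.Series([0.5, 3.0, 12.0]), [0, 1, 5, np.inf],
# ...        labels=[0, 1, 2], right=False, include_lowest=True).tolist()
# [0, 1, 2]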
def _get_zscore_expression(zscore_by_cluster_rep):
return pd.read_feather(zscore_by_cluster_rep).set_index(["FBgn", "cluster", "rep"]).squeeze().unstack([-2, -1])
def _cleanup_xaxis(ax):
ax.set_xlabel("")
ax.xaxis.set_ticks_position("top")
return ax
def _cleanup_yaxis(ax, this_study):
ax.set_ylabel("")
labels = []
for l in ax.get_yticklabels():
l.set(fontstyle="italic")
if l.get_text() in this_study:
l.set(fontweight="bold")
labels.append(l)
ax.set_yticklabels(labels)
return ax
def _add_legend(ax):
off = mpatches.Patch(color="#450457", label="absent")
# low = mpatches.Patch(color="#ff7800", label="low expression")
high = mpatches.Patch(color="#f8e621", label="present")
none = mpatches.Patch(color="#d3d3d3", label="not analyzed")
ax.legend(loc="upper left", bbox_to_anchor=[1, 1], handles=[off, high, none])
return ax
| 2.4375
| 2
|
views/frames/join_room_wait_frame.py
|
MrKelisi/battle-python
| 0
|
12781538
|
import time
from tkinter import *
from model.battle import ClientBattle
class JoinRoomWaitFrame(Frame):
def __init__(self, master):
Frame.__init__(self, master)
self.label = Label(self, font=("Helvetica", 18))
self.label.pack(side="top", pady=20)
self.players = Listbox(self)
self.players.pack(pady=15)
Label(self, text="En attente d'autres joueurs...").pack(pady=3)
def init(self):
def on_players_list_updated(players_names):
self.players.delete(0, END)
self.players.insert(0, self.master.handler.name + ' (vous)')  # "(you)"
for name in players_names:
self.players.insert(END, name)
def on_game_begin():
print('== Game begins!')
self.master.battle = ClientBattle(self.master.handler)
time.sleep(1)
self.master.raise_frame('game')
self.master.handler.on_players_list_updated = on_players_list_updated
self.master.handler.on_game_begin = on_game_begin
self.label['text'] = self.master.room_name
| 2.90625
| 3
|
code/test/test_compress.py
|
hackffm/actioncam
| 4
|
12781539
|
import helper_test
from configuration import Configuration
from local_services import Compress
from helper import Helper
configuration = Configuration('actioncam', path=helper_test.config_path())
helper = Helper(configuration.config)
helper.state_set_start()
debug = True
compress = Compress(configuration, helper, debug)
def test_compress_folder_does_not_exist(configuration, helper):
print('test_compress_folder_does_not_exist')
compress.config["compress_location"] = "DoesNotExist"
compressed = compress.compress()
assert 'not found' in compressed, "test_compress_folder_does_not_exist failed"
def test_compress(configuration, helper):
print('test_compress')
file_test = configuration.config["DEFAULT"]["recording_location"] + "/" + configuration.config["DEFAULT"]["identify"] + "_20211113" + "." + configuration.config["DEFAULT"]["output"]
helper.file_touch(file_test)
compressed = compress.compress()
assert 'zip' in compressed, "test_compress failed as no zip found in reply"
helper.file_delete(file_test)
compressed = compress.get_compressed()
print('Report')
for cmp in compressed:
print(cmp)
assert len(compressed) >= 1, "test_compress failed as not compressed found"
if __name__ == '__main__':
test_compress(configuration, helper)
test_compress_folder_does_not_exist(configuration, helper)
| 2.609375
| 3
|
third-party/casadi/experimental/sailboat.py
|
dbdxnuliba/mit-biomimetics_Cheetah
| 8
|
12781540
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 <NAME>, <NAME>, <NAME>,
# <NAME>. All rights reserved.
# Copyright (C) 2011-2014 <NAME>
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
from casadi import *
import matplotlib.pyplot as plt
import numpy
# Sailboat model based on
#
# [MF2011]:
# <NAME>, <NAME>
# "Tacking Simulation of Sailing Yachts with New Model of Aerodynamic
# Force Variation During Tacking Maneuver"
# Journal of Sailboat Technology, Article 2011-01
#
# <NAME>, UW Madison 2017
#
# Create DaeBuilder instance
dae = DaeBuilder()
# Load file with external functions
from os import path
curr_dir = path.dirname(path.abspath(__file__))
clib = Importer(curr_dir + '/sailboat_c.c', 'none')
# Options for external functions
# NOTE: These options should become redundant once code is more stable
external_opts = dict(enable_jacobian = False, enable_forward = False, \
enable_reverse = False, enable_fd = True)
# Physical constants
g = 9.81 # [m/s^2] gravity
rho = 1027. # [kg/m^3] density of ocean water
# Sailboat model constants. Cf. Table 1 [MF2011]
L = 8.80 # [m] Length of design waterline
D = 2.02 # [m] Design draft, including fin keel
m = 4410. # [kg] Displacement
GM = 1.45 # [m] metacentric height of boat
m_x = 160.; m_y_hull = 2130.; m_y_sail = 280.; m_z = 12000. # [kg] Added masses
Ixx = 17700.; Iyy = 33100.; Izz = 17200. # [kg m^2] Moments of inertia
Jxx_hull = 7200.; Jxx_sail = 8100.; Jyy = 42400.; Jzz = 6700. # [kg m^2] Added moments of inertia
X_pVV = 3.38e-1
X_pPP = 1.40e-3
X_pVVVV = -1.84
X_pT = -1.91e-2
Y_pV = -5.35e-1
Y_pP = -5.89e-3
Y_pVPP = 7.37e-1
Y_pVVP = -5.53e-1
Y_pVVV = 3.07
Y_pP = 2.19e-1
Y_pT = -4.01e-3
K_pV = 2.80e-1
K_pP = 3.36e-3
K_pVPP = -4.07e-1
K_pVVP = 2.24e-1
K_pVVV = -1.38
K_pT = -3.53e-1
N_pV = -3.23e-2
N_pP = -1.52e-2
N_pVPP = 2.71e-4
N_pVVP = -9.06e-2
N_pVVV = -2.98e-2
N_pT = -5.89e-3
C_Xd = -3.79e-2
C_Yd = -1.80e-1
C_Kd = 9.76e-2
C_Nd = 9.74e-2
# States
U = dae.add_x('U') # Velocity along the X axis
V = dae.add_x('V') # Velocity along the Y axis
phi = dae.add_x('phi') # Roll angle
theta = dae.add_x('theta') # Yaw angle
dphi = dae.add_x('dphi') # Time derivative of phi
dtheta = dae.add_x('dtheta') # Time derivative of theta
# Controls
beta = dae.add_u('beta')
# Sum contributions from hull and sail (?)
m_y = m_y_hull + m_y_sail
Jxx = Jxx_hull + Jxx_sail
# Auxiliary variables
# Squared boat velocity
V_B2 = U**2 + V**2
# To avoid duplicate expressions
cos_phi = cos(phi)
sin_phi = sin(phi)
cos2_phi = cos_phi**2
sin2_phi = sin_phi**2
phi2 = phi**2
# Hull resistance in the upright position
X_0_fun = dae.add_fun('hull_resistance', clib, external_opts)
X_0 = X_0_fun(U)
# Calculate hydrodynamic forces
V_p = sin(beta)
V_p2 = V_p**2
V_p3 = V_p2*V_p
V_p4 = V_p2**2
H_fact = 0.5*rho*V_B2*L*D
dae.add_d('X_H', (X_pVV*V_p2 + X_pPP*phi2 + X_pVVVV*V_p4)*H_fact)
dae.add_d('Y_H', (Y_pV*V_p + Y_pP*phi + Y_pVPP*V_p*phi2 + Y_pVVP*V_p2*phi + Y_pVVV*V_p3)*H_fact)
dae.add_d('K_H', (K_pV*V_p + K_pP*phi + K_pVPP*V_p*phi2 + K_pVVP*V_p2*phi + K_pVVV*V_p3)*H_fact*D)
dae.add_d('N_H', (N_pV*V_p + N_pP*phi + N_pVPP*V_p*phi2 + N_pVVP*V_p2*phi + N_pVVV*V_p3)*H_fact*L)
H = dae.add_fun('H', ['phi', 'beta', 'U', 'V'], ['X_H', 'Y_H', 'K_H', 'N_H'])
# Plot it for reference
ngrid_phi = 100; ngrid_beta = 100
U0 = 5.; V0 = 5. # [m/s] Boat speed for simulation
phi0 = numpy.linspace(-pi/4, pi/4, ngrid_phi)
beta0 = numpy.linspace(-pi/4, pi/4, ngrid_beta)
PHI0,BETA0 = numpy.meshgrid(phi0, beta0)
r = H(phi=PHI0, beta=BETA0, U=U0, V=V0)
for i,c in enumerate(['X_H', 'Y_H', 'K_H', 'N_H']):
plt.subplot(2,2,i+1)
CS = plt.contour(PHI0*180/pi, BETA0*180/pi, log10(r[c]))
plt.clabel(CS, inline=1, fontsize=10)
plt.title('log10(' + c + ')')
plt.grid(True)
# Make a function call
X_H, Y_H, K_H, N_H = H(phi, beta, U, V)
# Hydrodynamic derivatives of the hull due to yawing motion
X_VT = 0. # Neglected
Y_T = 0. # Neglected
N_T = 0. # Neglected
# Derivative due to rolling
Y_P = 0. # Neglected
K_P = 0. # Neglected
# Hydrodynamic forces on the rudder
X_R = 0. # Neglected
Y_R = 0. # Neglected
K_R = 0. # Neglected
N_R = 0. # Neglected
# Sail forces
X_S = 0. # Neglected
Y_S = 0. # Neglected
K_S = 0. # Neglected
N_S = 0. # Neglected
# Surge: (m+m_x)*dot(U) = F_X, cf. (3) [MF2011]
F_X = X_0 + X_H + X_R + X_S \
+ (m + m_y*cos2_phi + m_z*sin2_phi + X_VT)*V*dtheta
dae.add_ode("surge", F_X / (m+m_x))
# Sway: (m + m_y*cos2_phi + m_z*sin2_phi)*dot(V) = F_Y
F_Y = Y_H + Y_P*dphi + Y_T*dtheta + Y_R + Y_S \
- (m + m_x)*U*dtheta \
- 2*(m_z - m_y)*sin_phi*cos_phi*V*dphi
dae.add_ode("sway", F_Y / (m + m_y*cos2_phi + m_z*sin2_phi))
# Roll: (Ixx + Jxx)*dot(dphi) = F_K
F_K = K_H + K_P*dphi + K_R + K_S - m*g*GM*sin_phi \
+ ((Iyy+Jyy)-(Izz+Jzz))*sin_phi*cos_phi*dtheta**2
dae.add_ode("roll", F_K / (Ixx + Jxx))
# Yaw: ((Iyy+Jyy)*sin2_phi + (Izz+Jzz)*cos2_phi)*dot(dtheta) = F_N
F_N = N_H + N_T*dtheta + N_R + N_S \
-2*((Iyy+Jyy)-(Izz+Jzz))*sin_phi*cos_phi*dtheta*dphi
dae.add_ode("yaw", F_N / ((Iyy+Jyy)*sin2_phi + (Izz+Jzz)*cos2_phi))
# Roll angle
dae.add_ode("roll_angle", dphi)
# Yaw angle
dae.add_ode("yaw_angle", dtheta)
# Print ODE
print(dae)
# Generate Jacobian of ODE rhs w.r.t. to states and control
Jfcn = dae.create("Jfcn", ['x', 'u'], ['jac_ode_x', 'jac_ode_u'])
Jfcn_file = Jfcn.generate()
print('Jacobian function saved to ' + Jfcn_file)
plt.show()
| 2.078125
| 2
|
p731_my_calendar_ii.py
|
feigaochn/leetcode
| 0
|
12781541
|
<reponame>feigaochn/leetcode<filename>p731_my_calendar_ii.py
#!/usr/bin/env python
# coding: utf-8
class MyCalendarTwo:
def __init__(self):
self.events = []
def book(self, start, end):
"""
:type start: int
:type end: int
:rtype: bool
"""
# Sweep line over booking boundaries: +1 at each start, -1 at each end.
events = self.events[::]
events.append((start, 1))
events.append((end, -1))
events.sort()
count = 0
for (ts, value) in events:
count += value
# Three simultaneously open intervals would mean a triple booking.
if count >= 3:
return False
self.events = events
return True
if __name__ == '__main__':
obj = MyCalendarTwo()
print(obj.book(10, 20))
print(obj.book(50, 60))
print(obj.book(10, 40))
print(obj.book(5, 15))
print(obj.book(5, 10))
print(obj.book(25, 55))
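# Expected output of the calls above (a double booking is allowed, a triple is not):
# True, True, True, False, True, True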
| 3.859375
| 4
|
tests/test_fixtures.py
|
khusrokarim/ward
| 0
|
12781542
|
<reponame>khusrokarim/ward
from ward import expect, fixture, test
from ward.fixtures import Fixture, FixtureCache
@fixture
def exception_raising_fixture():
def i_raise_an_exception():
raise ZeroDivisionError()
return Fixture(fn=i_raise_an_exception)
@test("FixtureCache.cache_fixture can store and retrieve a single fixture")
def _(f=exception_raising_fixture):
cache = FixtureCache()
cache.cache_fixture(f)
expect(cache[f.key]).equals(f)
| 2.421875
| 2
|
binance_trading_bot/owl.py
|
virtueistheonlygood/trading-analysis-bot
| 1
|
12781543
|
from binance_trading_bot import utilities, visual, indicator
import matplotlib.pyplot as plt
plt.style.use('classic')
from matplotlib.ticker import FormatStrFormatter
import matplotlib.patches as mpatches
import math
def volume_spread_analysis(client, market,
NUM_PRICE_STEP, TIME_FRAME_STEP, TIME_FRAME, TIME_FRAME_DURATION):
nDigit = abs(int(math.log10(float(client.get_symbol_info(market)['filters'][0]['tickSize']))))
candles = utilities.get_candles(client, market, TIME_FRAME, TIME_FRAME_DURATION)
VRVP = indicator.volume_profile(client, market, NUM_PRICE_STEP, TIME_FRAME_STEP, TIME_FRAME_DURATION)
BBANDS = indicator.bbands(candles)
VSTOP = indicator.volatility_stop(candles, 20, 2)
RSI = indicator.rsi(candles, 14)
SMA = indicator.sma(candles)
# Visualization
VSTOP_COLOR = 'indigo'
SMA_COLOR = 'black'
BBANDS_COLOR = 'green'
VOLUME_COLOR = 'gray'
BUY_COLOR = 'black'
SELL_COLOR = 'red'
VOLATILITY_COLOR = 'black'
RSI_COLOR = 'black'
f,axes = plt.subplots(4, 1, gridspec_kw={'height_ratios':[3, 1, 1, 1]})
f.set_size_inches(20,20)
ax = axes[0]
axt = ax.twiny()
axt.barh(VRVP['price'],
VRVP['buy_volume'],
color='gray',
edgecolor='w',
height=VRVP['price'][1]-VRVP['price'][0],
align='center',
alpha=0.25)
axt.barh(VRVP['price'],
VRVP['buy_volume']+VRVP['sell_volume'],
color='gray',
edgecolor='w',
height=VRVP['price'][1]-VRVP['price'][0],
align='center',
alpha=0.25)
axt.set_xticks([])
for tic in axt.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
visual.candlestick2_ohlc(ax,
candles['open'],
candles['high'],
candles['low'],
candles['close'],
width=0.6, alpha=1)
ax.plot(VSTOP['support'], linewidth=2, color=VSTOP_COLOR, linestyle='-')
ax.plot(VSTOP['resistance'], linewidth=2, color=VSTOP_COLOR, linestyle='-')
ax.plot(BBANDS['middle_band'], linewidth=1, color=BBANDS_COLOR, linestyle='-')
ax.plot(BBANDS['upper_band'], linewidth=1, color=BBANDS_COLOR, linestyle='-')
ax.plot(BBANDS['lower_band'], linewidth=1, color=BBANDS_COLOR, linestyle='-')
ax.plot(SMA, linewidth=1, color=SMA_COLOR, linestyle='--')
if market=='BTCUSDT':
pivotList = []
for i in range(len(VSTOP)):
if math.isnan(VSTOP['support'].iat[i]):
if not math.isnan(VSTOP['support'].iat[i-1]):
pivotList.append(VSTOP['support'].iat[i-1])
if math.isnan(VSTOP['resistance'].iat[i]):
if not math.isnan(VSTOP['resistance'].iat[i-1]):
pivotList.append(VSTOP['resistance'].iat[i-1])
pivotList = sorted(pivotList)
for pivot in pivotList:
ax.text(len(candles)+.5, pivot, str(int(pivot)))
ax.yaxis.grid(True)
for tic in ax.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
ax.set_xticks([])
ax.set_yticks(VRVP['price_min'].append(VRVP['price_max'].tail(1)))
ax.set_xlim(-.5, len(candles))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.'+str(nDigit)+'f'))
ax.get_yaxis().set_label_coords(-0.075,0.5)
ax.set_ylabel("Price",fontsize=20)
ax.set_title(market+' '+TIME_FRAME.upper(), fontsize=30, y=1.03, loc='left')
patchList = [mpatches.Patch(color=VOLUME_COLOR, label='market-profile'),
mpatches.Patch(color=VSTOP_COLOR, label='volatility-stop'),
mpatches.Patch(color=BBANDS_COLOR, label='bollinger-bands'),
mpatches.Patch(color=SMA_COLOR, label='moving-average')]
ax.legend(handles=patchList, loc='best', prop={'size': 20}, ncol=len(patchList),framealpha=0.5)
ax = axes[1]
visual.candlestick2_ohlc(ax,
0*candles['assetVolume'],
candles['assetVolume'],
0*candles['assetVolume'],
candles['assetVolume'],
width=0.6, alpha=.35)
visual.candlestick2_ohlc(ax,
0*candles['buyAssetVolume'],
candles['buyAssetVolume'],
0*candles['buyAssetVolume'],
candles['buyAssetVolume'],
width=0.28, alpha=1, shift=-0.15)
visual.candlestick2_ohlc(ax,
candles['sellAssetVolume'],
candles['sellAssetVolume'],
0*candles['sellAssetVolume'],
0*candles['sellAssetVolume'],
width=0.28, alpha=1, shift=+0.15)
ax.yaxis.grid(True)
for tic in ax.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
ax.set_xticks([])
ax.set_xlim(-.5, len(candles))
ax.get_yaxis().set_label_coords(-0.075,0.5)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax.get_xaxis().set_label_coords(0.5, -0.025)
ax.set_ylabel("Volume",fontsize=20)
patchList = [mpatches.Patch(color=VOLUME_COLOR, label='volume'),
mpatches.Patch(color=BUY_COLOR, label='buy-volume'),
mpatches.Patch(color=SELL_COLOR, label='sell-volume')]
ax.legend(handles=patchList, loc='best', prop={'size': 20}, ncol=len(patchList), framealpha=0.5)
ax = axes[2]
visual.candlestick2_ohlc(ax,
0*candles['spread'],
candles['spread'],
0*candles['spread'],
candles['spread'],
width=0.6, colorup=VOLATILITY_COLOR, alpha=.35)
ax.yaxis.grid(True)
for tic in ax.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
ax.set_xticks([])
ax.set_xlim(-.5, len(candles))
ax.get_yaxis().set_label_coords(-0.075,0.5)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.'+str(nDigit)+'f'))
ax.get_xaxis().set_label_coords(0.5, -0.025)
ax.set_ylabel("Volatility",fontsize=20)
patchList = [mpatches.Patch(color=VOLATILITY_COLOR, label='average-true-range'),
mpatches.Patch(color=BBANDS_COLOR, label='standard-deviation')]
ax.legend(handles=patchList, loc='best', prop={'size': 20}, ncol=len(patchList), framealpha=0.5)
axt = ax.twinx()
axt.plot(BBANDS['std'], linewidth=2, color=BBANDS_COLOR, linestyle='-')
for tic in axt.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
axt.set_xticks([])
axt.set_yticks([])
axt.set_xlim(-.5, len(candles))
axt = ax.twinx()
axt.plot(VSTOP['ATR'], linewidth=2, color=VOLATILITY_COLOR, linestyle='-')
for tic in axt.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
axt.set_xticks([])
axt.set_xlim(-.5, len(candles))
ax = axes[3]
ax.plot(RSI, linewidth=2, color=RSI_COLOR, linestyle='-')
ax.axhline(y=50, color=RSI_COLOR, linestyle='--')
ax.axhspan(ymin=20, ymax=80, color=RSI_COLOR, alpha=0.1)
ax.axhspan(ymin=30, ymax=70, color=RSI_COLOR, alpha=0.1)
ax.yaxis.grid(True)
for tic in ax.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
ax.set_xticks([])
ax.set_xlim(-.5, len(candles))
ax.get_yaxis().set_label_coords(-0.075,0.5)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.'+str(nDigit)+'f'))
ax.get_xaxis().set_label_coords(0.5, -0.025)
ax.set_ylabel("Momentum",fontsize=20)
patchList = [mpatches.Patch(color=RSI_COLOR, label='relative-strength')]
ax.legend(handles=patchList, loc='best', prop={'size': 20}, ncol=len(patchList), framealpha=0.5)
f.tight_layout()
plt.savefig('img/'+market+'_'+TIME_FRAME.upper()+'.png', bbox_inches='tight')
| 2.46875
| 2
|
Python/zzz_training_challenge/Python_Challenge/solutions/ch03_recursion/solutions/ex02_digits.py
|
Kreijeck/learning
| 0
|
12781544
|
# Example program for the book "Python Challenge"
#
# Copyright 2020 by <NAME>
def count_digits(value):
if value < 0:
raise ValueError("value must be >= 0")
# recursive base case
if value < 10:
return 1
# recursive descent
return count_digits(value // 10) + 1
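# Illustrative trace (not part of the original):
# count_digits(7271) -> count_digits(727) + 1 -> count_digits(72) + 2 -> count_digits(7) + 3 -> 4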
def count_digits_shorter(value):
return sum([1 for _ in str(value)])
def count_digits_tricky(value):
return len(str(value))
def calc_sum_of_digits(value):
if value < 0:
raise ValueError("value must be >= 0")
# recursive base case
if value < 10:
return value
remainder = value // 10
last_digit = value % 10
# recursive descent
return calc_sum_of_digits(remainder) + last_digit
def calc_sum_of_digits_divmod(value):
if value < 0:
raise ValueError("value must be >= 0")
# recursive base case
if value < 10:
return value
remainder, last_digit = divmod(value, 10)
# recursive descent
return calc_sum_of_digits_divmod(remainder) + last_digit
def calc_sum_of_digits_shorter(value):
return sum([int(ch) for ch in str(value)])
def main():
print(count_digits(72))
print(count_digits(7271))
print(count_digits(72))
print(count_digits(7271))
print(calc_sum_of_digits(72))
print(calc_sum_of_digits(7271))
print(calc_sum_of_digits(123456))
print(calc_sum_of_digits_shorter(72))
print(calc_sum_of_digits_shorter(7271))
print(calc_sum_of_digits_shorter(123456))
if __name__ == "__main__":
main()
| 3.90625
| 4
|
Projects/Online Workouts/w3resource/Basic - Part-I/program-77.py
|
ivenpoker/Python-Projects
| 1
|
12781545
|
<reponame>ivenpoker/Python-Projects<gh_stars>1-10
# !/usr/bin/env python3
#######################################################################################
# #
# Program purpose: Test whether system is big-endian platform or little-endian #
# platform. #
# Program Author : <NAME> <<EMAIL>> #
# Creation Date : August 17, 2019 #
# #
#######################################################################################
import sys
if __name__ == "__main__":
if sys.byteorder == "little":
# intel, alpha
print("Little-endian platform.")
else:
# motorola, sparc
print("Big-endian platform.")
print()
| 2.546875
| 3
|
Crawler/src/crawl/crawler.py
|
cam626/RPEye-Link-Analysis
| 2
|
12781546
|
<filename>Crawler/src/crawl/crawler.py
""" This script the function to crawl links.
Good test cases & their behaviors:
https://.com -- crawl_link returns an error -- none of the except clauses catch this
https://rpi.edu.com -- takes a long time to respond
"""
import urllib.request
from urllib.error import URLError, HTTPError, ContentTooShortError
import validators
def crawl_link(link):
""" Crawls a link, and returns a tuple with a urllib response and a code
indicating success or error
Input:
string -- link to be claled
Returns:
Tuple(e1,e2) --
e1 = urllib response
e2 = success or error code.
Code Source: https://docs.python.org/3/howto/urllib2.html ('Fetching URLs')
"""
if not validators.url(link):
return ("", False, "Invalid link")
print("Currently handling link: " + link)
req = urllib.request.Request(link)
try:
response = urllib.request.urlopen(req)
except HTTPError as error:
return (None, False, error.code)
except ContentTooShortError as error:
return (None, False, "ContentTooShortError")
except URLError as error:
return (None, False, error.reason)
else:
return (response, True, "")
# Tests for functions in crawler.py
if __name__ == "__main__":
print(crawl_link("https://a.com"))
| 3.625
| 4
|
back_end/test/b64_decode.py
|
Wizard-J/Jsite
| 0
|
12781547
|
<reponame>Wizard-J/Jsite<gh_stars>0
import base64
path = "E:\\project\\JSite\\front_end\\src\\statics\\img\\sidebar-bg.jpg"
with open(path,"rb") as f:
img = f.read()
# print(base64.b64encode(img))
encoded = base64.b64encode(img)
with open("./sidebar-bg-b64","wb") as coded:
coded.write(encoded)
| 1.984375
| 2
|
python/code_challenges/fizz_buzz_tree/fizz_buzz_tree/fizz_buzz_tree.py
|
HishamKhalil1990/data-structures-and-algorithms
| 0
|
12781548
|
<filename>python/code_challenges/fizz_buzz_tree/fizz_buzz_tree/fizz_buzz_tree.py
from .tree import BinaryTree,Queue,EmptyTreeException,Tree_Node
def Fizz_Buzz_Tree(Binary_tree):
def fuzz_buzz(value):
if value % 3 == 0 and value % 5 == 0:
return 'FizzBuzz'
elif value % 3 == 0:
return "Fizz"
elif value % 5 == 0:
return "Buzz"
else:
return value
root = Binary_tree.root
if not root:
raise EmptyTreeException
new_root = Tree_Node(root.value)
new_tree = BinaryTree(new_root)
roots = Queue()
roots.enqueue(root)
new_root.value = fuzz_buzz(new_root.value)
new_rootsque = Queue()
new_rootsque.enqueue(new_root)
def inner_func(roots):
for num in range(len(roots)):
left = roots.peek().left
right = roots.peek().right
roots.dequeue()
if left:
new_left = Tree_Node(fuzz_buzz(left.value))
new_rootsque.peek().left = new_left
roots.enqueue(left)
new_rootsque.enqueue(new_left)
if right:
new_right = Tree_Node(fuzz_buzz(right.value))
new_rootsque.peek().right = new_right
roots.enqueue(right)
new_rootsque.enqueue(new_right)
new_rootsque.dequeue()
if len(roots) > 0:
inner_func(roots)
inner_func(roots)
return new_tree
if __name__ == "__main__":
node1 = Tree_Node(1)
node1.left = Tree_Node(13)
node1.right = Tree_Node(27)
node1.left.left = Tree_Node(33)
node1.left.right = Tree_Node(56)
node1.left.right.left = Tree_Node(9)
node1.left.right.right = Tree_Node(17)
node1.right.right = Tree_Node(15)
node1.right.right.left = Tree_Node(37)
binary_tree = BinaryTree(node1)
new_bt = Fizz_Buzz_Tree(binary_tree)
print(new_bt.breadth_first(new_bt))
print(binary_tree.breadth_first(binary_tree))
print(binary_tree.pre_order())
print(new_bt.pre_order())
print(binary_tree.in_order())
print(new_bt.in_order())
print(binary_tree.post_order())
print(new_bt.post_order())
node1 = Tree_Node(3) # ---> h0 root
node1.left = Tree_Node(8) # ---> h1 left
node1.right = Tree_Node(5) # ---> h1 right
node1.left.left = Tree_Node(2) # ---> h2 left root 8
node1.left.right = Tree_Node(6) # ---> h2 right root 8
node1.right.left = Tree_Node(10) # ---> h2 left root 5
node1.right.right = Tree_Node(9) # ---> h2 right root 5
node1.left.left.left = Tree_Node(24) # ---> h3 left root 2
node1.left.left.right = Tree_Node(34) # ---> h3 right root 2
node1.left.right.left = Tree_Node(5) # ---> h3 left root 6
node1.left.right.right = Tree_Node(11) # ---> h3 right root 6
node1.right.right.left = Tree_Node(4) # ---> h3 left root 9
node1.right.right.right = Tree_Node(15) # ---> h3 right root 9
binary_tree = BinaryTree(node1)
new_bt = Fizz_Buzz_Tree(binary_tree)
print(new_bt.breadth_first(new_bt))
print(binary_tree.breadth_first(binary_tree))
| 3.765625
| 4
|
JChipClient.py
|
mbuckaway/CrossMgr
| 1
|
12781549
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
# JChipClient.py: JChip simulator program for testing JChip interface and CrossMgr.
#
# Copyright (C) <NAME>, 2012.
import os
import time
import xlwt
import socket
import random
import operator
import datetime
#------------------------------------------------------------------------------
# CrossMgr's port and socket.
DEFAULT_PORT = 53135
DEFAULT_HOST = '127.0.0.1'
#------------------------------------------------------------------------------
# JChip delimiter (CR, **not** LF)
CR = u'\r'
NumberOfStarters = 50
#------------------------------------------------------------------------------
# Create some random rider numbers.
random.seed( 10101010 )
nums = [n for n in range(1,799+1)]
random.shuffle( nums )
nums = nums[:NumberOfStarters]
#------------------------------------------------------------------------------
# Create a JChip-style hex tag for each number.
tag = {n: u'41AA%03X' % n for n in nums }
tag[random.choice(list(tag.keys()))] = u'E2001018860B01290700D0D8'
tag[random.choice(list(tag.keys()))] = u'E2001018860B01530700D138'
tag[random.choice(list(tag.keys()))] = u'E2001018860B01370700D0F8'
tag[random.choice(list(tag.keys()))] = u'1'
tag[random.choice(list(tag.keys()))] = u'2'
#------------------------------------------------------------------------
def getRandomData( starters ):
firstNames = '''
1. Noah
2. Liam
3. Jacob
4. Mason
5. William
6. Ethan
7. Michael
8. Alexander
9. Jayden
10. Daniel
11. Elijah
12. Aiden
13. James
14. Benjamin
15. Matthew
16. Jackson
17. Logan
18. David
19. Anthony
20. Joseph
21. Joshua
22. Andrew
23. Lucas
24. Gabriel
25. Samuel
26. Christopher
27. John
28. Dylan
29. Isaac
30. Ryan
31. Nathan
32. Carter
33. Caleb
34. Luke
35. Christian
36. Hunter
37. Henry
38. Owen
39. Landon
40. Jack
41. Wyatt
42. Jonathan
43. Eli
44. Isaiah
45. Sebastian
46. Jaxon
47. Julian
48. Brayden
49. Gavin
50. Levi
51. Aaron
52. Oliver
53. Jordan
54. Nicholas
55. Evan
56. Connor
57. Charles
58. Jeremiah
59. Cameron
60. Adrian
61. Thomas
62. Robert
63. Tyler
64. Colton
65. Austin
66. Jace
67. Angel
68. Dominic
69. Josiah
70. Brandon
71. Ayden
72. Kevin
73. Zachary
74. Parker
75. Blake
76. Jose
77. Chase
78. Grayson
79. Jason
80. Ian
81. Bentley
82. Adam
83. Xavier
84. Cooper
85. Justin
86. Nolan
87. Hudson
88. Easton
89. Jase
90. Carson
91. Nathaniel
92. Jaxson
93. Kayden
94. Brody
95. Lincoln
96. Luis
97. Tristan
98. Damian
99. Camden
100. Juan
'''
lastNames = '''
1. Smith
2. Johnson
3. Williams
4. Jones
5. Brown
6. Davis
7. Miller
8. Wilson
9. Moore
10. Taylor
11. Anderson
12. Thomas
13. Jackson
14. White
15. Harris
16. Martin
17. Thompson
18. Garcia
19. Martinez
20. Robinson
21. Clark
22. Rodriguez
23. Lewis
24. Lee
25. Walker
26. Hall
27. Allen
28. Young
29. Hernandez
30. King
31. Wright
32. Lopez
33. Hill
34. Scott
35. Green
36. Adams
37. Baker
38. Gonzalez
39. Nelson
40. Carter
41. Mitchell
42. Perez
43. Roberts
44. Turner
45. Phillips
46. Campbell
47. Parker
48. Evans
49. Edwards
50. Collins
51. Stewart
52. Sanchez
53. Morris
54. Rogers
55. Reed
56. Cook
57. Morgan
58. Bell
59. Murphy
60. Bailey
61. Rivera
62. Cooper
63. Richardson
64. Cox
65. Howard
66. Ward
67. Torres
68. Peterson
69. Gray
70. Ramirez
71. James
72. Watson
73. Brooks
74. Kelly
75. Sanders
76. Price
77. Bennett
78. Wood
79. Barnes
80. Ross
81. Henderson
82. Coleman
83. Jenkins
84. Perry
85. Powell
86. Long
87. Patterson
88. Hughes
89. Flores
90. Washington
91. Butler
92. Simmons
93. Foster
94. Gonzales
95. Bryant
96. Alexander
97. Russell
98. Griffin
99. Diaz
'''
teams = '''
The Cyclomaniacs
Pesky Peddlers
Geared Up
Spoke & Mirrors
The Handlebar Army
Wheels of Steel
The Chaingang
Saddled & Addled
The Cyclepaths
Tour de Farce
Old Cranks
Magically Bikelicious
Look Ma No Hands!
Pedal Pushers
Kicking Asphault
Velociposse
Road Rascals
Spin Doctors
'''
firstNames = [line.split('.')[1].strip() for line in firstNames.split('\n') if line.strip()]
lastNames = [line.split('.')[1].strip() for line in lastNames.split('\n') if line.strip()]
teams = [line.strip() for line in teams.split('\n') if line.strip()]
bibs = [n for n in range(1,1+starters)]
random.shuffle( firstNames )
random.shuffle( lastNames )
random.shuffle( teams )
for i in range(starters):
yield bibs[i], firstNames[i%len(firstNames)], lastNames[i%len(lastNames)], teams[i%len(teams)]
#------------------------------------------------------------------------------
# Write out as a .xlsx file with the number tag data.
#
wb = xlwt.Workbook()
ws = wb.add_sheet( "JChipTest" )
for col, label in enumerate(u'Bib#,LastName,FirstName,Team,Tag,StartTime'.split(',')):
ws.write( 0, col, label )
rdata = [d for d in getRandomData(len(tag))]
rowCur = 1
for r, (n, t) in enumerate(tag.items()):
if t in ('1', '2'):
continue
bib, firstName, lastName, Team = rdata[r]
for c, v in enumerate([n, lastName, firstName, Team, t, 5*rowCur/(24.0*60.0*60.0)]):
ws.write( rowCur, c, v )
rowCur += 1
wb.save('JChipTest.xls')
wb = None
#------------------------------------------------------------------------------
# Also write out as a .csv file.
#
with open('JChipTest.csv', 'w') as f:
f.write( u'Bib#,Tag,dummy3,dummy4,dummy5\n' )
for n in nums:
f.write( '{},{}\n'.format(n, tag[n]) )
sendDate = True
#------------------------------------------------------------------------------
# Function to format number, lap and time in JChip format
# Z413A35 10:11:16.4433 10 10000 C7
count = 0
def formatMessage( n, lap, t ):
global count
message = u"DJ%s %s 10 %05X C7%s%s" % (
tag[n], # Tag code
t.strftime('%H:%M:%S.%f'), # hh:mm:ss.ff
count, # Data index number in hex.
' date={}'.format( t.strftime('%Y%m%d') ) if sendDate else '',
CR
)
count += 1
return message
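# Illustrative output (assuming sendDate is True and count is 0): a rider whose
# tag is 41AA00A, read at 10:11:16.443300 on 2012-03-04, would be reported as
# "DJ41AA00A 10:11:16.443300 10 00000 C7 date=20120304\r"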
#------------------------------------------------------------------------------
# Generate some random lap times.
random.seed()
numLapTimes = []
mean = 60.0 # Average lap time.
varFactor = 9.0 * 4.0
var = mean / varFactor # Variance between riders.
lapMax = 61
for n in nums:
lapTime = random.normalvariate( mean, mean/(varFactor * 4.0) )
for lap in range(0, lapMax+1):
numLapTimes.append( (n, lap, lapTime*lap) )
numLapTimes.sort( key = operator.itemgetter(1, 2) ) # Sort by lap, then race time.
#------------------------------------------------------------------------------
# Create a socket (SOCK_STREAM means a TCP socket)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#------------------------------------------------------------------------------
# Connect to the CrossMgr server.
iMessage = 1
while 1:
print( u'Trying to connect to server...' )
while 1:
try:
sock.connect((DEFAULT_HOST, DEFAULT_PORT))
break
except:
print( u'Connection failed. Waiting 5 seconds...' )
time.sleep( 5 )
#------------------------------------------------------------------------------
print( u'Connection succeeded!' )
name = u'{}-{}'.format(socket.gethostname(), os.getpid())
print( u'Sending name...', name )
message = u"N0000{}{}".format(name, CR)
sock.send( message.encode() )
#------------------------------------------------------------------------------
print( u'Waiting for get time command...' )
while 1:
received = sock.recv(1).decode()
if received == u'G':
while received[-1] != CR:
received += sock.recv(1).decode()
print( u'Received cmd: "%s" from CrossMgr' % received[:-1] )
break
#------------------------------------------------------------------------------
dBase = datetime.datetime.now()
dBase -= datetime.timedelta( seconds = 13*60+13.13 ) # Send the wrong time for testing purposes.
#------------------------------------------------------------------------------
print( u'Send gettime data...' )
# format is GT0HHMMSShh<CR> where hh is 100's of a second. The '0' (zero) after GT is the number of days running and is ignored by CrossMgr.
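# e.g. 10:11:16.44 with sendDate on 2012-03-04 -> "GT010111644 date=20120304\r" (illustrative)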
message = u'GT0%02d%02d%02d%02d%s%s' % (
dBase.hour, dBase.minute, dBase.second, int((dBase.microsecond / 1000000.0) * 100.0),
u' date={}'.format( dBase.strftime('%Y%m%d') ) if sendDate else u'',
CR)
print( message[:-1] )
sock.send( message.encode() )
#------------------------------------------------------------------------------
print( u'Waiting for send command from CrossMgr...' )
while 1:
received = sock.recv(1).decode()
if received == u'S':
while received[-1] != CR:
received += sock.recv(1).decode()
print( u'Received cmd: "%s" from CrossMgr' % received[:-1] )
break
#------------------------------------------------------------------------------
print( u'Start sending data...' )
while iMessage < len(numLapTimes):
n, lap, t = numLapTimes[iMessage]
dt = t - numLapTimes[iMessage-1][2]
time.sleep( dt )
message = formatMessage( n, lap, dBase + datetime.timedelta(seconds = t - 0.5) )
if iMessage & 15 == 0:
print( u'sending: {}: {}\n'.format(iMessage, message[:-1]) )
try:
sock.send( message.encode() )
iMessage += 1
except:
print( u'Disconnected. Attempting to reconnect...' )
break
if iMessage >= len(numLapTimes):
message = u'<<<GarbageTerminateMessage>>>' + CR
sock.send( message.encode() )
break
| 2.4375
| 2
|
blockchain/settings.py
|
sdediego/blockchain-api-client
| 2
|
12781550
|
#!/usr/bin/env python
# encoding: utf-8
# Settings for Blockchain API project
# Configure Blockchain API resources
RESOURCES = {
'charts': 'BlockchainAPIChart',
'stats': 'BlockchainAPIStatistics',
'pools': 'BlockchainAPIPool',
}
# Blockchain Charts
CHARTS = [
# Bitcoin stats
'total-bitcoins',
'market-price',
'market-cap',
'trade-volume',
# Blockchain stats
'blocks-size',
'avg-block-size',
'n-transactions-per-block',
'median-confirmation-time',
# Mining stats
'hash-rate',
'difficulty',
'miners-revenue',
'transaction-fees',
'transaction-fees-usd',
'cost-per-transaction-percent',
'cost-per-transaction',
# Network activity
'n-unique-addresses',
'n-transactions',
'n-transactions-total',
'transactions-per-second',
'mempool-size',
'utxo-count',
'output-volume',
'estimated-transaction-volume',
'estimated-transaction-volume-usd',
]
| 1.382813
| 1
|