hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
64053180a94832d8869b58084f1530b41aef4032 | 14,379 | py | Python | modules/coins/network.py | blakebjorn/tuxpay | 9bcbb309d6d41add9a2224f69efa7d525d829c95 | [
"MIT"
] | null | null | null | modules/coins/network.py | blakebjorn/tuxpay | 9bcbb309d6d41add9a2224f69efa7d525d829c95 | [
"MIT"
] | null | null | null | modules/coins/network.py | blakebjorn/tuxpay | 9bcbb309d6d41add9a2224f69efa7d525d829c95 | [
"MIT"
] | null | null | null | import asyncio
import datetime
import json
import warnings
from pathlib import Path
from typing import Tuple, Optional, TYPE_CHECKING
import aiorpcx
from modules import config
from modules.electrum_mods.functions import BIP32Node, pubkey_to_address, address_to_script, \
script_to_scripthash, constants
from modules.electrumx import ElectrumX, ElectrumError
from modules.helpers import inv_dict, timestamp
from modules.logging import logger
from modules.models import database, Payment, Invoice
from modules.webhooks import send_webhook
if TYPE_CHECKING:
from modules.electrum_mods import PartialTransaction
class CoinNetwork(constants.AbstractNet):
    """Base class describing one coin's network backend.

    Concrete subclasses override the class-level constants (symbol,
    extended-key version bytes, BIP44 coin type, ...) and inherit the
    ElectrumX-backed helpers for address derivation, block/fee queries and
    payment watching.
    """
    # --- per-coin constants, overridden by concrete subclasses ---
    symbol = ""  # ticker; testnet symbols carry an extra leading character (see root_symbol)
    name = ""
    segwit = True
    address_format = "p2wpkh"
    bip21_intent = "bitcoin:"  # URI scheme prefix used when building payment links
    kraken = ""  # presumably a Kraken exchange pair/ticker -- TODO confirm
    electrumX = None  # replaced by an ElectrumX client instance in __init__
    decimals = 8  # 10 ** decimals base units ("sats") per whole coin
    watched_payments = {}  # NOTE(review): class-level mutable dict, shared across instances -- confirm intended
    TESTNET = False
    SIGHASH_FLAG = 0x00000001
    TX_VERSION = 2
    XPRV_HEADERS = {}
    XPUB_HEADERS = {}
    WIF_PREFIX = None
    BIP44_COIN_TYPE = -1
    PEER_DEFAULT_PORTS = {'t': 50001, 's': 50002}  # default ElectrumX TCP ('t') / SSL ('s') ports

    def __init__(self):
        self.electrumX = ElectrumX(self.symbol, default_ports=self.PEER_DEFAULT_PORTS)
        # inverse lookup tables for the extended-key version bytes
        self.XPRV_HEADERS_INV = inv_dict(self.XPRV_HEADERS)
        self.XPUB_HEADERS_INV = inv_dict(self.XPUB_HEADERS)
        self._block_queue = None  # queue fed by the headers subscription (created lazily)
        self._block_lock = None  # created lazily so it binds to the running event loop
        self._relayfee: Optional[Tuple[int, float]] = None
        self._current_block: Optional[int] = None
        # cached as (height, feerate) so the cache naturally expires on a new block
        self._current_feerate: Optional[Tuple[int, float]] = None
        self._svg_icon = None
        xpub = config.xpubs.get(self.symbol)
        if xpub is None:
            # without an xpub we can still query the chain, but cannot derive addresses
            warnings.warn(f"No xPub added for {self.symbol} - address generation disabled")
            self.xpub_node = None
            self.xpub_derivation = None
        else:
            self.xpub_node = BIP32Node.from_xkey(xpub['xpub'], net=self)
            self.xpub_derivation = xpub['derivation_path']

    @property
    def icon(self):
        """Return (and lazily cache) the coin's SVG icon text, '' if missing."""
        if self._svg_icon is not None:
            return self._svg_icon
        # testnet coins reuse the mainnet icon (symbol without its first character)
        filename = Path(__file__).parent / 'icons' / f"{self.symbol[1:] if self.TESTNET else self.symbol}.svg"
        if filename.exists():
            self._svg_icon = filename.read_text()
        else:
            self._svg_icon = ""
        return self._svg_icon

    @property
    def icon_b64(self):
        # NOTE(review): despite the name this returns the raw cached SVG (not
        # base64) and stays None until `icon` has been read once -- verify callers.
        return self._svg_icon

    def sats_to_coin(self, sats: Optional[int]) -> Optional[float]:
        """Convert base units to a whole-coin float; None passes through."""
        if sats is None:
            return None
        return round(sats * 10 ** -self.decimals, self.decimals)

    def coin_to_sats(self, amt):
        """Convert a whole-coin amount to integer base units."""
        return int(round(amt * 10 ** self.decimals))

    @property
    def root_symbol(self):
        """Mainnet ticker: testnet symbols drop their first character."""
        if self.TESTNET:
            return self.symbol[1:]
        return self.symbol

    @property
    def default_xpub_derivation_path(self):
        """Hardened BIP84 (segwit) / BIP44 style derivation path for this coin."""
        return "/".join(['84h' if self.segwit else '44h',
                         '1h' if self.TESTNET else f"{self.BIP44_COIN_TYPE}h",
                         '0h'])

    @property
    async def relay_fee(self):
        """Minimum relay fee in base units, queried from ElectrumX."""
        res = await self.electrum_call(ElectrumX.blockchain_relayfee, [])
        assert res > 0
        # check response
        relay_fee = int(res * 10 ** self.decimals)
        relay_fee = max(0, relay_fee)
        return relay_fee

    @property
    async def current_block(self):
        """Current chain height; subscribes to new headers on first use."""
        if self._block_lock is None:
            # created here rather than __init__ so it belongs to the running loop
            self._block_lock = asyncio.Lock()
        async with self._block_lock:
            if self._current_block is not None:
                return self._current_block
            self._block_queue = asyncio.Queue()
            ret = await self.electrumX.call(ElectrumX.blockchain_headers_subscribe, [], self._block_queue)
            self._current_block = int(ret[0]['height'])
            # keep _current_block fresh in the background from now on
            asyncio.create_task(self.watch_blocks())
            return self._current_block

    async def watch_blocks(self):
        """Background task: consume header notifications and track the tip height."""
        while True:
            ret = await self._block_queue.get()
            try:
                # (sic) the validator helper's name is misspelled ("elextrumx") upstream
                self.electrumX.validate_elextrumx_call(ElectrumX.blockchain_headers_subscribe, ret)
            except ElectrumError:
                # malformed notification -> demote the server and wait for the next one
                await self.electrumX.penalize_server()
                continue
            logger.info(f"{self.symbol} - new block @ {ret[0]['height']}")
            self._current_block = int(ret[0]['height'])

    @property
    async def current_feerate(self):
        """Next-block fee estimate in base units per byte, with per-block caching."""
        # Cached feerate
        if self._current_feerate is not None and self._current_feerate[0] == (await self.current_block):
            return self._current_feerate[1]
        # NOTE(review): the freshly computed estimate below is never written back
        # to self._current_feerate, so the cache check above can only ever hit if
        # the attribute is populated elsewhere -- verify.
        fee_estimate = await self.electrum_call(ElectrumX.blockchain_estimatefee, [1])
        if fee_estimate and fee_estimate > 0:
            # Convert to sat/byte
            fee_estimate /= 1024
            fee_estimate *= 10 ** self.decimals
        else:
            # Fallback to coin default
            fee_estimate = float(self.config('fallback_feerate'))
        return fee_estimate

    def config(self, key, default=None):
        """
        Shorthand to get config entries for the parent coin, equivalent to `modules.config.get(key, coin=self.symbol)`
        """
        return config.get(key, coin=self.symbol, default=default)

    async def get_transactions(self, script_hash, ignored_tx_hashes=None):
        """Fetch verbose transactions for a scripthash's history.

        Returns ``{tx_hash: verbose_tx}``.  Hashes in *ignored_tx_hashes* are
        skipped; a mempool fee present in the history entry is copied into
        the transaction dict under ``'mempool_fee'``.
        """
        transactions = {}
        history = await self.electrum_call(ElectrumX.blockchain_scripthash_get_history, [script_hash])
        for tx in history:
            tx_hash = tx.get("tx_hash")
            if ignored_tx_hashes and tx_hash in ignored_tx_hashes:
                continue
            transactions[tx_hash] = await self.electrum_call(ElectrumX.blockchain_transaction_get, [tx_hash, True])
            if 'fee' in tx:
                transactions[tx_hash]['mempool_fee'] = tx['fee']
        return transactions

    async def watch_payment(self, payment: dict):
        """Track one payment until it expires or is fully confirmed.

        Subscribes to the payment's scripthash, totals the outputs paying the
        payment address at three assurance levels (mempool / on-chain /
        past the required confirmation count), persists any Payment column
        changes, and propagates status changes to the parent Invoice
        (webhook and e-mail side effects fire on invoice transitions).
        """
        logger.info(f"Watching payment {payment['uuid']}")
        queue = asyncio.Queue()
        self.watched_payments[payment['uuid']] = payment
        original_payment = payment.copy()  # snapshot used to compute changed columns
        first_loop = True
        awaiting_mempool = True  # still waiting for the paying tx to appear at all
        awaiting_confirmations = True  # still waiting for enough confirmations
        current_block = await self.current_block
        script_hash = payment['scripthash']
        ignored_tx_hashes = set()  # txs older than the payment: classified once, then skipped
        while True:
            # --- wait for the next relevant event ---
            if awaiting_mempool:
                if first_loop:
                    first_loop = False
                    logger.info(f"Subscribing to scripthash")
                    await self.electrum_call(ElectrumX.blockchain_scripthash_subscribe, [script_hash], queue)
                else:
                    logger.info(f"Waiting for scripthash changes")
                    await queue.get()
                    logger.info(f"Done waiting")
            elif awaiting_confirmations:
                # past the mempool stage: poll once per new block instead
                logger.info("waiting for new block")
                while current_block == await self.current_block:
                    await asyncio.sleep(1)
                current_block = await self.current_block
            else:
                logger.info(f"Finished watching payment {payment['uuid']}")
                break
            logger.info(f"Payment Update: {payment['uuid']} - {script_hash}")
            all_transactions = await self.get_transactions(script_hash, ignored_tx_hashes=ignored_tx_hashes)
            # Check if "received"
            # keep only txs newer than the payment; unconfirmed txs have no 'time'
            valid_tx = []
            for x in all_transactions.values():
                if 'time' not in x:
                    valid_tx.append(x)
                elif datetime.datetime.utcfromtimestamp(x.get('time', 0) or 0) > payment['creation_date']:
                    valid_tx.append(x)
                else:
                    ignored_tx_hashes.add(x.get("tx_hash"))
            payment['transactions'] = valid_tx
            # --- sum the outputs paying our address at each assurance level ---
            mempool_sats = 0
            chain_sats = 0
            confirmed_sats = 0
            req_confirmations = int(self.config('required_confirmations', default=6))
            for tx in valid_tx:
                for vout in tx['vout']:
                    if "addresses" in vout['scriptPubKey']:
                        addresses = vout['scriptPubKey']['addresses']
                    elif "address" in vout['scriptPubKey']:
                        addresses = [vout['scriptPubKey']['address']]
                    else:
                        raise ElectrumError("No Addresses in vout")
                    for addr in addresses:
                        if addr == payment['address']:
                            sats = int(round(vout['value'] * 10 ** self.decimals))
                            mempool_sats += sats
                            confirmations = tx.get("confirmations", 0)
                            # If instantsend lock is present, treat as if 1 confirmation
                            if confirmations == 0 and tx.get("instantlock"):
                                warnings.warn("test instantlock")
                                confirmations = 1
                            if confirmations > 0 or req_confirmations == 0:
                                chain_sats += sats
                            if confirmations >= req_confirmations:
                                if req_confirmations == 0 and confirmations == 0:
                                    warnings.warn("Check zeroconf fees")
                                    # CHECK IF mempool_fee is greater than the coins current next-block feerate
                                    mempool_fee = tx.get("mempool_fee")
                                    # If ElectrumX doesn't return this it will need to get calculated manually
                                confirmed_sats += sats
            # --- derive the new payment status from the totals ---
            if datetime.datetime.utcnow() > payment['expiry_date'] and payment['status'] == "pending":
                payment['status'] = "expired"
                payment['last_update'] = timestamp()
                awaiting_confirmations = False
            else:
                if confirmed_sats >= payment['amount_sats']:
                    awaiting_confirmations = False
                    if payment['status'] != "confirmed":
                        payment['status'] = "confirmed"
                        payment['payment_date'] = payment['payment_date'] or datetime.datetime.utcnow()
                        payment['paid_amount_sats'] = payment['paid_amount_sats'] or mempool_sats
                        payment['last_update'] = timestamp()
                if mempool_sats >= payment['amount_sats']:
                    if payment['status'] == 'pending':
                        payment['status'] = 'paid'
                        payment['payment_date'] = payment['payment_date'] or datetime.datetime.utcnow()
                        payment['paid_amount_sats'] = payment['paid_amount_sats'] or mempool_sats
                        payment['last_update'] = timestamp()
            if awaiting_mempool:
                # enough seen in the mempool/chain: stop listening for scripthash pushes
                if payment['status'] in ('expired', 'confirmed') or chain_sats > payment['amount_sats']:
                    awaiting_mempool = False
                    await self.unsubscribe_electrumx("blockchain.scripthash.unsubscribe",
                                                     [payment['scripthash']], queue)
            # --- persist only the columns that actually changed ---
            changes = {k: payment[k] for k, v in original_payment.items() if v != payment[k] and k != "transactions"}
            if (tx_serialized := json.dumps(valid_tx)) != original_payment.get('transactions'):
                changes['transactions'] = tx_serialized
            if changes:
                await database.execute(Payment.update()
                                       .where(Payment.c.id == payment['id'])
                                       .values(**changes))
                # If the payment changes, the invoice should as well, calculate the invoice status now
                invoice = await database.fetch_one(Invoice.select().where(Invoice.c.id == payment['invoice_id']))
                invoice = dict(invoice)
                original_invoice = invoice.copy()
                if payment['status'] == 'confirmed' and invoice['status'] in ('pending', 'paid'):
                    invoice['status'] = "confirmed"
                elif payment['status'] == 'paid' and invoice['status'] == 'pending':
                    invoice['status'] = "paid"
                if invoice['status'] != original_invoice['status']:
                    if config.get("payment_callback_url"):
                        asyncio.create_task(send_webhook(invoice, payment))
                    await database.execute(Invoice.update().where(Invoice.c.id == invoice['id']).values(
                        **{"status": invoice['status'], "payment_date": payment['payment_date']}))
                    if invoice['status'] == 'confirmed':
                        if config.check("email_notifications", namespace="EMAIL"):
                            # imported here to avoid a circular import at module load
                            from modules.email import email_invoice
                            asyncio.create_task(email_invoice(invoice))
            self.watched_payments[payment['uuid']] = payment

    def make_address(self, xpub_node: BIP32Node = None, account=0, index=0) -> str:
        """Derive the address at ``/{account}/{index}`` under *xpub_node*.

        Falls back to the wallet's configured xpub node when none is given.
        """
        assert isinstance(index, int) and index >= 0
        assert isinstance(account, int) and account >= 0
        xpub_node = xpub_node or self.xpub_node
        subkey = xpub_node.subkey_at_public_derivation(f"/{account}/{index}")
        pubkey = subkey.eckey.get_public_key_bytes(compressed=True).hex()
        return pubkey_to_address(self.address_format, pubkey, net=self)

    def address_to_script(self, address):
        """Return the output script for *address* on this network."""
        return address_to_script(address, net=self)

    def address_to_scripthash(self, address):
        """Return the ElectrumX scripthash for *address*."""
        script = self.address_to_script(address)
        return script_to_scripthash(script)

    async def electrum_call(self, method, params=None, queue=None):
        """Proxy an RPC call (optionally subscribing via *queue*) to ElectrumX."""
        return await self.electrumX.call(method, params, queue=queue)

    async def unsubscribe_electrumx(self, method, params=None, queue=None):
        """Best-effort server-side unsubscribe, then drop the local queue."""
        assert queue is not None
        logger.info(f"Unsubscribing from {method} {params}")
        await self.electrumX.get_session()
        try:
            await self.electrumX.session.send_request(method, params)
        except aiorpcx.RPCError:
            # not all servers implement this
            pass
        self.electrumX.unsubscribe(queue)

    def estimate_tx_size(self, tx: 'PartialTransaction'):
        """Estimated serialized size of *tx* for this network's rules."""
        return tx.estimated_size(self)
| 41.921283 | 118 | 0.583073 | 13,757 | 0.956742 | 0 | 0 | 2,221 | 0.154461 | 10,216 | 0.710481 | 2,232 | 0.155226 |
64060e07a2b23b4ba1835382c050f6f85a690bf8 | 3,834 | py | Python | tests/acquisition/covidcast/test_direction_updater.py | sgsmob/delphi-epidata | 801f2729ea80b42891aa282c858c489a47049082 | [
"MIT"
] | null | null | null | tests/acquisition/covidcast/test_direction_updater.py | sgsmob/delphi-epidata | 801f2729ea80b42891aa282c858c489a47049082 | [
"MIT"
] | null | null | null | tests/acquisition/covidcast/test_direction_updater.py | sgsmob/delphi-epidata | 801f2729ea80b42891aa282c858c489a47049082 | [
"MIT"
] | null | null | null | """Unit tests for direction_updater.py."""
# standard library
import argparse
import unittest
from unittest.mock import MagicMock
# py3tester coverage target
__test_target__ = 'delphi.epidata.acquisition.covidcast.direction_updater'
class UnitTests(unittest.TestCase):
    """Basic unit tests."""

    def test_get_argument_parser(self):
        """Return a parser for command-line arguments."""
        self.assertIsInstance(get_argument_parser(), argparse.ArgumentParser)

    def test_main_successful(self):
        """Run the main program, and successfully commit changes."""
        args = MagicMock(partitions=[0, 1])
        mock_database = MagicMock()
        fake_database_impl = lambda: mock_database
        mock_update_loop = MagicMock()
        main(
            args,
            database_impl=fake_database_impl,
            update_loop_impl=mock_update_loop)
        self.assertTrue(mock_update_loop.called)
        self.assertTrue(mock_database.connect.called)
        self.assertTrue(mock_database.disconnect.called)
        # disconnect's first positional argument is the commit flag: True on success
        self.assertTrue(mock_database.disconnect.call_args[0][0])

    def test_main_unsuccessful(self):
        """Run the main program, but don't commit changes on failure."""
        args = MagicMock(partitions=[0, 1])
        mock_database = MagicMock()
        fake_database_impl = lambda: mock_database
        # make the update loop blow up so main() takes its failure path
        mock_update_loop = MagicMock(side_effect=Exception('testing'))
        with self.assertRaises(Exception):
            main(
                args,
                database_impl=fake_database_impl,
                update_loop_impl=mock_update_loop)
        self.assertTrue(mock_update_loop.called)
        self.assertTrue(mock_database.connect.called)
        self.assertTrue(mock_database.disconnect.called)
        # commit flag must be False so the failed run is rolled back
        self.assertFalse(mock_database.disconnect.call_args[0][0])

    def test_update_loop(self):
        """Update direction for out-of-date covidcast rows."""
        # the classifier returns two parallel lists: days and their directions
        mock_direction_impl = MagicMock()
        mock_direction_impl.scan_timeseries.return_value = ([10, 11], [20, 21])
        mock_database = MagicMock()
        # one stale timeseries key for the loop to process
        mock_database.get_keys_with_potentially_stale_direction.return_value = [
            (
                'source',
                'signal',
                'geo_type',
                'geo_value',
                100,
                200,
                20200401,
                20200423,
                123,
            )
        ]
        mock_database.get_data_stdev_across_locations.return_value = [
            ('source', 'signal', 'geo_type', 456),
        ]
        mock_database.get_daily_timeseries_for_direction_update.return_value = [
            (1, 2, 3, 4, 5),
        ]
        update_loop(mock_database, direction_impl=mock_direction_impl)
        # the timeseries columns must be forwarded to the scanner as lists
        self.assertTrue(mock_direction_impl.scan_timeseries.called)
        args = mock_direction_impl.scan_timeseries.call_args[0]
        self.assertEqual(args[:-1], ([1], [2], [3], [4], [5]))
        # call the direction classifier
        get_direction_impl = args[-1]
        get_direction_impl('x', 'y')
        self.assertTrue(mock_direction_impl.get_direction.called)
        args, kwargs = mock_direction_impl.get_direction.call_args
        self.assertEqual(args, ('x', 'y'))
        # slope threshold scales with the stdev (456) returned by the database
        expected_kwargs = {
            'n': Constants.SLOPE_STERR_SCALE,
            'limit': 456 * Constants.BASE_SLOPE_THRESHOLD,
        }
        self.assertEqual(kwargs, expected_kwargs)
        # one direction update per (day, direction) pair from the scanner
        self.assertEqual(mock_database.update_direction.call_count, 2)
        call_args_list = mock_database.update_direction.call_args_list
        expected_args = (
            'source', 'signal', 'day', 'geo_type', 10, 'geo_value', 20,
        )
        self.assertEqual(call_args_list[0][0], expected_args)
        expected_args = (
            'source', 'signal', 'day', 'geo_type', 11, 'geo_value', 21,
        )
        self.assertEqual(call_args_list[1][0], expected_args)
        # finally the "direction updated" timestamp must be bumped for the key
        self.assertTrue(mock_database.update_timeseries_direction_updated_timestamp.called)
        args = mock_database.update_timeseries_direction_updated_timestamp.call_args[0]
        expected_args = ('source', 'signal', 'day', 'geo_type', 'geo_value')
        self.assertEqual(args, expected_args)
| 32.769231 | 87 | 0.708659 | 3,596 | 0.937924 | 0 | 0 | 0 | 0 | 0 | 0 | 644 | 0.167971 |
6407f13b7469cb39d99bb96b0754b0f7bf912032 | 1,388 | py | Python | main.py | stspbu/repository-downloader | 345da223377ca55f0acae93ea689cc6bb044a721 | [
"WTFPL"
] | 3 | 2021-07-01T12:00:41.000Z | 2021-10-06T21:05:59.000Z | main.py | stspbu/repository-downloader | 345da223377ca55f0acae93ea689cc6bb044a721 | [
"WTFPL"
] | null | null | null | main.py | stspbu/repository-downloader | 345da223377ca55f0acae93ea689cc6bb044a721 | [
"WTFPL"
] | null | null | null | import requests
import os
import logging
import subprocess
import re
import settings
# Base URL of the GitHub REST API.
_GITHUB_BASE_URL = 'https://api.github.com'
# Directory repositories are cloned into (defaults to the current working dir).
_REPO_DIR = settings.get('repo_dir', os.getcwd())
# Total number of repositories to clone.
_REPO_CNT = settings.get('repo_count', 10)
# Search query passed to the GitHub repository-search endpoint.
_QUERY_STRING = settings.get('repo_query_string')
# Optional token for authenticated (higher rate-limit) requests.
_TOKEN = settings.get('github_token', required=False)
def main():
    """Clone up to ``_REPO_CNT`` repositories matching ``_QUERY_STRING``.

    Pages through the GitHub repository-search API one result per page and
    ``git clone``s each hit into ``_REPO_DIR``.  Stops early when the search
    runs out of results or the API request fails (the original code looped
    forever, requesting ever-higher page numbers, in both of those cases).
    """
    logging.warning('Starting')
    page_num = 1
    visited_repo_cnt = 1
    # Authenticated requests get a much higher search-API rate limit.
    headers = {'Authorization': f'token {_TOKEN}'} if _TOKEN else None
    while visited_repo_cnt <= _REPO_CNT:
        r = requests.get(f'{_GITHUB_BASE_URL}/search/repositories',
                         params={'q': _QUERY_STRING, 'page': page_num, 'per_page': 1},
                         headers=headers)
        if r.status_code != 200:
            # Rate limited or bad query -- stop instead of spinning forever.
            logging.error(f'GitHub API returned {r.status_code}: {r.text}')
            break
        items = r.json().get('items', [])
        if not items:
            # Search results exhausted.
            logging.warning('No more search results')
            break
        for item in items:
            url = item['clone_url']
            # Flatten "owner/name" into a single directory name.
            repo_name = re.sub('/', '---', item['full_name'])
            args = ['git', 'clone', url, os.path.join(_REPO_DIR, repo_name)]
            subprocess.run(args, stdout=subprocess.PIPE)
            logging.warning(f'Visited repo={repo_name} [{visited_repo_cnt}/{_REPO_CNT}]')
            visited_repo_cnt += 1
            if visited_repo_cnt > _REPO_CNT:
                break
        page_num += 1
    logging.warning('Done')


if __name__ == '__main__':
    main()
| 25.236364 | 89 | 0.598703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.241354 |
6408b07c5f3fe2b043a2035f0c421921305e61ad | 7,074 | py | Python | p2p_python/tool/share.py | yoosofan/p2p-python | f8822d5c9765007e2785a4b357e33a40e382a704 | [
"MIT"
] | null | null | null | p2p_python/tool/share.py | yoosofan/p2p-python | f8822d5c9765007e2785a4b357e33a40e382a704 | [
"MIT"
] | null | null | null | p2p_python/tool/share.py | yoosofan/p2p-python | f8822d5c9765007e2785a4b357e33a40e382a704 | [
"MIT"
] | null | null | null | #!/user/env python3
# -*- coding: utf-8 -*-
import threading
import time
import os.path
from hashlib import sha256
import bjson
import logging
import random
from binascii import hexlify
from ..config import C, V, PeerToPeerError
from ..client import FileReceiveError, ClientCmd
from .utils import AESCipher
class FileShare:
    """Chunked file sharing over a peer-to-peer client.

    A file is split into ``C.MAX_RECEIVE_SIZE`` chunks (optionally AES
    encrypted), each chunk is published via the p2p client, and a small
    ".share" manifest (``self.content``) records the chunk hashes plus a
    SHA256 of the plaintext so the file can be reassembled and verified.
    """

    def __init__(self, pc, path):
        # pc: the peer-to-peer client used to publish/fetch chunks
        self.pc = pc
        self.name = os.path.split(path)[1]
        self.path = path
        # per-chunk "downloaded" flags, populated by load_share_file()
        self.f_contain = list()
        # the share manifest (name, chunk hashes, whole-file hash, ...)
        self.content = dict()

    @staticmethod
    def create_ley():
        # NOTE(review): method name looks like a typo for "create_key";
        # kept because callers may rely on it.
        """Generate a fresh AES key for encrypting the shared chunks."""
        return AESCipher.create_key()

    def share_raw_file(self, pwd=None):
        """Split the local file into chunks, publish them, and build the manifest.

        When *pwd* is given each chunk is AES-encrypted before publishing.
        The manifest's 'hash' is the SHA256 of the plaintext, while
        'element' holds the SHA256 of each (possibly encrypted) chunk.
        """
        if not os.path.exists(self.path):
            raise FileExistsError('Not found file.')
        if not os.path.isfile(self.path):
            raise Exception('It\'s a directory.')
        h_list = list()
        sha_hash = sha256()
        with open(self.path, mode='br') as f:
            while True:
                raw = f.read(C.MAX_RECEIVE_SIZE)
                if not raw:
                    break
                sha_hash.update(raw)  # plaintext hash, computed before encryption
                if pwd:
                    raw = AESCipher.encrypt(key=pwd, raw=raw)
                h_list.append(sha256(raw).digest())
                self.pc.share_file(data=raw)
        self.content = {
            'name': self.name,
            'path': self.path,
            'size': os.path.getsize(self.path) / 1000,  # kilobytes
            'element': h_list,
            'hash': sha_hash.hexdigest(),
            'signer': None,
            'sign': None,
            'date': time.strftime('%Y-%m-%d %H:%M:%S'),
            'time': int(time.time())}

    def load_share_file(self):
        """Load a ".share" manifest from ``self.path`` into ``self.content``."""
        if len(self.content) != 0:
            raise Exception('Already loaded share file.')
        with open(self.path, mode='br') as f:
            self.content = bjson.load(fp=f)
        # one "downloaded" flag per chunk, all initially False
        self.f_contain = [False] * len(self.content['element'])
        self.name = self.content['name']
        self.path = self.content['path']

    def recode_raw_file(self, recode_dir, pwd=None, overwrite=False):
        """Reassemble the original file into *recode_dir* from downloaded chunks.

        Requires every chunk to be downloaded; verifies the plaintext SHA256
        against the manifest.  *pwd* must match the key used when sharing.
        """
        if not os.path.exists(recode_dir):
            raise FileNotFoundError('Not found recode dir.')
        recode_path = os.path.join(recode_dir, self.name)
        if os.path.exists(recode_path) and not overwrite:
            raise FileExistsError('You try to overwrite file.')
        check = self.check()
        if len(check) > 0:
            # NOTE(review): `check` holds the *missing* chunk indices, so this
            # percentage looks inverted (reports missing share, labels it
            # "complete") -- verify intended.
            complete = str(round(len(check) / len(self.f_contain) * 100, 2))
            raise FileNotFoundError('Isn\'t all file downloaded, ({}% complete)'.format(complete))
        sha_hash = sha256()
        with open(recode_path, mode='ba') as f:
            for h in self.content['element']:
                raw = self.pc.get_file(file_hash=hexlify(h).decode())
                if pwd:
                    raw = AESCipher.decrypt(key=pwd, enc=raw)
                sha_hash.update(raw)
                f.write(raw)
        if sha_hash.hexdigest() != self.content['hash']:
            raise Exception('SHA256 hash don\'t match.')

    def recode_share_file(self, path=None, overwrite=False, compress=False):
        """Write the manifest back to disk (default: original path + '.share')."""
        if path is None:
            path = self.path + '.share'
        if os.path.exists(path) and not overwrite:
            raise FileExistsError('You try to over write file.')
        with open(path, mode='bw') as f:
            bjson.dump(self.content, fp=f, compress=compress)

    def get_all_binary(self, pwd=None):
        """Return the whole reassembled (and verified) file as bytes."""
        result = b''
        check = self.check()
        sha_hash = sha256()
        if len(check) > 0:
            # see NOTE in recode_raw_file about this percentage
            complete = str(round(len(check) / len(self.f_contain) * 100, 2))
            raise FileNotFoundError('Isn\'t all file downloaded, ({}% complete)'.format(complete))
        for h in self.content['element']:
            raw = self.pc.get_file(file_hash=hexlify(h).decode())
            if pwd:
                raw = AESCipher.decrypt(key=pwd, enc=raw)
            sha_hash.update(raw)
            result += raw
        if sha_hash.hexdigest() != self.content['hash']:
            raise Exception('SHA256 hash don\'t match.')
        return result

    def check(self):
        # return uncompleted element index
        return [i for i in range(len(self.f_contain)) if not self.f_contain[i]]

    def remove_sharefile_related(self):
        """Delete every chunk of this share from the p2p client's store."""
        for hash_bin in self.content['element']:
            self.pc.remove_file(hexlify(hash_bin).decode())

    def get_tmp_files(self):
        # return [(path, size, time), ...]
        files = list()
        for f in os.listdir(self.pc.tmp_dir):
            path = os.path.join(self.pc.tmp_dir, f)
            # only the client's "file.*" cache entries count
            if not f.startswith('file.'):
                continue
            if not os.path.isfile(path):
                continue
            size = os.path.getsize(path)
            date = os.path.getmtime(path)
            files.append((path, size, date))
        return files

    def download(self, num=3, wait=True):
        """Download all missing chunks with *num* worker threads.

        Returns False if no manifest is loaded.  With ``wait=True`` blocks
        until the workers finish; otherwise returns the shared
        ``(request, f_finish)`` lists so the caller can poll progress.
        """
        if 'element' not in self.content:
            return False
        # indices of chunks still missing; shared work queue for the workers
        request = [i for i in range(len(self.content['element'])) if not self.f_contain[i]]
        lock = threading.Lock()
        threads = list()
        # one sentinel per worker; each worker pops one when it exits
        f_finish = [None] * num
        for n in range(num):
            t = threading.Thread(
                target=self.__download, args=(request, f_finish, lock), name='FileShare', daemon=True)
            t.start()
            threads.append(t)
            time.sleep(1)  # stagger worker start-up -- presumably to spread load; confirm
        if wait:
            for t in threads:
                t.join()
        else:
            return request, f_finish

    def __download(self, request, f_finish, lock):
        """Worker loop: pull random chunk indices from *request* and fetch them."""
        # total fetch failures tolerated before this worker gives up
        allow_fail = max(5, len(request) // 1000)
        while True:
            # check retry counts
            if allow_fail < 0:
                f_finish.pop()
                return
            # get index, hash to try
            with lock:
                try:
                    i = random.choice(request)
                    request.remove(i)
                except IndexError:
                    # no work left
                    f_finish.pop()
                    return
            hex_hash = hexlify(self.content['element'][i]).decode()
            logging.debug("Try %d=0x%s" % (i, hex_hash))
            retry = 5
            while True:
                try:
                    raw = self.pc.get_file(file_hash=hex_hash, only_check=False)
                    if raw:
                        with lock:
                            self.f_contain[i] = True
                        logging.debug("Success %d=0x%s" % (i, hex_hash))
                        break
                    else:
                        raise FileReceiveError('Failed get file, retry')
                except (FileReceiveError, TimeoutError) as e:
                    retry -= 1
                    if retry > 0:
                        time.sleep(5)
                        continue
                    else:
                        # all retries for this chunk exhausted; count one failure
                        logging.info("Failed %d=0x%s" % (i, hex_hash))
                        import traceback
                        traceback.print_exc()
                        allow_fail -= 1
                        break
| 36.65285 | 102 | 0.523325 | 6,762 | 0.955895 | 0 | 0 | 73 | 0.010319 | 0 | 0 | 721 | 0.101923 |
6408dbf78d13c9dddae31edaaf6d36de53538562 | 5,455 | py | Python | sbinn/sbinn_tf.py | lu-group/sbinn | 46a9d27d4bbc0a24fb4599364c4e91d7346c6060 | [
"Apache-2.0"
] | 5 | 2022-02-09T21:30:40.000Z | 2022-03-04T19:40:57.000Z | sbinn/sbinn_tf.py | lu-group/sbinn | 46a9d27d4bbc0a24fb4599364c4e91d7346c6060 | [
"Apache-2.0"
] | 5 | 2022-02-09T23:10:37.000Z | 2022-03-24T18:27:59.000Z | sbinn/sbinn_tf.py | lu-group/sbinn | 46a9d27d4bbc0a24fb4599364c4e91d7346c6060 | [
"Apache-2.0"
] | 1 | 2022-03-05T15:37:50.000Z | 2022-03-05T15:37:50.000Z | import numpy as np
import deepxde as dde
from deepxde.backend import tf
import variable_to_parameter_transform
def sbinn(data_t, data_y, meal_t, meal_q):
    """Train a systems-biology-informed neural network on glucose data.

    Fits a 6-state ODE model (plasma insulin Ip, interstitial insulin Ii,
    glucose G and three delay states h1..h3) to the observed glucose time
    series while inferring the model parameters as trainable variables.
    Writes sampled observations to "glucose_input.dat" and the raw variable
    trajectories to "variables.csv".

    data_t/data_y: time stamps and 6-column state data (column 2 = glucose).
    meal_t/meal_q: meal times and carbohydrate amounts driving glucose input.
    """
    def get_variable(v, var):
        # Squash the unconstrained trainable `var` into (0.2*v, 1.8*v)
        # via tanh, keeping the inferred parameter near its literature value v.
        low, up = v * 0.2, v * 1.8
        l = (up - low) / 2
        v1 = l * tf.tanh(var) + l + low
        return v1

    # One unconstrained trainable scalar per unknown model parameter.
    E_ = dde.Variable(0.0)
    tp_ = dde.Variable(0.0)
    ti_ = dde.Variable(0.0)
    td_ = dde.Variable(0.0)
    k_ = dde.Variable(0.0)
    Rm_ = dde.Variable(0.0)
    a1_ = dde.Variable(0.0)
    C1_ = dde.Variable(0.0)
    C2_ = dde.Variable(0.0)
    C4_ = dde.Variable(0.0)
    C5_ = dde.Variable(0.0)
    Ub_ = dde.Variable(0.0)
    U0_ = dde.Variable(0.0)
    Um_ = dde.Variable(0.0)
    Rg_ = dde.Variable(0.0)
    alpha_ = dde.Variable(0.0)
    beta_ = dde.Variable(0.0)
    var_list_ = [
        E_,
        tp_,
        ti_,
        td_,
        k_,
        Rm_,
        a1_,
        C1_,
        C2_,
        C4_,
        C5_,
        Ub_,
        U0_,
        Um_,
        Rg_,
        alpha_,
        beta_,
    ]

    def ODE(t, y):
        """Residuals of the 6-state glucose-insulin ODE system."""
        # Split the network output into the six states.
        Ip = y[:, 0:1]
        Ii = y[:, 1:2]
        G = y[:, 2:3]
        h1 = y[:, 3:4]
        h2 = y[:, 4:5]
        h3 = y[:, 5:6]
        # Fixed compartment volumes.
        Vp = 3
        Vi = 11
        Vg = 10
        # Parameters with bespoke tanh ranges (not the generic 0.2v..1.8v band).
        E = (tf.tanh(E_) + 1) * 0.1 + 0.1
        tp = (tf.tanh(tp_) + 1) * 2 + 4
        ti = (tf.tanh(ti_) + 1) * 40 + 60
        td = (tf.tanh(td_) + 1) * 25 / 6 + 25 / 3
        # Remaining parameters constrained around their literature values.
        k = get_variable(0.0083, k_)
        Rm = get_variable(209, Rm_)
        a1 = get_variable(6.6, a1_)
        C1 = get_variable(300, C1_)
        C2 = get_variable(144, C2_)
        C3 = 100
        C4 = get_variable(80, C4_)
        C5 = get_variable(26, C5_)
        Ub = get_variable(72, Ub_)
        U0 = get_variable(4, U0_)
        Um = get_variable(90, Um_)
        Rg = get_variable(180, Rg_)
        alpha = get_variable(7.5, alpha_)
        beta = get_variable(1.772, beta_)
        # f1..f4: insulin secretion, glucose utilisation and production terms.
        f1 = Rm * tf.math.sigmoid(G / (Vg * C1) - a1)
        f2 = Ub * (1 - tf.math.exp(-G / (Vg * C2)))
        kappa = (1 / Vi + 1 / (E * ti)) / C4
        # tf.maximum guards the fractional power against non-positive inputs.
        f3 = (U0 + Um / (1 + tf.pow(tf.maximum(kappa * Ii, 1e-3), -beta))) / (Vg * C3)
        f4 = Rg * tf.sigmoid(alpha * (1 - h3 / (Vp * C5)))
        # Exogenous glucose input: decaying pulses for meals already eaten
        # ((sign(dt)+1)/2 masks out future meals).
        dt = t - meal_t
        IG = tf.math.reduce_sum(
            0.5 * meal_q * k * tf.math.exp(-k * dt) * (tf.math.sign(dt) + 1),
            axis=1,
            keepdims=True,
        )
        tmp = E * (Ip / Vp - Ii / Vi)
        # Time derivatives of each state via automatic differentiation.
        dIP_dt = dde.grad.jacobian(y, t, i=0, j=0)
        dIi_dt = dde.grad.jacobian(y, t, i=1, j=0)
        dG_dt = dde.grad.jacobian(y, t, i=2, j=0)
        dh1_dt = dde.grad.jacobian(y, t, i=3, j=0)
        dh2_dt = dde.grad.jacobian(y, t, i=4, j=0)
        dh3_dt = dde.grad.jacobian(y, t, i=5, j=0)
        return [
            dIP_dt - (f1 - tmp - Ip / tp),
            dIi_dt - (tmp - Ii / ti),
            dG_dt - (f4 + IG - f2 - f3 * G),
            dh1_dt - (Ip - h1) / td,
            dh2_dt - (h1 - h2) / td,
            dh3_dt - (h2 - h3) / td,
        ]

    geom = dde.geometry.TimeDomain(data_t[0, 0], data_t[-1, 0])

    # Observes
    # Randomly sample ~20% of interior glucose points (plus both endpoints)
    # as observation constraints on state component 2 (glucose).
    n = len(data_t)
    idx = np.append(
        np.random.choice(np.arange(1, n - 1), size=n // 5, replace=False), [0, n - 1]
    )
    observe_y2 = dde.PointSetBC(data_t[idx], data_y[idx, 2:3], component=2)
    np.savetxt("glucose_input.dat", np.hstack((data_t[idx], data_y[idx, 2:3])))

    data = dde.data.PDE(geom, ODE, [observe_y2], anchors=data_t)
    net = dde.maps.FNN([1] + [128] * 3 + [6], "swish", "Glorot normal")

    def feature_transform(t):
        # Rescale time and add sinusoidal features to help the network
        # represent the oscillatory dynamics.
        t = 0.01 * t
        return tf.concat(
            (t, tf.sin(t), tf.sin(2 * t), tf.sin(3 * t), tf.sin(4 * t), tf.sin(5 * t)),
            axis=1,
        )

    net.apply_feature_transform(feature_transform)

    def output_transform(t, y):
        # Anchor the output to the linear interpolant between the first and
        # the idx-th data point; the tanh factor zeroes the correction at
        # both ends so the network only learns the residual in between.
        idx = 1799
        k = (data_y[idx] - data_y[0]) / (data_t[idx] - data_t[0])
        b = (data_t[idx] * data_y[0] - data_t[0] * data_y[idx]) / (
            data_t[idx] - data_t[0]
        )
        linear = k * t + b
        factor = tf.math.tanh(t) * tf.math.tanh(idx - t)
        # Per-component scaling: glucose (component 2) varies ~100x more.
        return linear + factor * tf.constant([1, 1, 1e2, 1, 1, 1]) * y

    net.apply_output_transform(output_transform)
    model = dde.Model(data, net)

    firsttrain = 10000
    callbackperiod = 1000
    maxepochs = 1000000

    # Warm-up: fit the glucose observations only (ODE losses weighted 0).
    model.compile("adam", lr=1e-3, loss_weights=[0, 0, 0, 0, 0, 0, 1e-2])
    model.train(epochs=firsttrain, display_every=1000)
    # Main phase: enable the ODE residual losses and parameter inference.
    model.compile(
        "adam",
        lr=1e-3,
        loss_weights=[1, 1, 1e-2, 1, 1, 1, 1e-2],
        external_trainable_variables=var_list_,
    )
    variablefilename = "variables.csv"
    variable = dde.callbacks.VariableValue(
        var_list_, period=callbackperiod, filename=variablefilename
    )
    losshistory, train_state = model.train(
        epochs=maxepochs, display_every=1000, callbacks=[variable]
    )
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
# Load the glucose time series (first column = t, rest = states) and the
# meal schedule (first 4 columns, then meal quantities) from disk.
gluc_data = np.hsplit(np.loadtxt("glucose.dat"), [1])
meal_data = np.hsplit(np.loadtxt("meal.dat"), [4])
t = gluc_data[0]
y = gluc_data[1]
meal_t = meal_data[0]
meal_q = meal_data[1]
# Train on the first 1800 samples only (matches idx=1799 in output_transform).
sbinn(
    t[:1800],
    y[:1800],
    meal_t,
    meal_q,
)
# Convert the raw logged tanh-variables into physical parameter values,
# using the same warm-up / callback-period / max-epoch settings as above.
variable_to_parameter_transform.variable_file(10000, 1000, 1000000, "variables.csv")
640924f10b1d56697c6fdd50eb6f54c032ea499e | 308 | py | Python | services/web/server/src/simcore_service_webserver/session.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | services/web/server/src/simcore_service_webserver/session.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | 2 | 2018-05-13T09:10:57.000Z | 2019-03-06T08:10:40.000Z | services/web/server/src/simcore_service_webserver/session.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | """ Session submodule
"""
from aiohttp import web
from servicelib.session import get_session
from servicelib.session import setup_session as do_setup_session
def setup(app: web.Application):
    """Attach session support (from servicelib) to the aiohttp application."""
    do_setup_session(app)
# Public alias matching the module's naming convention ("setup_<module>").
setup_session = setup

# Explicit public API of this submodule.
__all__ = (
    "setup_session",
    "get_session",
)
| 14.666667 | 64 | 0.746753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.198052 |
640a9d4160e1e902532bad6cb3c3e5185242dc67 | 11,852 | py | Python | photometa/forms.py | Flantropy/photometalizer | 3ccfba4a8783f310a33d28c614252cb07bc48536 | [
"MIT"
] | null | null | null | photometa/forms.py | Flantropy/photometalizer | 3ccfba4a8783f310a33d28c614252cb07bc48536 | [
"MIT"
] | null | null | null | photometa/forms.py | Flantropy/photometalizer | 3ccfba4a8783f310a33d28c614252cb07bc48536 | [
"MIT"
] | null | null | null | from django.core.exceptions import ValidationError
from django.core.validators import FileExtensionValidator
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.forms import Form, ModelForm, FileInput
from django.forms.fields import *
from captcha.fields import CaptchaField
from .models import Image
from exif import (
WhiteBalance,
ColorSpace,
ExposureMode,
SceneCaptureType,
SensingMethod,
MeteringMode,
)
def validate_image_size(image: InMemoryUploadedFile):
    """Reject uploaded images larger than 10 MiB.

    Used as a Django field validator: returns None on success.

    Raises:
        ValidationError: if the uploaded file exceeds the size limit.
    """
    # (the original left a debug print of the size here; removed)
    limit = 10 * 1024 * 1024  # 10M
    if image.size > limit:
        raise ValidationError(f'Максимальный размер файла {limit // (1024 * 1024)}M')
class ImageUploadForm(ModelForm):
    """Upload form for ``Image`` objects with captcha, extension and size checks."""
    # Only plain JPEG uploads are accepted.
    ALLOWED_EXTENSIONS = ['jpg', 'jpeg']
    captcha = CaptchaField(label='')
    img = ImageField(
        label='Фото',
        # 'multiple' lets the browser select several files at once;
        # presumably the view iterates request.FILES -- confirm.
        widget=FileInput(attrs={'multiple': True}),
        validators=[
            FileExtensionValidator(allowed_extensions=ALLOWED_EXTENSIONS),
            validate_image_size
        ],
        # NOTE(review): user-facing string contains a typo ("расщирения"
        # instead of "расширения"); left untouched here as it is runtime text.
        help_text=f'Доступные расщирения: {ALLOWED_EXTENSIONS}',
    )

    class Meta:
        model = Image
        fields = ['img']
class ExifEditorForm(Form):
    """Form for editing a subset of EXIF metadata tags on an image.

    Every field is optional.  Enum-backed tags (white balance, color space,
    exposure mode, ...) are rendered as choice fields whose choices come from
    the corresponding ``exif`` package enums and are coerced back to their
    integer EXIF values on clean.
    """
    make = CharField(help_text='Hint', required=False)
    white_balance = TypedChoiceField(
        coerce=int,
        choices=[(x.value, x.name) for x in WhiteBalance],
        help_text='Баланс белого',
        required=False)
    model = CharField(help_text='Hint', required=False)
    software = CharField(help_text='Hint', required=False)
    focal_length = FloatField(help_text='Фокусное расстояние', required=False)
    color_space = TypedChoiceField(
        coerce=int,
        choices=[(x.value, x.name) for x in ColorSpace],
        help_text='Hint',
        required=False)
    exposure_mode = TypedChoiceField(
        coerce=int,
        choices=[(x.value, x.name) for x in ExposureMode],
        help_text='Hint',
        required=False)
    scene_capture_type = TypedChoiceField(
        coerce=int,
        choices=[(x.value, x.name) for x in SceneCaptureType],
        help_text='Hint',
        required=False)
    x_resolution = FloatField(help_text='Hint', required=False)
    y_resolution = FloatField(help_text='Hint', required=False)
    digital_zoom_ratio = FloatField(help_text='Hint', required=False)
    custom_rendered = IntegerField(help_text='Hint', required=False)
    sensing_method = TypedChoiceField(
        coerce=int,
        choices=[(x.value, x.name) for x in SensingMethod],
        help_text='Hint',
        required=False)
    metering_mode = TypedChoiceField(
        coerce=int,
        choices=[(x.value, x.name) for x in MeteringMode],
        help_text='Hint',
        required=False)
    compressed_bits_per_pixel = FloatField(help_text='Hint', required=False)
    shutter_speed_value = FloatField(help_text='Hint', required=False)
    aperture_value = FloatField(help_text='Hint', required=False)
    brightness_value = FloatField(help_text='Hint', required=False, initial=1.0)
    exposure_bias_value = FloatField(help_text='Hint', required=False)
    max_aperture_value = FloatField(help_text='Hint', required=False)
    datetime = CharField(help_text='YYYY:MM:DD HH:MM:SS', required=False)
    datetime_original = CharField(help_text='YYYY:MM:DD HH:MM:SS', required=False)
    datetime_digitized = CharField(help_text='YYYY:MM:DD HH:MM:SS', required=False)
    # NOTE(review): ~120 additional EXIF tags (image dimensions, orientation,
    # lens data, user comments, the full GPS_* family, ...) used to sit here
    # as commented-out CharField definitions.  They were dead code and have
    # been removed; recover them from version control if editors for those
    # tags are ever needed.
| 54.118721 | 85 | 0.724435 | 11,221 | 0.938054 | 0 | 0 | 0 | 0 | 0 | 0 | 8,256 | 0.690186 |
640b6609dd9c643f41eebce25853582c03e0723c | 3,045 | py | Python | setup.py | liu-bin-fluid/PySPOD | 55e0d6cde555c7d2c26c30ff8b4bb1aea76755ea | [
"MIT"
] | null | null | null | setup.py | liu-bin-fluid/PySPOD | 55e0d6cde555c7d2c26c30ff8b4bb1aea76755ea | [
"MIT"
] | null | null | null | setup.py | liu-bin-fluid/PySPOD | 55e0d6cde555c7d2c26c30ff8b4bb1aea76755ea | [
"MIT"
] | null | null | null | import os
import sys
import pyspod
import shutil
from setuptools import setup
from setuptools import Command
# GLOBAL VARIABLES
NAME = pyspod.__name__
URL = pyspod.__url__
AUTHOR = pyspod.__author__
EMAIL = pyspod.__email__
VERSION = pyspod.__version__
KEYWORDS='spectral-proper-orthogonal-decomposition spod'
REQUIRED = [
"numpy",
"scipy",
"matplotlib",
"xarray",
"netcdf4",
"h5py",
"psutil",
"tqdm",
"Sphinx",
"sphinx_rtd_theme",
"ecmwf_api_client",
"cdsapi",
"future",
"pytest",
]
EXTRAS = {
'docs': ['Sphinx==3.2.1', 'sphinx_rtd_theme'],
}
DESCR = (
"PySPOD is a Python package that implements the Spectral Proper Orthogonal"
" Decomposition (SPOD). SPOD is used to extract perfectly coherent spatio-temporal"
" patterns in complex datasets. Original work on this technique dates back"
" to (Lumley 1970), with recent development brought forward by (Towne et al. 2017),"
" (Schmidt et al. 2018), (Schmidt et al. 2019).\n"
"\n"
"PySPOD comes with a set of tutorials spanning weather and climate, seismic and "
" fluid mechanics applications, and it can be used for both canonical problems "
" as well as large datasets. \n"
)
CWD = os.path.abspath(os.path.dirname(__file__))
# COMMANDS
class UploadCommand(Command):
    """Custom ``python setup.py upload`` command.

    Removes stale builds, builds sdist + universal wheel, uploads the
    artifacts to PyPI via twine, then tags and pushes the release tag.
    """
    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            self.status('Removing previous builds...')
            shutil.rmtree(os.path.join(CWD, 'dist'))
        except OSError:
            # no previous build directory to clean up
            pass
        self.status('Building Source and Wheel (universal) distribution...')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
        self.status('Uploading the package to PyPI via Twine...')
        # BUG FIX: was ``os.system('python3 twine upload dist/*')``, which asks
        # the interpreter to execute a script literally named "twine" and fails.
        # Run the installed twine module with the current interpreter instead.
        os.system('{0} -m twine upload dist/*'.format(sys.executable))
        self.status('Pushing git tags...')
        os.system('git tag v{0}'.format(VERSION))
        os.system('git push --tags')
        sys.exit()
# SETUP
setup(
    name=NAME,
    version=VERSION,
    description="Python Spectral Proper Orthogonal Decomposition",
    long_description=DESCR,
    author=AUTHOR,
    author_email=EMAIL,
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Mathematics'
    ],
    keywords=KEYWORDS,
    url=URL,
    license='MIT',
    packages=[NAME],
    # ship the coastline .mat files used by the plotting helpers
    package_data={'': [
        'plotting_support/coast.mat',
        'plotting_support/coast_centred.mat'
    ]},
    data_files=[
        ('pyspod',['pyspod/plotting_support/coast.mat']),
        ('pyspod',['pyspod/plotting_support/coast_centred.mat'])],
    # package_dir={NAME: NAME},
    # package_data={NAME: [
    #     'pyspod/plotting_support/*.mat',
    # ]},
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    zip_safe=False,
    # expose the custom upload command as ``python setup.py upload``
    cmdclass={
        'upload': UploadCommand,
    },)
| 24.756098 | 85 | 0.708374 | 797 | 0.261741 | 0 | 0 | 99 | 0.032512 | 0 | 0 | 1,750 | 0.574713 |
640e131501405e2d30ba4b73bddb7bcc5ccbd2f8 | 5,250 | py | Python | scripts/make_s2and_mini_dataset.py | atypon/S2AND | d1af114edf5941d0ec00f56b11508582e1091913 | [
"Apache-2.0"
] | 39 | 2021-03-09T23:07:43.000Z | 2022-03-24T17:32:54.000Z | scripts/make_s2and_mini_dataset.py | atypon/S2AND | d1af114edf5941d0ec00f56b11508582e1091913 | [
"Apache-2.0"
] | 10 | 2021-03-16T19:19:50.000Z | 2022-03-01T22:16:46.000Z | scripts/make_s2and_mini_dataset.py | atypon/S2AND | d1af114edf5941d0ec00f56b11508582e1091913 | [
"Apache-2.0"
] | 11 | 2021-04-02T11:38:43.000Z | 2022-03-28T02:58:18.000Z | import os
import json
import pickle
import collections
import numpy as np
from s2and.consts import CONFIG
DATA_DIR = CONFIG["main_data_dir"]
OUTPUT_DIR = os.path.join(DATA_DIR, "s2and_mini")
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
# excluding MEDLINE because it has no clusters
DATASETS = [
    "aminer",
    "arnetminer",
    "inspire",
    "kisti",
    "pubmed",
    "qian",
    "zbmath",
]
# drop author-name blocks with more than this many signatures...
BIG_BLOCK_CUTOFF = 500
# ...and keep at most this many blocks per dataset
TOP_BLOCKS_TO_KEEP = 1000
# load all of the artifacts of each dataset
# (per-dataset accumulators, index-aligned with DATASETS)
clusters_all = []
signatures_all = []
X_all = []
keys_all = []
papers_all = []
# Load the four artifact files (SPECTER embeddings, clusters, papers,
# signatures) for every dataset, prefixing cluster/signature ids with the
# dataset name so they remain unique once the datasets are combined.
for dataset in DATASETS:
    print()
    print(f"Loading data from {dataset}...")
    for file_name in os.listdir(os.path.join(DATA_DIR, dataset)):
        file_name = os.path.join(DATA_DIR, dataset, file_name)
        if "specter" in file_name:
            # SPECTER pickle holds (embedding matrix X, parallel paper-id keys)
            with open(file_name, "rb") as _pickle_file:
                X, keys = pickle.load(_pickle_file)
            X_all.append(X)
            keys_all.append(keys)
        elif "cluster" in file_name:
            with open(file_name) as _json_file:
                clusters = json.load(_json_file)
            new_clusters = {}
            for cluster_id, v in clusters.items():
                new_cluster_id = f"{dataset}_{cluster_id}"
                new_v = {
                    "cluster_id": new_cluster_id,
                    "signature_ids": [f"{dataset}_{i}" for i in v["signature_ids"]],
                    "model_version": v["model_version"],
                }
                new_clusters[new_cluster_id] = new_v
            clusters_all.append(new_clusters)
        elif "paper" in file_name:
            with open(file_name) as _json_file:
                papers = json.load(_json_file)
            papers_all.append(papers)
        elif "signature" in file_name:
            with open(file_name) as _json_file:
                signatures = json.load(_json_file)
            new_signatures = {}
            for signature_id, v in signatures.items():
                new_signature_id = f"{dataset}_{signature_id}"
                new_v = {
                    "author_id": v["author_id"],  # maybe this needs to be prepended by dataset?
                    "paper_id": v["paper_id"],
                    "signature_id": new_signature_id,
                    "author_info": v["author_info"],
                }
                new_signatures[new_signature_id] = new_v
            signatures_all.append(new_signatures)
        else:
            print(f"WARNING: Ignoring {file_name} in {dataset}")
print("Finished loading data. Filtering...")
# the goal is speed so we'll remove the largest blocks
# also only keep top 1000 blocks max
# aminer has 32k, inspire has 15k, and kisti has 7k blocks
for dataset, s, c, p, X, k in zip(DATASETS, signatures_all, clusters_all, papers_all, X_all, keys_all):
    # count how many signatures fall into each author-name block
    blocks = []
    for v in s.values():
        blocks.append(v["author_info"]["block"])
    vc = collections.Counter(blocks)
    # keep blocks with <= BIG_BLOCK_CUTOFF signatures, at most
    # TOP_BLOCKS_TO_KEEP of them (sorted by block name for determinism)
    blocks_to_keep = set([k for k, v in sorted(vc.items()) if v <= BIG_BLOCK_CUTOFF][:TOP_BLOCKS_TO_KEEP])
    s_filtered = {k: v for k, v in s.items() if v["author_info"]["block"] in blocks_to_keep}
    # filter the clusters too: keep only clusters fully covered by kept signatures
    c_filtered = {k: v for k, v in c.items() if np.all([i in s_filtered for i in v["signature_ids"]])}
    # go back through the clusters and find the signatures we'll actually need
    # need to do this because sometimes the block name is just... corrupted
    # e.g. "g miller" for most signatures but "g mller" for one...
    signature_keys_to_keep = set()
    for v in c_filtered.values():
        signature_keys_to_keep.update(v["signature_ids"])
    s_filtered = {k: v for k, v in s.items() if k in signature_keys_to_keep}
    # we don't need all the papers anymore. just the ones in signatures
    # also the references of those
    paper_ids = set([v["paper_id"] for v in s_filtered.values()])
    ref_paper_ids = set()
    for v in p.values():
        if v["references"] is not None:
            ref_paper_ids.update(v["references"])
    p_filtered = {k: v for k, v in p.items() if int(k) in paper_ids or int(k) in ref_paper_ids}
    # filter down the specters to those in papers only since we don't use specters for references
    keys_filtered_flag = np.array([i in paper_ids for i in k.astype(int)])
    k_filtered = k[keys_filtered_flag]
    X_filtered = X[keys_filtered_flag, :]
    # save all of the data
    data_output_dir = os.path.join(DATA_DIR, "s2and_mini", dataset)
    if not os.path.exists(data_output_dir):
        os.mkdir(data_output_dir)
    with open(os.path.join(data_output_dir, f"{dataset}_clusters.json"), "w") as _json_file:
        json.dump(c_filtered, _json_file)
    with open(os.path.join(data_output_dir, f"{dataset}_signatures.json"), "w") as _json_file:
        json.dump(s_filtered, _json_file)
    with open(os.path.join(data_output_dir, f"{dataset}_papers.json"), "w") as _json_file:
        json.dump(p_filtered, _json_file)
    with open(os.path.join(data_output_dir, f"{dataset}_specter.pickle"), "wb") as _pickle_file:
        pickle.dump((X_filtered, k_filtered), _pickle_file)
| 38.321168 | 106 | 0.620762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,424 | 0.271238 |
640f471758ef4a58c7063f5ee390d52e3bdd5c27 | 1,101 | py | Python | adminmgr/media/code/python/red3/R1.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/python/red3/R1.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/python/red2/R1.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z | #!/usr/bin/python3
"""reducer.py"""
from operator import itemgetter
import sys
answer_dict={}
current_batsman = None
current_count = 0
current_out=0
word = None
max_wicket=0
# input comes from STDIN
for line in sys.stdin:
line = line.strip()
batsman,wtype,count = line.split('\t')
try:
count = int(count)
except ValueError:
continue
if current_batsman == batsman:
current_count += count
if(wtype!='run out' and wtype!='retired hurt' and wtype!="\"\""):
current_out+=1
else:
if(current_batsman and current_count>5):
bat,bowler=current_batsman.split('*')
answer_dict[(bat,bowler)]=[current_out,current_count]
if(wtype!='runout' and wtype!='retired hurt' and wtype!=""):
current_out=0
current_count=count
current_batsman=batsman
# do not forget to output the last word if needed!
if(current_batsman and current_count>5):
bat,bowler=current_batsman.split('*')
answer_dict[(bat,bowler)]=[current_out,current_count]
sorted_answer=sorted(answer_dict.items(),key=lambda x:(-x[1][0],x[1][1],x[0][0]))
for i in sorted_answer:
print(i[0][0],i[0][1],i[1][0],i[1][1],sep=",")
| 28.230769 | 81 | 0.712988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.158038 |
640fe8452f7911154b7522bc1236edb9adb20fe6 | 1,973 | py | Python | 20160821_wavetable_chorus/code/runscripts/run_sawtooth.py | weegreenblobbie/sd_audio_hackers | 25a9cfa5049d5a1155c80fb1242c061c096a0d14 | [
"MIT"
] | 1 | 2016-07-10T22:37:22.000Z | 2016-07-10T22:37:22.000Z | 20160821_wavetable_chorus/code/runscripts/run_sawtooth.py | weegreenblobbie/sd_audio_hackers | 25a9cfa5049d5a1155c80fb1242c061c096a0d14 | [
"MIT"
] | null | null | null | 20160821_wavetable_chorus/code/runscripts/run_sawtooth.py | weegreenblobbie/sd_audio_hackers | 25a9cfa5049d5a1155c80fb1242c061c096a0d14 | [
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from sdaudio.callables import Circular
from sdaudio.callables import Constant
from sdaudio import draw
from sdaudio import wavio
from sdaudio.wt_oscillators import Choruses
def main():
    """Generate sawtooth demo audio files, with and without wavetable chorus.

    Writes four .wav files to the current directory and shows the matplotlib
    plots of the first two signals.
    """
    #-------------------------------------------------------------------------
    # sawtooth demo
    print("Generating 60 Hz sawtooth, no chorus")
    sr = 8000
    dur = 7.0
    freqs = draw.line(sr, dur, 60, 60)
    x = draw.sawtooth(sr, dur, Circular(freqs), n = 5)
    plt.figure()
    plt.plot(x)
    plt.xlim([0, 3000])
    plt.grid(True)
    plt.title('Sawtooth, n = 5, no chorus')
    fout = 'saw-no-chorus.wav'
    print("Writing: %s" % fout)
    wavio.write(fout, 0.666 * x, sr)
    #-------------------------------------------------------------------------
    # sawtooth oscillator with chorus
    print("Generating 60 Hz sawtooth, with chorus")
    table = draw.sawtooth(sr, 1.0, Constant(1.0))
    # BUG FIX: ``chorus`` was assigned three times in a row; the first two
    # lists ([0.99, 1.0, 1.01] and [0.97, 1.0, 1.03]) were dead experimentation
    # leftovers.  Only the final detune-ratio list was ever used.
    chorus = [0.991234134, 1.012983475290375]
    gen = Choruses(sr, table, chorus)
    x = gen.generate(dur, Circular(freqs))
    plt.figure()
    plt.plot(x)
    plt.xlim([0, 3000])
    plt.grid(True)
    plt.title('Sawtooth, n = 5, with chorus')
    fout = 'saw-with-chorus.wav'
    print("Writing: %s" % fout)
    wavio.write(fout, 0.666 * x, sr)
    #-------------------------------------------------------------------------
    # freq ramp
    print("Generating sawtooth ramp, no chorus")
    freqs = draw.line(sr, dur, 40, 200)
    x = draw.sawtooth(sr, dur, Circular(freqs))
    fout = 'saw-ramp-no-chorus.wav'
    print("Writing: %s" % fout)
    wavio.write(fout, 0.666 * x, sr)
    print("Generating sawtooth ramp, with chorus")
    x = gen.generate(dur, Circular(freqs))
    fout = 'saw-ramp-with-chorus.wav'
    print("Writing: %s" % fout)
    wavio.write(fout, 0.666 * x, sr)
    plt.show()
if __name__ == "__main__":
    main()
641072198fc4166738877f258f40200bcaf10935 | 539 | py | Python | geotrek/outdoor/migrations/0026_auto_20210915_1346.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | 50 | 2016-10-19T23:01:21.000Z | 2022-03-28T08:28:34.000Z | geotrek/outdoor/migrations/0026_auto_20210915_1346.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | 1,422 | 2016-10-27T10:39:40.000Z | 2022-03-31T13:37:10.000Z | geotrek/outdoor/migrations/0026_auto_20210915_1346.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | 46 | 2016-10-27T10:59:10.000Z | 2022-03-22T15:55:56.000Z | # Generated by Django 3.1.13 on 2021-09-15 13:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: replaces the explicit Site.ratings_max
    # M2M with a single Site.ratings many-to-many field to outdoor.Rating
    # (reverse accessor "sites").
    dependencies = [
        ('outdoor', '0025_merge_ratings_min_max'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='site',
            name='ratings_max',
        ),
        migrations.AddField(
            model_name='site',
            name='ratings',
            field=models.ManyToManyField(blank=True, related_name='sites', to='outdoor.Rating'),
        ),
    ]
| 23.434783 | 96 | 0.588126 | 445 | 0.825603 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.263451 |
6410afce20729bdd144bd450343a4fe9f0265fcf | 17,977 | py | Python | run.py | nicholasfalconi/Restaurant-modeling-project | 1a54a0aa79d8e783b369960b5f8bb12aa1f07528 | [
"MIT"
] | null | null | null | run.py | nicholasfalconi/Restaurant-modeling-project | 1a54a0aa79d8e783b369960b5f8bb12aa1f07528 | [
"MIT"
] | null | null | null | run.py | nicholasfalconi/Restaurant-modeling-project | 1a54a0aa79d8e783b369960b5f8bb12aa1f07528 | [
"MIT"
] | 1 | 2022-01-27T00:03:27.000Z | 2022-01-27T00:03:27.000Z | """
Nicholas Falconi
Lizzy Klosa
Kaitlyn hung
Alex baldassare
CISC 204
Modelling project
Wed december 9th 2020
Professor Muise
"""
#Import
from nnf import Var
from nnf import Or
import nnf
from lib204 import Encoding
from csvReader import readCSV
'''
Customer class
Used to create a class containing the various restrictions a
person might have with a restaurant
Paramaters:
price: Price range being searched for
diet: any diet restrictions
dine_opt: preferred dining options
'''
class customer:
    # Holds one diner's search preferences; matched against restaurant
    # attributes by the *_constraint functions below.
    def __init__(self, price_opt, diet_opt, dine_opt,distance):
        self.userprice = price_opt        # price keyword(s): 'low' / 'med' / 'high'
        self.userdiet = diet_opt          # dietary keywords: 'vegan', 'vegetarian', 'gluten', 'lactose'
        self.userdine_opt = dine_opt      # dining keywords: 'dine-in' / 'take-out' / 'delivery'
        self.distance = distance          # travel-time bucket: 'under 10' / '10 to 20' / 'over 20'
#Defining variables for encoding (one propositional Var per option)
#Price point variables ($ / $$ / $$$ on the restaurant side)
low = Var('low')
med = Var('med')
high = Var('high')
#Dietary restriction food options variables
vegetarian = Var('vegetarian')
vegan = Var('vegan')
gluten = Var('gluten')
lactose = Var('lactose')
#Dining variables
dine_in = Var('dine-in')
take_out = Var('take-out')
delivery = Var('delivery')
#Distance variables (travel time buckets, in minutes)
time_under_10 = Var('under 10')
time_10_to_20 = Var('10 to 20')
time_over_20 = Var('over 20')
#Constraints
"""
If the user selected a price constraint and it matches
$,$$,$$$. If the restaurant matches the price point then
the constraint will get returned so that its only holds true
for that instance.
Parameters: Restaurant object, Customer object
Returns: A price constraint
"""
def price_constraint(restaurant,customer):
    """Return a price-point constraint for this customer/restaurant pair.

    The returned formula is satisfiable only when the restaurant's price tag
    ('$', '$$' or '$$$') matches the customer's chosen range; otherwise a
    contradiction is returned so the model cannot hold for this restaurant.
    """
    # (customer keyword, restaurant price tag, satisfied formula, contradiction)
    tiers = (
        ("low", "$", low & ~med & ~high, low & ~low),
        ("med", "$$", med & ~high & ~low, med & ~med),
        ("high", "$$$", high & ~low & ~med, high & ~high),
    )
    for keyword, tag, satisfied, contradiction in tiers:
        if keyword in customer.userprice:
            return satisfied if restaurant.price == tag else contradiction
    # no recognised price selection -> no constraint
    return None
"""
If the user selected a single dietary restriction the
appropriate constraint will get returned so it only
holds true for that instance.
Parameters: Restaurant object, Customer object
Returns: A single dietary restriction constraint
"""
def single_diet_constraint(restaurant, customer):
    """Return the constraint for a customer with exactly one dietary need.

    restaurant.diet layout: [vegan, vegetarian, gluten-free, lactose-free],
    each entry the string 'TRUE' or 'FALSE'.  A contradiction is returned
    when the restaurant cannot accommodate the selected diet.
    """
    # (customer keyword, restaurant.diet index, satisfied formula, contradiction)
    diets = (
        ('gluten', 2, gluten & ~vegan & ~vegetarian & ~lactose, ~gluten & gluten),
        ('lactose', 3, ~gluten & ~vegan & ~vegetarian & lactose, ~lactose & lactose),
        ('vegetarian', 1, ~gluten & ~vegan & vegetarian & ~lactose, ~vegetarian & vegetarian),
        ('vegan', 0, ~gluten & vegan & ~vegetarian & ~lactose, ~vegan & vegan),
    )
    for keyword, index, satisfied, contradiction in diets:
        if keyword in customer.userdiet:
            if 'TRUE' in restaurant.diet[index]:
                return satisfied
            return contradiction
    return None
"""If the user selected two dietary restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: A single two dietary restriction constraint
"""
def two_diet_constraint(restaurant, customer):
    """Return the constraint for a customer with exactly two dietary needs.

    restaurant.diet layout: [vegan, vegetarian, gluten-free, lactose-free],
    each entry 'TRUE' or 'FALSE'.  A contradiction is returned when the
    restaurant cannot accommodate both selected diets.
    """
    #For vegetarian and vegan customers
    if ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]):
            return vegetarian & vegan & ~lactose & ~gluten
        else:
            return vegetarian & ~vegetarian
    #For vegan and lactose free customers
    elif ('vegan' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[3]):
            return ~vegetarian & vegan & lactose & ~gluten
        else:
            return vegan & ~vegan
    #For vegan and gluten free customers (comment fixed: this branch tests vegan)
    elif ('vegan' in customer.userdiet) and ('gluten' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[2]):
            return ~vegetarian & vegan & ~lactose & gluten
        else:
            return vegan & ~vegan
    #For gluten free and lactose free customers
    elif ('gluten' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[3]):
            return ~vegetarian & ~vegan & lactose & gluten
        else:
            return gluten & ~gluten
    #For gluten free and vegetarian customers
    # BUG FIX: keyword was misspelled 'vegitarian', so this branch never matched
    elif ('gluten' in customer.userdiet) and ('vegetarian' in customer.userdiet):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[1]):
            return vegetarian & ~vegan & ~lactose & gluten
        else:
            return gluten & ~gluten
    #For lactose free and vegetarian customers
    # BUG FIX: keyword misspelled 'vegitarian', and the lactose flag checked
    # diet[1] (vegetarian) twice instead of diet[3] (lactose-free)
    elif ('lactose' in customer.userdiet) and ('vegetarian' in customer.userdiet):
        if ('TRUE' in restaurant.diet[3]) and ('TRUE' in restaurant.diet[1]):
            return vegetarian & ~vegan & lactose & ~gluten
        else:
            return lactose & ~lactose
"""If the user selected three dietary restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: a single three dietary constraint
"""
def three_diet_constraint(restaurant,customer):
    """Return the constraint for a customer with exactly three dietary needs.

    restaurant.diet layout: [vegan, vegetarian, gluten-free, lactose-free],
    each entry 'TRUE' or 'FALSE'.  A contradiction is returned when the
    restaurant cannot accommodate all three selected diets.
    """
    # For vegetarian and vegan and gluten free customers
    if ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet) and ('gluten' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[2]):
            return vegetarian & vegan & ~lactose & gluten
        else:
            return vegetarian & ~vegetarian
    # For vegetarian and vegan and lactose free customers
    elif ('vegetarian' in customer.userdiet) and ('vegan' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[3]):
            return vegetarian & vegan & lactose & ~gluten
        else:
            return vegetarian & ~vegetarian
    # For gluten free and vegan and lactose free customers
    # BUG FIX: the vegan flag is diet[0]; this branch checked diet[1] (vegetarian)
    elif ('gluten' in customer.userdiet) and ('vegan' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[0]) and ('TRUE' in restaurant.diet[3]):
            return ~vegetarian & vegan & lactose & gluten
        else:
            return vegetarian & ~vegetarian
    # For gluten free and vegetarian and lactose free customers
    # (added: this fourth three-diet combination was previously unhandled and
    # silently returned None)
    elif ('gluten' in customer.userdiet) and ('vegetarian' in customer.userdiet) and ('lactose' in customer.userdiet):
        if ('TRUE' in restaurant.diet[2]) and ('TRUE' in restaurant.diet[1]) and ('TRUE' in restaurant.diet[3]):
            return vegetarian & ~vegan & lactose & gluten
        else:
            return vegetarian & ~vegetarian
"""If the user selected all dietary restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: a single all dietary constraint
"""
def all_diet_constraint(restaurant,customer):
    """Return the constraint for a customer with every dietary restriction.

    Satisfiable only when the restaurant supports vegan, vegetarian,
    gluten-free and lactose-free options all at once.
    """
    required = ('vegetarian', 'vegan', 'gluten', 'lactose')
    if all(need in customer.userdiet for need in required):
        # restaurant.diet[0:4] = [vegan, vegetarian, gluten-free, lactose-free]
        if all('TRUE' in flag for flag in restaurant.diet[:4]):
            return vegetarian & vegan & lactose & gluten
        return vegetarian & ~vegetarian
    return None
"""If the user selected one dining restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: a single dining constraint
"""
def one_dining_constraints(restaurant, customer):
    """Return the constraint for a customer with a single dining preference.

    restaurant.delivery layout: [dine-in, take-out, delivery], each entry
    'TRUE' or 'FALSE'.  A contradiction is returned when the restaurant does
    not offer the selected option.
    """
    # (customer keyword, restaurant.delivery index, proposition)
    options = (
        ('dine-in', 0, dine_in),
        ('take-out', 1, take_out),
        ('delivery', 2, delivery),
    )
    for keyword, index, proposition in options:
        if keyword in customer.userdine_opt:
            if restaurant.delivery[index] == 'TRUE':
                return proposition
            return ~proposition & proposition
    return None
"""If the user selected two dining restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: two dining constraint
"""
def two_dining_constraints(restaurant, customer):
    """Return the constraint for a customer wanting exactly two dining options.

    restaurant.delivery layout: [dine-in, take-out, delivery], each entry
    'TRUE' or 'FALSE'.  A contradiction is returned when the restaurant lacks
    at least one of the two requested options.
    """
    # ((keyword, keyword), (delivery indices), satisfied formula)
    pairings = (
        (('dine-in', 'take-out'), (0, 1), dine_in & take_out & ~delivery),
        (('dine-in', 'delivery'), (0, 2), dine_in & ~take_out & delivery),
        (('take-out', 'delivery'), (1, 2), ~dine_in & take_out & delivery),
    )
    for (first, second), (i, j), satisfied in pairings:
        if (first in customer.userdine_opt) and (second in customer.userdine_opt):
            if restaurant.delivery[i] == 'TRUE' and restaurant.delivery[j] == 'TRUE':
                return satisfied
            return ~dine_in & dine_in
    return None
"""If the user selected all dining restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: all dining constraint
"""
def all_dining_constraints(restaurant, customer):
# For users that want dine in, Take out and delivery
if ('take-out' in customer.userdine_opt) and ('delivery' in customer.userdine_opt) and ('dine-in' in customer.userdine_opt):
if restaurant.delivery[0] == 'TRUE' and restaurant.delivery[1] == 'TRUE' and restaurant.delivery[2] == 'TRUE':
return dine_in & take_out & delivery
else:
return ~dine_in & dine_in
"""If the user selected distance restrictions the
appropriate constrain will get returned so it only
holds true for that instance
Parameters: Restaurant object, Customer object
Returns: distance constraint
"""
def distanceConstraint(restaurant,customer):
#For customers that want under 10 to campus
if customer.distance == 'under 10':
if restaurant.distance[0] == 'TRUE':
return time_under_10 & ~time_10_to_20 & ~time_over_20
else:
return time_under_10 & ~time_under_10
# For customers that want 10-20 min to campus
if customer.distance == '10 to 20':
if restaurant.distance[1] == 'TRUE':
return time_10_to_20 & ~time_under_10 & ~time_over_20
else:
return time_10_to_20 & ~time_10_to_20
# For customers that dont mind over the distance being over 20 minutes to campus
if customer.distance == 'over 20':
if restaurant.distance[2] == 'TRUE':
return time_over_20 & ~time_10_to_20 & ~time_under_10
else:
return time_over_20 & ~time_over_20
"""
This function is where the constraints get added to our
theory.
Parameters: Restaurant object and Customer object
"""
def example_theory(restaurant,customer):
# Shorter variables for the objects
r = restaurant
c = customer
# Defining encoding variable
E = Encoding()
# Add distance constraint
E.add_constraint(distanceConstraint(r,c))
E.add_constraint(price_constraint(r,c))
# Add dining constraints
if len(user.userdine_opt) == 1:
E.add_constraint(one_dining_constraints(r,c))
elif len(user.userdine_opt) == 2:
E.add_constraint(two_dining_constraints(r,c))
elif len(user.userdine_opt) == 3:
E.add_constraint(all_dining_constraints(r,c))
# Add Diet constraints
if len(user.userdiet) == 1:
if 5 in user.userdiet:
pass
else:
E.add_constraint(single_diet_constraint(r,c))
elif len(user.userdiet) == 2:
E.add_constraint(two_diet_constraint(r,c))
elif len(user.userdiet) == 3:
E.add_constraint(three_diet_constraint(r,c))
elif len(user.userdiet) == 4:
E.add_constraint(all_diet_constraint(r,c))
# return the Encoding variable
return E
"""
Main method: Where the implementation happens. The theory gets solved
where a sorted list from best result to worst result is displayed
to the screen.
The user also inputs their prefrences
"""
if __name__ == "__main__":
# This is where we will get user input information
flag = True
restaurant_list = readCSV()
# While loop to start
while flag:
# creating example theory
# T = example_theory()
# Asking if user wants to continue or exit
prog_exit = input('Welcome to the Queens restuarant finder! Press Q to quit or enter to continue.\n')
# if statement to exit
if prog_exit.lower() == 'q':
break
# Getting users price range information
user_price = int(input('Please select a price range: \n 1. $ - most affordable'\
'\n 2. $$ - intermediate \n 3. $$$ - most expensive\n'))
# Telling user which price was selected as well as some exception handling
if user_price in [1,2,3]:
if user_price == 1:
price = 'low'
print('You selected $.')
elif user_price == 2:
price = 'med'
print('You selected $$.')
else:
price = 'high'
print('You selected $$$')
else:
print('Invalid input: Must be either option 1, 2 or 3')
# Getting diet restrictions of the user
user_restrictions_in = input('Please select the following diet restrictions '
'(please separate by a comma if selecting multiple):'
' \n 1. Vegan \n 2. Vegetarian \n 3. Gluten-free \n'
' 4. lactose intolerant \n 5. No restrictions\n')
# Since there is a possibility of having multiple restrictions, split into list
user_selected_restrictions = user_restrictions_in.split(',')
# Turning list of strings into list of integers
for entry in range(len(user_selected_restrictions)):
user_selected_restrictions[entry] = int(user_selected_restrictions[entry])
# Getting user input for dietary restrictions
diet = []
if 1 in user_selected_restrictions:
diet.append('vegan')
if 2 in user_selected_restrictions:
diet.append('vegetarian')
if 3 in user_selected_restrictions:
diet.append('gluten')
if 4 in user_selected_restrictions:
diet.append('lactose')
# Getting user preference for dining options
user_dine_option = input('Please select a dining option. If multiple separate by a comma: \n 1. Dine-in \n 2. Take-out\n 3. Delivery\n')
dine_in_list = user_dine_option.split(',')
final_list = []
if '1' in dine_in_list:
final_list.append('dine-in')
if '2' in dine_in_list:
final_list.append('take-out')
if '3' in dine_in_list:
final_list.append('delivery')
# Getting user preference for distance
user_distance_option = int(input('Please select a distance from Queens campus:'
' \n 1. Under 10 minutes \n 2. Between 10 and 20 minutes \n 3. Over 20 minutes\n'))
if user_distance_option == 1:
distance = 'under 10'
elif user_distance_option == 2:
distance = '10 to 20'
else:
distance = 'over 20'
# Creating customer class to store information in an object for easier access
user = customer(price, diet, final_list, distance)
# Need to iterate through the list and find which restaurants match with the users preference
# using the example theory function. Use T.solve to find the solution to the users preferences and then match with
# restaurants that match up
# T = example_theory(user)
# List to display results
finalListR = []
# Loops through each restaurant in the csv file
for entry in restaurant_list:
# Variable for example theory method
T = example_theory(entry, user)
""" Checks if the theory is satisfiable for each restaurant.
this is where we determine if the restaurant is a good fit
or not"""
y = example_theory(entry,user).is_satisfiable()
# if the theory is satified
if y == True:
finalListR.insert(0, entry.name)
else:
finalListR.insert(len(finalListR), entry.name)
# to display all the results of restaurants best fit to worst fit
for i in range(len(finalListR)):
if i < 4:
print(f"{i + 1}. %s" % finalListR[i] + ' ' + '★ ★ ★ ★ ★')
elif i >= 4 and i < 7:
print(f"{i + 1}. %s" % finalListR[i] + ' ' + '★ ★ ★ ★')
elif i <= 7 and i < 11:
print(f"{i + 1}. %s" % finalListR[i] + ' ' + '★ ★ ★')
elif i <= 11 and i < 15:
print(f"{i + 1}. %s" % finalListR[i] + ' ' + '★ ★')
else:
print(f"{i + 1}. %s" % finalListR[i] + ' ' + '★') | 35.180039 | 151 | 0.637036 | 217 | 0.012051 | 0 | 0 | 0 | 0 | 0 | 0 | 7,206 | 0.400178 |
6413123da59d354e8ffb8662aa1808d1cf028e0c | 1,521 | py | Python | tdd/app/t_stock_backtest.py | yt7589/aqp | c9c1c79facdea7ace73e2421e8a5868d87fb58dd | [
"Apache-2.0"
] | null | null | null | tdd/app/t_stock_backtest.py | yt7589/aqp | c9c1c79facdea7ace73e2421e8a5868d87fb58dd | [
"Apache-2.0"
] | null | null | null | tdd/app/t_stock_backtest.py | yt7589/aqp | c9c1c79facdea7ace73e2421e8a5868d87fb58dd | [
"Apache-2.0"
] | null | null | null | import unittest
from app.stock_backtest import StockBacktest
class TStockBacktest(unittest.TestCase):
    """Smoke tests for StockBacktest buy/sell and data-retrieval paths."""

    def test_buy_stock(self):
        # Buy 888 shares of 603912.SH for user/account 1 on 2019-01-02.
        backtester = StockBacktest()
        backtester.buy_stock(1, 1, '603912.SH', '20190102', 888)
        self.assertTrue(True)

    def test_sell_stock(self):
        # Sell 800 shares of 603912.SH for user/account 1 on 2019-01-03.
        backtester = StockBacktest()
        backtester.sell_stock(1, 1, '603912.SH', '20190103', 800)
        self.assertTrue(True)

    def test_get_stock_vo(self):
        # Fetch the value object for stock 69 over 2018 and print a summary.
        backtester = StockBacktest()
        vo = backtester.get_stock_vo(69, '603912.SH', '20180101', '20181231')
        print('{0} {1} {2} {3} tran_len:{4} test_len:{5}'.format(
            vo['stock_id'], vo['ts_code'],
            vo['mus'], vo['stds'],
            len(vo['train_x']), len(vo['test_x']))
        )
        self.assertTrue(True)

    def test_get_stocks(self):
        # Fetch every stock over 2018 and print one line per stock.
        backtester = StockBacktest()
        for item in backtester.get_stocks('20180101', '20181231'):
            print('{0} {1}:{2}'.format(
                item['stock_id'], item['ts_code'], len(item['train_x'])))
| 33.8 | 99 | 0.584484 | 1,458 | 0.95858 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.147272 |
64134484d7315ddaea25899ef8706ea900d1a58c | 3,080 | py | Python | lib/sii_connector_auth.py | SunPaz/sii-dte-py | 9b9cdc55edc20d50eaef777560cde763bcf979f3 | [
"MIT"
] | null | null | null | lib/sii_connector_auth.py | SunPaz/sii-dte-py | 9b9cdc55edc20d50eaef777560cde763bcf979f3 | [
"MIT"
] | null | null | null | lib/sii_connector_auth.py | SunPaz/sii-dte-py | 9b9cdc55edc20d50eaef777560cde763bcf979f3 | [
"MIT"
] | 1 | 2020-10-04T00:37:48.000Z | 2020-10-04T00:37:48.000Z | from lib.sii_connector_base import SiiConnectorBase
from lxml.etree import tostring
import logging
from requests import Session
from zeep import Client,Transport
from lib.zeep.sii_plugin import SiiPlugin
from zeep.exceptions import SignatureVerificationFailed
import re
class SiiConnectorAuth(SiiConnectorBase):
    """Authentication flow against the SII SOAP services: fetch a seed,
    sign it, and exchange the signed seed for a session token."""

    # As described in the documentation, the seed is at least 12 digits.
    REGEX_MATCH_SEED = r"<SEMILLA>(\d{12,})</SEMILLA>"
    REGEX_MATCH_TOKEN = r"<TOKEN>(\w{8,})</TOKEN>"
    # State seems to be at least 3 digits (the regex accepts 2+).
    REGEX_MATCH_STATE = r"<ESTADO>(\d{2,})</ESTADO>"

    def __init__(self, server='maullin', module=0, mode=1, ssl=1):
        """Default parameters: test server ('maullin'), CrSeed, test, SSL."""
        SiiConnectorBase.__init__(self, server, module, mode, ssl)

    def get_seed(self):
        """Retrieve "SEMILLA" (seed) used for authentication."""
        logger = logging.getLogger()
        # Calling getSeed SOAP method.
        response = self.soap_client.service.getSeed()
        # Parsing the seed out of the response using a regex.
        match = re.search(self.REGEX_MATCH_SEED, response, re.MULTILINE)
        if match:
            seed = match.group(1)
        # Parsing the state code using a regex.
        match = re.search(self.REGEX_MATCH_STATE, response, re.MULTILINE)
        if match:
            state = match.group(1)
            # State 00 indicates success.
            if state != "00":
                logger.error("get_seed:: Server respond with invalid state code : " + str(state))
        # NOTE(review): ``seed`` is unbound if the seed regex never matched;
        # presumably the server always returns one -- confirm.
        logger.info("Seed " + str(seed))
        return seed

    def read_file(self, f_name):
        """Return the raw bytes of *f_name*."""
        with open(f_name, "rb") as f:
            return f.read()

    def get_token(self, seed):
        """Sign *seed* and exchange it for a session TOKEN string."""
        assert len(seed) >= 12
        logger = logging.getLogger()
        logger.debug("get_token:: Getting token")
        token = ''
        token_message = self.build_token_message(seed)
        response = self.soap_client.service.getToken(token_message)
        # Parsing the token out of the response using a regex.
        match = re.search(self.REGEX_MATCH_TOKEN, response, re.MULTILINE)
        if match:
            token = match.group(1)
        # Parsing the state code using a regex.
        match = re.search(self.REGEX_MATCH_STATE, response, re.MULTILINE)
        if match:
            state = match.group(1)
            # State 00 indicates success; per the messages below, 10/11 mean
            # the signing certificate may not be registered with SII.
            if state == "10" or state == "11":
                logger.error("get_token:: Server respond with invalid state code : " + str(state) + " certificate might not be registered in SII.")
            if state != "00":
                logger.error("get_token:: Server respond with invalid state code : " + str(state))
        # Unload certificate.
        self.unreference_certificate_service()
        return token

    def build_token_message(self, seed):
        """Build the signed <getToken> XML payload around *seed*."""
        logger = logging.getLogger()
        # Build the token template (message + signature template file).
        token_template = u'<getToken><item><Semilla>' + seed + '</Semilla></item>' + self.read_file('cert/sign_sii_fun.tmpl').decode('utf-8') + '</getToken>'
        token_message = self.sii_plugin.sign(token_template)
        # Add the XML standard header.
        token_message = '<?xml version="1.0" encoding="UTF-8"?> ' + token_message
        logger.debug("build_token_message:: Message :")
        logger.debug(str(token_message))
        return token_message
| 33.11828 | 151 | 0.697727 | 2,808 | 0.911688 | 0 | 0 | 0 | 0 | 0 | 0 | 1,152 | 0.374026 |
641430445926479d1d6d4a9324027293c6645f00 | 15,363 | py | Python | src/psd_tools/decoder/image_resources.py | jacenfox/psd-tools2 | 4dff53316a18a96b331832c5fcdb48a15ab26b02 | [
"MIT"
] | 2 | 2019-05-15T12:43:23.000Z | 2020-06-09T15:12:52.000Z | src/psd_tools/decoder/image_resources.py | jacenfox/psd-tools2 | 4dff53316a18a96b331832c5fcdb48a15ab26b02 | [
"MIT"
] | null | null | null | src/psd_tools/decoder/image_resources.py | jacenfox/psd-tools2 | 4dff53316a18a96b331832c5fcdb48a15ab26b02 | [
"MIT"
] | 1 | 2019-03-14T02:24:50.000Z | 2019-03-14T02:24:50.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import io
import warnings
from collections import namedtuple
from psd_tools.utils import (read_pascal_string, unpack, read_fmt,
read_unicode_string, be_array_from_bytes,
decode_fixed_point_32bit)
from psd_tools.constants import (ImageResourceID, PrintScaleStyle,
DisplayResolutionUnit, DimensionUnit)
from psd_tools.decoder import decoders
from psd_tools.decoder.actions import decode_descriptor, UnknownOSType, RawData
from psd_tools.decoder.color import decode_color
from psd_tools.decoder.path import decode_path_resource
# Registry mapping ImageResourceID -> decoder callable.  ``register`` is the
# decorator used below to attach the more complex decoders.
_image_resource_decoders, register = decoders.new_registry()

# Simple fixed-format resources are handled by the generic helper decoders.
_image_resource_decoders.update({
    ImageResourceID.LAYER_STATE_INFO: decoders.single_value("H"),
    ImageResourceID.WATERMARK: decoders.single_value("B"),
    ImageResourceID.ICC_UNTAGGED_PROFILE: decoders.boolean(),
    ImageResourceID.EFFECTS_VISIBLE: decoders.boolean(),
    ImageResourceID.IDS_SEED_NUMBER: decoders.single_value("I"),
    ImageResourceID.INDEXED_COLOR_TABLE_COUNT: decoders.single_value("H"),
    ImageResourceID.TRANSPARENCY_INDEX: decoders.single_value("H"),
    ImageResourceID.GLOBAL_ALTITUDE: decoders.single_value("i"),
    ImageResourceID.GLOBAL_ANGLE: decoders.single_value("i"),
    ImageResourceID.COPYRIGHT_FLAG: decoders.boolean("H"),
    ImageResourceID.ALPHA_NAMES_UNICODE: decoders.unicode_string,
    ImageResourceID.WORKFLOW_URL: decoders.unicode_string,
    ImageResourceID.AUTO_SAVE_FILE_PATH: decoders.unicode_string,
    ImageResourceID.AUTO_SAVE_FORMAT: decoders.unicode_string,
})
# --- Containers for the parsed resource values produced by the decoders ----

# One halftone screen description (four per resource, one per ink).
HalftoneScreen = namedtuple(
    'HalftoneScreen', 'ink_frequency units angle shape accurate_screen '
                      'printer_default')
# One 13-point transfer curve plus its override flag (four per resource).
TransferFunction = namedtuple('TransferFunction', 'curve override')
PrintScale = namedtuple('PrintScale', 'style, x, y, scale')
PrintFlags = namedtuple(
    'PrintFlags', 'labels, crop_marks, color_bars, registration_marks, '
                  'negative, flip, interpolate, caption, print_flags')
PrintFlagsInfo = namedtuple(
    'PrintFlagsInfo',
    'version, center_crop_marks, bleed_width_value, bleed_width_scale')
# Thumbnail header; ``data`` holds the raw JFIF payload.
ThumbnailResource = namedtuple(
    'ThumbnailResource',
    'format, width, height, widthbytes, total_size, size, bits, planes, '
    'data')
# Slices resources: v7/v8 use a descriptor, v6 a fixed binary layout.
SlicesHeader = namedtuple('SlicesHeader', 'descriptor_version, descriptor')
SlicesHeaderV6 = namedtuple(
    'SlicesHeaderV6', 'top left bottom right name count items')
SlicesResourceBlock = namedtuple(
    'SlicesResourceBlock', 'id group_id origin associated_id name type '
    'left top right bottom url target message alt_tag cell_is_html cell_text '
    'horizontal_alignment vertical_alignment alpha red green blue descriptor')
VersionInfo = namedtuple(
    'VersionInfo', 'version, has_real_merged_data, writer_name, '
                   'reader_name, file_version')
UrlListItem = namedtuple('UrlListItem', 'number, id, url')
PixelAspectRatio = namedtuple(
    'PixelAspectRatio', 'version aspect')
# Private base for ResolutionInfo (subclassed below to customise repr).
_ResolutionInfo = namedtuple(
    'ResolutionInfo', 'h_res, h_res_unit, width_unit, v_res, v_res_unit, '
                      'height_unit')
# Descriptor-wrapped resources: each pairs a version with a descriptor.
LayerComps = namedtuple(
    'LayerComps', 'descriptor_version descriptor')
MeasurementScale = namedtuple(
    'MeasurementScale', 'descriptor_version descriptor')
TimelineInformation = namedtuple(
    'TimelineInformation', 'descriptor_version descriptor')
SheetDisclosure = namedtuple(
    'SheetDisclosure', 'descriptor_version descriptor')
OnionSkins = namedtuple('OnionSkins', 'descriptor_version descriptor')
CountInformation = namedtuple(
    'CountInformation', 'descriptor_version descriptor')
PrintInformation = namedtuple(
    'PrintInformation', 'descriptor_version descriptor')
PrintStyle = namedtuple(
    'PrintStyle', 'descriptor_version descriptor')
PathSelectionState = namedtuple(
    'PathSelectionState', 'descriptor_version descriptor')
# Grid/guide geometry.
GridGuideResource = namedtuple(
    'GridGuideResource', 'version grid_horizontal grid_vertical guides')
GuideResourceBlock = namedtuple(
    'GuideResourceBlock', 'location direction')
OriginPathInfo = namedtuple(
    'OriginPathInfo', 'descriptor_version descriptor')
class ResolutionInfo(_ResolutionInfo):
    """Resolution info whose repr shows unit names instead of raw ids."""

    def __repr__(self):
        return ("ResolutionInfo(h_res=%s, h_res_unit=%s, v_res=%s, "
                "v_res_unit=%s, width_unit=%s, height_unit=%s)") % (
            self.h_res,
            DisplayResolutionUnit.name_of(self.h_res_unit),
            self.v_res,
            DisplayResolutionUnit.name_of(self.v_res_unit),
            DimensionUnit.name_of(self.width_unit),
            DimensionUnit.name_of(self.height_unit),
        )
def decode(image_resource_blocks):
    """
    Replaces ``data`` of image resource blocks with parsed data structures.
    """
    return list(map(parse_image_resource, image_resource_blocks))
def parse_image_resource(resource):
    """
    Replaces ``data`` of an image resource block with a parsed data structure.
    """
    rid = resource.resource_id
    if not ImageResourceID.is_known(rid):
        warnings.warn("Unknown resource_id (%s)" % rid)
    # Default: look the decoder up in the registry, falling back to identity;
    # the contiguous PATH_INFO id range overrides the registry.
    decoder = _image_resource_decoders.get(rid, lambda data: data)
    if ImageResourceID.PATH_INFO_0 <= rid <= ImageResourceID.PATH_INFO_LAST:
        decoder = decode_path_resource
    return resource._replace(data=decoder(resource.data))
def _decode_descriptor_resource(data, kls):
    """Decode a version-prefixed descriptor resource into ``kls``.

    Accepts either raw ``bytes`` or an already-open file-like object.
    Bug fix: the original only assigned ``fp`` in the ``bytes`` branch, so
    passing a file-like object raised NameError instead of being read.
    Returns ``data`` unchanged when the descriptor uses an unknown OS type.
    """
    fp = io.BytesIO(data) if isinstance(data, bytes) else data
    version = read_fmt("I", fp)[0]
    try:
        return kls(version, decode_descriptor(None, fp))
    except UnknownOSType as e:
        warnings.warn("Ignoring image resource %s" % e)
        return data
@register(ImageResourceID.GRAYSCALE_HALFTONING_INFO)
@register(ImageResourceID.COLOR_HALFTONING_INFO)
@register(ImageResourceID.DUOTONE_HALFTONING_INFO)
def _decode_halftone_screens(data):
    """Decode four 18-byte HalftoneScreen records (72 bytes total).

    Returns the raw ``data`` unchanged when the size or padding is wrong.
    """
    if not len(data) == 72:
        return data
    fp = io.BytesIO(data)
    descriptions = []
    for i in range(4):
        # 16 bits + 16 bits fixed points.
        ink_frequency = float(read_fmt("I", fp)[0]) / 0x10000
        units = read_fmt("h", fp)[0]
        angle = read_fmt("I", fp)[0] / 0x10000
        shape = read_fmt("H", fp)[0]
        padding = read_fmt("I", fp)[0]
        if padding:
            # Non-zero padding means the block is not laid out as expected.
            warnings.warn("Invalid halftone screens")
            return data
        accurate_screen, printer_default = read_fmt("2?", fp)
        descriptions.append(HalftoneScreen(
            ink_frequency, units, angle, shape, accurate_screen,
            printer_default))
    return descriptions
@register(ImageResourceID.GRAYSCALE_TRANSFER_FUNCTION)
@register(ImageResourceID.COLOR_TRANSFER_FUNCTION)
@register(ImageResourceID.DUOTONE_TRANSFER_FUNCTION)
def _decode_transfer_function(data):
    """Decode four 28-byte TransferFunction records (112 bytes total).

    Returns the raw ``data`` unchanged when the size is wrong.
    """
    if len(data) != 112:
        return data
    fp = io.BytesIO(data)
    # Each record is a 13-point curve followed by an override flag.
    return [
        TransferFunction(read_fmt("13h", fp), read_fmt("H", fp)[0])
        for _ in range(4)
    ]
@register(ImageResourceID.LAYER_GROUP_INFO)
def _decode_layer_group_info(data):
    # One big-endian uint16 per layer.
    return be_array_from_bytes("H", data)


@register(ImageResourceID.LAYER_SELECTION_IDS)
def _decode_layer_selection(data):
    # The first 2 bytes are skipped; the rest are big-endian uint32 ids.
    return be_array_from_bytes("I", data[2:])


@register(ImageResourceID.LAYER_GROUPS_ENABLED_ID)
def _decode_layer_groups_enabled_id(data):
    # One byte per layer group.
    return be_array_from_bytes("B", data)
@register(ImageResourceID.THUMBNAIL_RESOURCE_PS4)
@register(ImageResourceID.THUMBNAIL_RESOURCE)
def _decode_thumbnail_resource(data):
    """Decode a thumbnail header; the JFIF payload is kept as raw bytes."""
    fp = io.BytesIO(data)
    fmt, width, height, widthbytes, total_size, size, bits, planes = read_fmt(
        "6I2H", fp)
    jfif = RawData(fp.read(size))
    return ThumbnailResource(fmt, width, height, widthbytes, total_size, size,
                             bits, planes, jfif)
@register(ImageResourceID.SLICES)
def _decode_slices(data):
    """Decode a slices resource: v6 has a fixed layout, v7/8 a descriptor.

    Unsupported versions are returned as raw ``data`` with a warning.
    """
    fp = io.BytesIO(data)
    version = read_fmt('I', fp)[0]
    if version == 6:
        return _decode_slices_v6(fp)
    elif version in (7, 8):
        return _decode_descriptor_resource(fp.read(-1), SlicesHeader)
    else:
        warnings.warn("Unsupported slices version %s. "
                      "Only version 7 or 8 slices supported." % version)
        return data
def _decode_slices_v6(fp):
    """Decode the fixed-layout (version 6) slices resource body."""
    bbox = read_fmt('4I', fp)
    name = read_unicode_string(fp)
    count = read_fmt('I', fp)[0]
    # The blocks are stored back to back; decode them in order.
    items = [_decode_slices_v6_block(fp) for _ in range(count)]
    return SlicesHeaderV6(bbox[0], bbox[1], bbox[2], bbox[3], name, count,
                          items)
def _decode_slices_v6_block(fp):
    """Decode one slice entry of a version-6 slices resource."""
    slice_id, group_id, origin = read_fmt('3I', fp)
    # An associated layer id is only present when origin == 1.
    associated_id = read_fmt('I', fp)[0] if origin == 1 else None
    name = read_unicode_string(fp)
    slice_type, left, top, right, bottom = read_fmt('5I', fp)
    url = read_unicode_string(fp)
    target = read_unicode_string(fp)
    message = read_unicode_string(fp)
    alt_tag = read_unicode_string(fp)
    cell_is_html = read_fmt('?', fp)[0]
    cell_text = read_unicode_string(fp)
    horizontal_alignment, vertical_alignment = read_fmt('2I', fp)
    alpha, red, green, blue = read_fmt('4B', fp)
    # Some version stores descriptor here, but the documentation unclear...
    descriptor = None
    return SlicesResourceBlock(
        slice_id, group_id, origin, associated_id, name, slice_type, left,
        top, right, bottom, url, target, message, alt_tag, cell_is_html,
        cell_text, horizontal_alignment, vertical_alignment, alpha, red,
        green, blue, descriptor)
@register(ImageResourceID.URL_LIST)
def _decode_url_list(data):
    """Decode a count-prefixed list of (number, id, url) triples.

    Returns the raw ``data`` when an unknown OS type is encountered.
    """
    urls = []
    fp = io.BytesIO(data)
    count = read_fmt("I", fp)[0]
    try:
        for i in range(count):
            number, id = read_fmt("2I", fp)
            url = read_unicode_string(fp)
            urls.append(UrlListItem(number, id, url))
        return urls
    except UnknownOSType as e:
        warnings.warn("Ignoring image resource %s" % e)
        return data
@register(ImageResourceID.VERSION_INFO)
def _decode_version_info(data):
    """Decode version, merged-data flag, writer/reader names, file version."""
    fp = io.BytesIO(data)
    # Fields are read in VersionInfo declaration order.
    return VersionInfo(
        read_fmt("I", fp)[0],
        read_fmt("?", fp)[0],
        read_unicode_string(fp),
        read_unicode_string(fp),
        read_fmt("I", fp)[0],
    )
@register(ImageResourceID.EXIF_DATA_1)
@register(ImageResourceID.EXIF_DATA_3)
def _decode_exif_data(data):
    """Decode EXIF tags into a dict using the optional ``exifread`` package.

    Returns the raw ``data`` unchanged when ``exifread`` is not installed.
    """
    try:
        import exifread
    except ImportError:
        # Narrowed from a bare ``except``: only a missing module should be
        # treated as "decoder unavailable"; other errors must propagate.
        warnings.warn("EXIF data is ignored. Install exifread to decode.")
        return data
    fp = io.BytesIO(data)
    tags = exifread.process_file(fp)
    exif = {}
    for key, ifd in tags.items():
        if isinstance(ifd, exifread.classes.IfdTag):
            field_type = exifread.tags.FIELD_TYPES[ifd.field_type - 1]
            if field_type[1] in ('A', 'B'):
                # ASCII / byte fields stay strings.
                exif[key] = str(ifd)
            else:
                # Numeric fields are converted when possible; EXIF data is
                # sometimes corrupt, so fall back to the string form.
                try:
                    exif[key] = int(str(ifd))
                except ValueError:
                    exif[key] = str(ifd)
        # Non-IfdTag entries (e.g. thumbnails) are skipped.
    return exif
@register(ImageResourceID.PIXEL_ASPECT_RATIO)
def _decode_pixel_aspect_ration(data):
    # NOTE: the "ration" typo in the name is kept for compatibility.
    version = unpack("I", data[:4])[0]
    aspect = unpack("d", data[4:])[0]
    return PixelAspectRatio(version, aspect)


@register(ImageResourceID.PRINT_FLAGS)
def _decode_print_flags(data):
    # Nine booleans followed by one pad byte.
    return PrintFlags(*(unpack("9?x", data)))


@register(ImageResourceID.PRINT_FLAGS_INFO)
def _decode_print_flags_info(data):
    # version, center_crop_marks, pad, bleed_width_value, bleed_width_scale.
    return PrintFlagsInfo(*(unpack("HBxIh", data)))


@register(ImageResourceID.PRINT_SCALE)
def _decode_print_scale(data):
    """Decode print scale; warns (but still returns) unknown styles."""
    style, x, y, scale = unpack("H3f", data)
    if not PrintScaleStyle.is_known(style):
        warnings.warn("Unknown print scale style (%s)" % style)
    return PrintScale(style, x, y, scale)


@register(ImageResourceID.CAPTION_PASCAL)
def _decode_caption_pascal(data):
    # The caption is stored as an ASCII Pascal string.
    fp = io.BytesIO(data)
    return read_pascal_string(fp, 'ascii')
@register(ImageResourceID.RESOLUTION_INFO)
def _decode_resolution(data):
    """Decode resolution info; resolutions are 32-bit fixed-point values."""
    h_res, h_res_unit, width_unit, v_res, v_res_unit, height_unit = unpack(
        "4s HH 4s HH", data)
    h_res = decode_fixed_point_32bit(h_res)
    v_res = decode_fixed_point_32bit(v_res)
    return ResolutionInfo(
        h_res, h_res_unit, width_unit, v_res, v_res_unit, height_unit)
@register(ImageResourceID.GRID_AND_GUIDES_INFO)
def _decode_grid_and_guides_info(data):
    """Decode grid spacing plus the list of (location, direction) guides.

    Returns the raw ``data`` when an unknown OS type is encountered.
    """
    fp = io.BytesIO(data)
    version, grid_h, grid_v, guide_count = read_fmt("4I", fp)
    try:
        guides = []
        for i in range(guide_count):
            guides.append(GuideResourceBlock(*read_fmt("IB", fp)))
        return GridGuideResource(version, grid_h, grid_v, guides)
    except UnknownOSType as e:
        warnings.warn("Ignoring image resource %s" % e)
        return data
@register(ImageResourceID.ICC_PROFILE)
def _decode_icc(data):
    """Parse the embedded ICC profile via PIL; raw bytes if PIL is absent."""
    try:
        from PIL import ImageCms
        return ImageCms.ImageCmsProfile(io.BytesIO(data))
    except ImportError:
        warnings.warn(
            "ICC profile is not handled; colors could be incorrect. "
            "Please build PIL or Pillow with littlecms/littlecms2 support.")
        return data


@register(ImageResourceID.BACKGROUND_COLOR)
def _decode_background_color(data):
    # Delegates to the shared color decoder.
    fp = io.BytesIO(data)
    return decode_color(fp)
# --- Descriptor-wrapped resources: each decodes to (version, descriptor) ---

@register(ImageResourceID.LAYER_COMPS)
def _decode_layer_comps(data):
    return _decode_descriptor_resource(data, LayerComps)


@register(ImageResourceID.MEASUREMENT_SCALE)
def _decode_measurement_scale(data):
    return _decode_descriptor_resource(data, MeasurementScale)


@register(ImageResourceID.TIMELINE_INFO)
def _decode_timeline_information(data):
    return _decode_descriptor_resource(data, TimelineInformation)


@register(ImageResourceID.SHEET_DISCLOSURE)
def _decode_sheet_disclosure(data):
    return _decode_descriptor_resource(data, SheetDisclosure)


@register(ImageResourceID.ONION_SKINS)
def _decode_onion_skins(data):
    return _decode_descriptor_resource(data, OnionSkins)


@register(ImageResourceID.COUNT_INFO)
def _decode_count_information(data):
    return _decode_descriptor_resource(data, CountInformation)


@register(ImageResourceID.PRINT_INFO_CS5)
def _decode_print_information_cs5(data):
    return _decode_descriptor_resource(data, PrintInformation)


@register(ImageResourceID.PRINT_STYLE)
def _decode_print_style(data):
    return _decode_descriptor_resource(data, PrintStyle)


@register(ImageResourceID.PATH_SELECTION_STATE)
def _decode_path_selection_state(data):
    return _decode_descriptor_resource(data, PathSelectionState)


@register(ImageResourceID.CLIPPING_PATH_NAME)
def _decode_clipping_path_name(data):
    fp = io.BytesIO(data)  # TODO: flatness and fill rule decoding?
    return read_pascal_string(fp, 'ascii')


@register(ImageResourceID.ORIGIN_PATH_INFO)
def _decode_origin_path_info(data):
    return _decode_descriptor_resource(data, OriginPathInfo)
| 34.446188 | 79 | 0.705136 | 484 | 0.031504 | 0 | 0 | 7,964 | 0.518388 | 0 | 0 | 2,638 | 0.171711 |
64146f732312850431c198582abd3211505c6c74 | 233 | py | Python | Python/Arquivos/conta_palavras.py | lucsap/APC | 344011552dccdf8210d65cee0531460a1a2d6fae | [
"MIT"
] | null | null | null | Python/Arquivos/conta_palavras.py | lucsap/APC | 344011552dccdf8210d65cee0531460a1a2d6fae | [
"MIT"
] | null | null | null | Python/Arquivos/conta_palavras.py | lucsap/APC | 344011552dccdf8210d65cee0531460a1a2d6fae | [
"MIT"
def palavras_repetidas(fileName, word):
    """Print how many lines of *fileName* contain *word*.

    Fixes a resource leak: the original opened the file without ever
    closing it; a ``with`` block now guarantees the handle is released.
    Also iterates the file lazily instead of materialising ``readlines()``.
    """
    count = 0
    with open(fileName, 'r') as file:
        for line in file:
            if word in line:
                count += 1
    print(f'{word} aparece no arquivo {fileName} {count} vez(es).')
6415bbfefc664e5c565d40bc9a3fb40822f86293 | 7,415 | py | Python | Full_Version.py | Hermosa-Ren/Histogram_of_Oriented_Gradients | 7f50bcf130af3568d4bf3fe00ff09e123da48094 | [
"MIT"
] | null | null | null | Full_Version.py | Hermosa-Ren/Histogram_of_Oriented_Gradients | 7f50bcf130af3568d4bf3fe00ff09e123da48094 | [
"MIT"
] | null | null | null | Full_Version.py | Hermosa-Ren/Histogram_of_Oriented_Gradients | 7f50bcf130af3568d4bf3fe00ff09e123da48094 | [
"MIT"
] | null | null | null | import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
def Compute_Block(cell_gradient_box):
    """Slide a 2x2-cell block over the cell histograms, L2-normalise each
    block and concatenate everything into the final HOG feature vector.

    Improvements: the bin count is derived from the input's third axis
    instead of the module-level ``bin_size`` global (same result for the
    standard (16, 8, 9) grid, but the function is now self-contained);
    the epsilon is hoisted out of the loop and ``np.power(x, 0.5)`` is
    replaced by ``np.sqrt``.

    Parameters: cell_gradient_box -- array of shape (rows, cols, bins)
    Returns: 1-D numpy array of length bins * 4 * (rows-1) * (cols-1)
    """
    rows, cols, bins = cell_gradient_box.shape
    block_len = bins * 4
    hog_vector = np.zeros(block_len * (rows - 1) * (cols - 1))
    # Tiny epsilon so the norm is never zero (same value as the original).
    epsilon = np.power(0.0001, 2)
    k = 0
    for i in range(rows - 1):
        for j in range(cols - 1):
            # Concatenate the four neighbouring cell histograms.
            block = np.concatenate([
                cell_gradient_box[i][j],
                cell_gradient_box[i][j + 1],
                cell_gradient_box[i + 1][j],
                cell_gradient_box[i + 1][j + 1],
            ])
            # L2 normalisation of the block histogram.
            norm = np.sqrt(np.sum(block * block)) + epsilon
            hog_vector[block_len * k: block_len * (k + 1)] = block / norm
            k += 1
    return hog_vector
# Compute the orientation histogram for one cell.
def Cell_Gradient(cell_mag, cell_angle):
    """Histogram one cell's gradient magnitudes into ``bin_size`` angle bins.

    NOTE(review): relies on the module-level ``bin_size``/``cell_size``
    globals set in ``__main__``.
    """
    histogram_cell = np.zeros(bin_size)  # bins at 0 20 40 60 80 100 120 140 160
    for k in range(cell_size):
        for l in range(cell_size):
            cell_mag_catch = cell_mag[k][l]  # magnitude at (k, l)
            cell_angle_catch = cell_angle[k][l]  # angle at (k, l)
            if(cell_angle_catch % 20 == 0):  # exact multiples of 20 go into a single bin
                bin_number = int(cell_angle_catch / 20) % bin_size  # % bin_size folds 180 into bin 0
                histogram_cell[bin_number] += cell_mag_catch
            else:  # other angles split their magnitude between the two neighbouring bins
                bin_number_small = int(cell_angle_catch / 20) % bin_size
                bin_number_big = (bin_number_small + 1) % bin_size  # wraps bin 8 + 1 -> bin 0
                ratio = cell_angle_catch % 20  # linear-interpolation weight within the bin
                histogram_cell[bin_number_small] += (cell_mag_catch * (1 - (ratio / 20)))
                histogram_cell[bin_number_big] += (cell_mag_catch * (ratio / 20))
    # Optional plotting kept for debugging:
    #x = np.arange(0,180,20)
    #plt.xlabel("angle")
    #plt.ylabel("mag")
    #plt.title("Histogram of Gradient")
    #plt.bar(x,histogram_cell,width = 3)
    #plt.savefig(r'path\filename.png')
    #plt.show()
    return histogram_cell
def Computer_Cell(mag, angle):
    """Split the gradient maps into cells and histogram each one.

    NOTE(review): hard-codes the 128x64 window; relies on the module-level
    ``cell_size``/``bin_size`` globals set in ``__main__``.
    """
    cell_gradient_box = np.zeros(((int)(128 / cell_size), (int)(64 / cell_size), bin_size))
    # A 128x64 input yields a (16, 8, 9) histogram grid.
    for i in range(cell_gradient_box.shape[0]):  # top-left cell first; left-to-right, top-to-bottom
        for j in range(cell_gradient_box.shape[1]):
            # Magnitudes of the current cell
            cell_mag = mag[i * cell_size:(i + 1) * cell_size,j * cell_size:(j + 1) * cell_size]
            # Angles of the current cell
            cell_angle = angle[i * cell_size:(i + 1) * cell_size,j * cell_size:(j + 1) * cell_size]
            # Histogram the cell
            cell_gradient_box[i][j] = Cell_Gradient(cell_mag, cell_angle)
    return cell_gradient_box
def Compute_Sobel(img):
    """Compute per-pixel gradient magnitude and orientation with Sobel.

    Improvements: the per-pixel Python double loop that folded angles above
    180 degrees is replaced by the equivalent vectorised numpy operation,
    and the large commented-out display block is removed.

    Parameters: img -- float32 grayscale image
    Returns: (mag, angle) arrays with angle folded into [0, 180]
    """
    gradient_values_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=1)
    gradient_values_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=1)
    mag, angle = cv2.cartToPolar(gradient_values_x, gradient_values_y, angleInDegrees=True)
    # cartToPolar yields angles in [0, 360); HOG uses unsigned gradients,
    # so fold everything above 180 back into [0, 180].
    angle[angle > 180] -= 180
    return mag, angle
# Image_Pretreatment: image pre-processing
def Image_Pretreatment(img):
    """Resize to the canonical 64x128 HOG window and convert to float32."""
    # Resize to the detection window size
    img_resize = cv2.resize(img, (64,128), interpolation=cv2.INTER_CUBIC)
    img_resize_32 = np.float32(img_resize)
    '''
    # Display the resized image
    cv2.namedWindow("Resize",0)
    cv2.resizeWindow("Resize", 256, 512)
    cv2.imshow("Resize",img_resize)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    '''
    # Pre-processing option 1: divide by the maximum intensity
    #img_after = (img_resize_32/np.max(img_resize_32))
    # Pre-processing option 2: divide by 255
    #img_after = (img_resize_32/255)
    # Pre-processing option 3: gamma correction
    #img_after = np.power(img_resize_32,0.9)
    '''
    img_after_uint8 = np.uint8(img_after)
    cv2.namedWindow("img_after",0)
    cv2.resizeWindow("img_after", 256, 512)
    cv2.imshow("img_after", img_after_uint8)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    '''
    #return img_after
    return img_resize_32
# Histogram_of_Oriented_Gradients: full HOG feature pipeline
def Histogram_of_Oriented_Gradients():
    """Run the whole HOG pipeline on the module-level ``input_image_path``.

    Reads the image as grayscale, resizes it to the detection window,
    computes gradients, builds per-cell orientation histograms and finally
    the block-normalised feature vector.
    """
    grayscale = cv2.imread(input_image_path, 0)  # flag 0 -> single channel
    prepared = Image_Pretreatment(grayscale)
    mag, angle = Compute_Sobel(prepared)
    cell_histograms = Computer_Cell(mag, angle)
    return Compute_Block(cell_histograms)
if __name__ == '__main__':
    # Directory the script is run from; the sample images are expected there.
    this_file_path = os.getcwd()
    # os.path.join keeps the script portable (the original hard-coded
    # Windows-style back-slash paths); change the file name here to run the
    # pipeline on another test picture.
    input_image_path = os.path.join(this_file_path, 'running_man_2.png')
    # HOG hyper-parameters: 9 orientation bins over 0-180 degrees and
    # 8x8-pixel cells.
    bin_size = 9
    cell_size = 8
    # Run the full pipeline.
    hog_vector = Histogram_of_Oriented_Gradients()
    print("輸出HOG長度為{}".format(hog_vector.shape[0]))
    # Visualise the resulting feature vector as a bar plot.
    x = np.arange(hog_vector.shape[0])
    plt.title('HOG')
    plt.bar(x, hog_vector, color='red')
    # plt.savefig(os.path.join(this_file_path, 'hog_result.png'))  # optional
    plt.show()
| 35.649038 | 156 | 0.589076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,772 | 0.470031 |
6416796cf00258667b65d09bd35e52a24ddcd9ba | 145 | py | Python | CodeForces/Bit++.py | PratikGarai/Coding-Challenges | af5f4458505b26538ec2d7dc0ca09aa236b2d01c | [
"MIT"
] | null | null | null | CodeForces/Bit++.py | PratikGarai/Coding-Challenges | af5f4458505b26538ec2d7dc0ca09aa236b2d01c | [
"MIT"
] | 2 | 2020-10-01T16:13:37.000Z | 2020-10-30T19:12:38.000Z | CodeForces/Bit++.py | PratikGarai/Coding-Challenges | af5f4458505b26538ec2d7dc0ca09aa236b2d01c | [
"MIT"
] | 6 | 2020-10-03T09:04:26.000Z | 2022-01-09T11:57:40.000Z | n = int(input())
# Apply each of the n statements to the single variable: any statement that
# contains '+' is an increment (++X or X++), otherwise it is a decrement.
x = 0
for _ in range(n):
    statement = input()
    if "+" in statement:
        x += 1
    else:
        x -= 1
print(x)
| 12.083333 | 33 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.02069 |
641735c3bfcea0e37882390480bc0b8efbfeee60 | 259 | py | Python | boj/dynamic/boj_2193.py | ruslanlvivsky/python-algorithm | 2b49bed33cd0e95b8a1e758008191f4392b3f667 | [
"MIT"
] | 3 | 2021-07-18T14:40:24.000Z | 2021-08-14T18:08:13.000Z | boj/dynamic/boj_2193.py | jinsuSang/python-algorithm | 524849a0a7e71034d329fef63c4f384930334177 | [
"MIT"
] | null | null | null | boj/dynamic/boj_2193.py | jinsuSang/python-algorithm | 524849a0a7e71034d329fef63c4f384930334177 | [
"MIT"
] | null | null | null | import sys
# BOJ 2193: count n-digit "pinary" numbers — binary strings that start with 1
# and contain no adjacent 1s. Recurrence: a string of length i ending in 0 can
# extend any shorter string; one ending in 1 must follow a trailing 0.
n = int(sys.stdin.readline().strip())
ends_zero, ends_one = 0, 1  # length 1: only "1", which ends in 1
for _ in range(2, n + 1):
    # The full dp table of the original is unnecessary — only the previous
    # row is read, so keep a rolling pair (O(1) memory instead of O(n)).
    ends_zero, ends_one = ends_zero + ends_one, ends_zero
sys.stdout.write(str(ends_zero + ends_one))
| 17.266667 | 42 | 0.501931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
64199ae9b43b9d4ae5575ea15f308f73ddc6c547 | 889 | py | Python | geotrek/feedback/urls.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | null | null | null | geotrek/feedback/urls.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | null | null | null | geotrek/feedback/urls.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | null | null | null | from django.conf import settings
from django.urls import path, register_converter
from mapentity.registry import registry
from rest_framework.routers import DefaultRouter
from geotrek.common.urls import LangConverter
from geotrek.feedback import models as feedback_models
from .views import CategoryList, FeedbackOptionsView, ReportAPIViewSet
# Make the custom <lang:...> path converter available to the patterns below.
register_converter(LangConverter, 'lang')
# Namespace used by Django URL reversing (e.g. "feedback:categories_json").
app_name = 'feedback'
urlpatterns = [
    path('api/<lang:lang>/feedback/categories.json', CategoryList.as_view(), name="categories_json"),
    path('api/<lang:lang>/feedback/options.json', FeedbackOptionsView.as_view(), name="options_json"),
]
# REST endpoints for reports; trailing_slash=False registers URLs without a
# trailing "/".
router = DefaultRouter(trailing_slash=False)
router.register(r'^api/(?P<lang>[a-z]{2})/reports', ReportAPIViewSet, basename='report')
urlpatterns += router.urls
# Register the Report model with mapentity, appending its generic CRUD URLs;
# menu visibility is driven by the REPORT_MODEL_ENABLED setting.
urlpatterns += registry.register(feedback_models.Report, menu=settings.REPORT_MODEL_ENABLED)
| 38.652174 | 102 | 0.80315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.191226 |
6419d40c21a4416e16add327518a46322b7368d5 | 2,527 | py | Python | tests/unit/utils/test_views.py | rolandgeider/OpenSlides | 331141c17cb23da26e377d4285efdb4a50753a59 | [
"MIT"
] | null | null | null | tests/unit/utils/test_views.py | rolandgeider/OpenSlides | 331141c17cb23da26e377d4285efdb4a50753a59 | [
"MIT"
] | null | null | null | tests/unit/utils/test_views.py | rolandgeider/OpenSlides | 331141c17cb23da26e377d4285efdb4a50753a59 | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest.mock import MagicMock, patch
from openslides.utils import views
@patch('builtins.super')
class SingleObjectMixinTest(TestCase):
    # The class decorator patches the builtin super() for every test method,
    # so each method receives the mock as its extra ``mock_super`` argument
    # and the mixin's calls into the Django base view are intercepted.
    def test_get_object_cache(self, mock_super):
        """
        Test that the method get_object caches his result.
        Tests that get_object from the django view is only called once, even if
        get_object on our class is called twice.
        """
        view = views.SingleObjectMixin()
        view.get_object()
        view.get_object()
        # Only the first call should reach the parent implementation.
        mock_super().get_object.assert_called_once_with()
    def test_dispatch_with_existin_object(self, mock_super):
        # dispatch() must reuse an already-fetched object instead of
        # re-querying via get_object().
        view = views.SingleObjectMixin()
        view.object = 'old_object'
        view.get_object = MagicMock()
        view.dispatch()
        mock_super().dispatch.assert_called_with()
        self.assertEqual(
            view.object,
            'old_object',
            "view.object should not be changed")
        self.assertFalse(
            view.get_object.called,
            "view.get_object() should not be called")
    def test_dispatch_without_existin_object(self, mock_super):
        # Without a cached object, dispatch() must fetch one via get_object().
        view = views.SingleObjectMixin()
        view.get_object = MagicMock(return_value='new_object')
        view.dispatch()
        mock_super().dispatch.assert_called_with()
        self.assertEqual(
            view.object,
            'new_object',
            "view.object should be changed")
        self.assertTrue(
            view.get_object.called,
            "view.get_object() should be called")
class TestAPIView(TestCase):
    def test_class_creation(self):
        """Ensure APIView defines every HTTP verb handler itself and does not
        expose the internal 'method_call' helper."""
        expected_verbs = {'get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace'}
        defined_names = set(views.APIView.__dict__)
        self.assertTrue(
            expected_verbs <= defined_names,
            "All http methods should be defined in the APIView")
        self.assertFalse(
            hasattr(views.APIView, 'method_call'),
            "The APIView should not have the method 'method_call'")
class TestCSRFMixin(TestCase):
    def test_as_view(self):
        """Verify that as_view() wraps the parent view with ensure_csrf_cookie."""
        # Patch the builtin super() as a context manager instead of a
        # decorator; the parent's as_view() then returns a sentinel value.
        with patch('builtins.super') as mock_super:
            mock_super().as_view.return_value = 'super_view'
            with patch('openslides.utils.views.ensure_csrf_cookie') as ensure_csrf_cookie:
                views.CSRFMixin.as_view()
            ensure_csrf_cookie.assert_called_once_with('super_view')
| 31.197531 | 97 | 0.638306 | 2,384 | 0.943411 | 0 | 0 | 1,838 | 0.727345 | 0 | 0 | 800 | 0.316581 |
641a520c95abe7f876833f68a738f9b1945352a3 | 1,619 | py | Python | librair/parsers.py | herreio/librair | 100bf2f07e9088e68d3a8609ac786a9455c57f1f | [
"MIT"
] | 1 | 2020-02-09T13:31:51.000Z | 2020-02-09T13:31:51.000Z | librair/parsers.py | herreio/librair | 100bf2f07e9088e68d3a8609ac786a9455c57f1f | [
"MIT"
] | 1 | 2022-02-02T13:54:15.000Z | 2022-02-02T13:54:15.000Z | librair/parsers.py | herreio/librair | 100bf2f07e9088e68d3a8609ac786a9455c57f1f | [
"MIT"
] | 1 | 2020-02-09T13:39:30.000Z | 2020-02-09T13:39:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .schemas import plain
from .protocols import http
class Beacon:
    """
    Beacon parser for files exposed by given path or url
    """
    def __init__(self, path="", url=""):
        # raw: the file's lines; meta: parsed header fields; links/targets:
        # the parsed link dump; size: number of link lines found.
        self.path = path
        self.url = url
        self.raw = []
        self.meta = {}
        self.links = []
        self.targets = []
        self.size = 0
        if path != "":
            self._read()
        if url != "":
            self._retrieve()
        self._parse()
    def __str__(self):
        # Render the meta header as "KEY<padding>VALUE" rows, padding the key
        # to a 15-character column (longer keys simply get no padding).
        repr = []
        for k in self.meta:
            ws = 15 - len(k)
            repr.append(k + ws * " " + self.meta[k])
        return "\n".join(repr)
    def _retrieve(self):
        # Download the beacon file over HTTP and split it into lines.
        self.raw = http.response_text(http.get_request(self.url)).split("\n")
    def _read(self):
        # Read the beacon file from the local filesystem.
        self.raw = plain.reader(self.path)
    def _parse(self):
        # Header: '#'-prefixed "KEY: VALUE" lines within the first 20 lines.
        temp = [r for r in self.raw[:20] if r.startswith("#")]
        # NOTE(review): replace("#", "") strips *every* '#' in the line, and a
        # header line without ':' would raise IndexError below — presumably
        # well-formed BEACON input is assumed; confirm.
        temp = [t.replace("#", "").split(":", 1) for t in temp]
        temp = [[t[0].strip(), t[1].strip()] for t in temp]
        for t in temp:
            self.meta[t[0]] = t[1]
        # Body: every non-empty line without '#' is a "source|target[|...]" link.
        temp = [r for r in self.raw if "#" not in r and r != ""]
        self.size = len(temp)
        temp = [t.split("|") for t in temp]
        for t in temp:
            self.links.append(t[0])
            # Keep a list when several targets are present, a bare string otherwise.
            self.targets.append(t[1:]) if len(t[1:]) > 1 \
                else self.targets.append(t[1])
    def save(self, file="beacon.txt", path="."):
        """
        save beacon data to given path and file
        """
        plain.writer(self.raw, file=file, path=path)
| 27.440678 | 77 | 0.493515 | 1,513 | 0.934527 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.141445 |
641b7e277781a28e894e621e77d9ddecff88e651 | 700 | py | Python | flask_wdb.py | techniq/flask-wdb | 5f71aa21b21d4101f3aac197941269e763e2593a | [
"BSD-3-Clause"
] | 7 | 2015-11-05T18:29:15.000Z | 2019-06-03T16:00:47.000Z | flask_wdb.py | techniq/flask-wdb | 5f71aa21b21d4101f3aac197941269e763e2593a | [
"BSD-3-Clause"
] | 2 | 2015-09-04T03:24:14.000Z | 2021-01-31T10:10:49.000Z | flask_wdb.py | techniq/flask-wdb | 5f71aa21b21d4101f3aac197941269e763e2593a | [
"BSD-3-Clause"
] | 1 | 2018-03-12T09:09:31.000Z | 2018-03-12T09:09:31.000Z | from flask import Flask
from wdb.ext import WdbMiddleware
class Wdb(object):
    """Flask extension that wires the wdb debugger into an application."""

    def __init__(self, app=None):
        self.app = app
        if app:
            self.init_app(self.app)

    def init_app(self, app):
        """Wrap the application's WSGI stack with WdbMiddleware when WDB is
        enabled (defaults to the app's debug flag)."""
        enabled = app.config.get('WDB_ENABLED', app.debug)
        if enabled:
            start_disabled = app.config.get('WDB_START_DISABLED', False)
            theme = app.config.get('WDB_THEME', 'dark')
            app.wsgi_app = WdbMiddleware(app.wsgi_app, start_disabled, theme)
            # Replace app.run so the Werkzeug debugger cannot take over.
            app.run = self._run

    def _run(self, *args, **kwargs):
        # Force-disable Werkzeug's built-in debugger; wdb handles debugging.
        kwargs["use_debugger"] = False
        return Flask.run(self.app, *args, **kwargs)
| 26.923077 | 77 | 0.611429 | 639 | 0.912857 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.154286 |
641d0a7429ff342f65694bfa35215c3f1a6b06a3 | 16,130 | py | Python | uib_experiments/experiment/experiment.py | miquelmn/uib_experiments | d9cdb72758f7c3862dcb32374d06cce05938f692 | [
"MIT"
] | null | null | null | uib_experiments/experiment/experiment.py | miquelmn/uib_experiments | d9cdb72758f7c3862dcb32374d06cce05938f692 | [
"MIT"
] | null | null | null | uib_experiments/experiment/experiment.py | miquelmn/uib_experiments | d9cdb72758f7c3862dcb32374d06cce05938f692 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" Experiment module.
This module contains a set of function and classes to handles experiments. The aim of these methods
is to be able to save results easily with an standard format.
Written by: Miquel Miró Nicolau
"""
from typing import Union, Tuple, List
import os
import glob
import pickle
import re
import time
import datetime
import warnings
import json
from collections.abc import Iterable
import cv2
import numpy as np
from matplotlib import pyplot as plt
from ..data import dades
from ..database_model import database
# Numeric scalar accepted by the public API.
Num = Union[int, float]
# Either a single Data item or a batch of them.
DataExperiment = Union[dades.Data, List[dades.Data]]
# When True and no explanation is given, ask for one interactively on stdin.
READ_FROM_KEYBOARD = True
# Sentinel description: experiments marked with it skip writing results to disk.
DONT_WRITE_TK = "REM"
class Experiment:
    """ Class to handle different experiment.
    An experiment is defined by a number and a path. The number is the way to identify the
    experiment while the path is the location where the results will be saved.
    Args:
        path (str): Path where the different experiments will be saved.
        num_exp (int): The number of experiment. If the argument has the default value search check
            the folder for the last experiment.
        explanation (str): Free-text description; asked on stdin when omitted
            and READ_FROM_KEYBOARD is True.
        params: Arbitrary experiment parameters, stored and exported as-is.
        database_name (str): Optional sqlite-style results database file name.
    """

    def __init__(self, path: str, logger, num_exp: int = -1, explanation: str = None,
                 params=None, database_name: str = None):
        if num_exp < 0:  # Is not set, we're going to get automatic the number
            # Experiment folders are named "exp_<number>"; the new experiment
            # gets the highest existing number plus one.
            exps = list(glob.iglob(os.path.join(path, "exp_*")))
            exps = sorted(exps,
                          key=lambda x: float(os.path.split(x)[-1].split(".")[0].split("_")[-1]))
            if len(exps) > 0:
                num_exp = int(os.path.split(exps[-1])[-1].split(".")[0].split("_")[-1]) + 1
            else:
                num_exp = 1
        self._logger = logger
        self._num_exp = num_exp
        self.__root_path = path
        self._path = os.path.join(path, "exp_" + str(num_exp))
        self._start_time = 0
        self._end_time = 0
        if READ_FROM_KEYBOARD and explanation is None:
            explanation = input("Enter an explanation for the experiment: ")
        self.__description = explanation
        self._extra_text = None
        self.__params = params
        self.__results = None
        # Remember the numpy seed so the run can be reproduced (see export()).
        self.__random_state = np.random.get_state()[1][0]
        if database_name is not None:
            db = database.ExperimentDB()
            db.start(os.path.join(path, database_name))
        else:
            db = None
        self.__database_name = database_name
        self.__database = db
        self.__database_object = None

    @property
    def db_object(self):
        """Row object of this experiment in the optional results database."""
        return self.__database_object

    @db_object.setter
    def db_object(self, value: database.Experiment):
        self.__database_object = value

    @property
    def path(self):
        """Folder where this experiment's artifacts are written."""
        return self._path

    # NOTE: the original class defined the ``description`` property twice;
    # the duplicate was removed (behaviour is unchanged).
    @property
    def description(self) -> str:
        """Free-text explanation of what the experiment does."""
        return self.__description

    @property
    def time(self):
        """Elapsed seconds between init() and finish(), or -1 while running."""
        # BUG FIX: _end_time is initialised to 0 and never None, so the old
        # "is not None" check made the -1 branch unreachable and returned a
        # large negative value for unfinished experiments.
        if self._end_time:
            return self._end_time - self._start_time
        return -1

    @property
    def results(self) -> dict:
        return self.__results

    @results.setter
    def results(self, value: dict):
        # Log and mirror the metrics into the database, when one is attached.
        self._logger.info(f"Metrics {value}")
        if self.__database is not None:
            self.__database.add_metrics(self, value)
        self.__results = value

    @property
    def start_time(self):
        return self._start_time

    @property
    def end_time(self):
        return self._end_time

    @property
    def random_state(self):
        return self.__random_state

    @property
    def params(self):
        return self.__params

    @params.setter
    def params(self, values):
        self._logger.info(f"Params {values}")
        self.__params = values

    def get_num_exp(self) -> int:
        return self._num_exp

    def init(self):
        """ Initializes the experiment. """
        if self.__description != DONT_WRITE_TK:
            Experiment._create_folder(self._path)
        self._start_time = time.time()
        self._logger.info(f"Experiment {self._num_exp} has started.")

    def finish(self, results=None):
        """ Finishes the experiment.
        Writes the human-readable resume, stores the results in the optional
        database and dumps the experiment metadata as JSON.
        Raises:
            RuntimeError when the experiment was not started
        """
        if self._start_time == 0:
            raise RuntimeError("ERROR: Trying to finish a non initialized experiment.")
        self._end_time = time.time()
        path = os.path.join(self._path, "experiment_resume.txt")
        if self.__description != DONT_WRITE_TK:
            with open(path, "w") as text_file:
                text_file.write(self.__get_resume())
        if self.__database is not None:
            self.__database.add_experiment(experiment=self, params=self.params, results=results)
        # BUG FIX: when the description is the DONT_WRITE_TK sentinel, init()
        # never created the experiment folder and the JSON dump below failed.
        # Creating it here is idempotent and safe.
        Experiment._create_folder(self._path)
        with open(os.path.join(self._path, "experiment.json"), "w") as outfile:
            json.dump(self.export(), outfile)
        self._logger.info(
            f"Experiment {self._num_exp} finished after {self.time}.")

    def add_metrics(self, metrics: dict, theta: int = None):
        """ Add metrics to experiment.
        Add metrics to the experiment. In the case that there is a database object also update it
        to contain this information.
        Args:
            metrics: Dictionary containing the metrics as {metric_name => metric_value}.
            theta: (optional) Integer indicating the theta value of the experiment.
        """
        self._logger.info(f"Metrics {metrics}")
        if self.__database is not None:
            self.__database.add_metrics(self, metrics, theta=theta)

    def set_explanation(self, explanation: str):
        """ Warning: Deprecated
        Sets the description of the algorithm
        Args:
            explanation (str): Explanation of the algorithm.
        """
        warnings.warn("Endpoint deprecated, use instead set_description")
        self.set_description(explanation)

    def set_description(self, description: str):
        """ Sets the description of the algorithm
        Args:
            description (str): Explanation of the algorithm.
        """
        self.__description = description

    def __get_resume(self) -> str:
        """ Resume of the experiment.
        Constructs an string with information about the experiment.
        """
        resum = "%s \tExperiment %s started" % (
            datetime.datetime.fromtimestamp(self._start_time).strftime("%d/%m/%Y %H:%M:%S"),
            str(self._num_exp))
        if self.__description is not None:
            resum += "\n\t\t\t%s" % self.__description
        if self.__params is not None:
            resum += "\n\t\t\targs: " + str(self.__params)
        if self._extra_text is not None:
            resum = resum + "\n\t\t\t %s" % self._extra_text
        # NOTE: self.time is measured in seconds although the line says
        # "minutes"; the historical wording is kept for compatibility.
        resum += f"\n\t\t\tElapsed time {self.time} minutes"
        resum += "\n%s \tExperiment %s finished" % (
            datetime.datetime.fromtimestamp(self._end_time).strftime("%d/%m/%Y %H:%M:%S"),
            str(self._num_exp))
        return resum

    def save_result(self, dada: DataExperiment):
        """ Save one result or a batch of results, unless saving is disabled
        by the DONT_WRITE_TK sentinel description.
        Args:
            dada: A single Data object or a list of them.
        """
        if self.__description != DONT_WRITE_TK:
            if isinstance(dada, List):
                self.__save_results_batch(dada)
            else:
                self.__save_result_single(dada)

    def __save_result_single(self, dada: dades.Data):
        """ Dispatch one Data object to the writer matching its storage type.
        Args:
            dada: Data object to persist.
        """
        storage_type = dada.storage_type
        if dades.Data.is_image(storage_type):
            self._save_data_img(dada)
        elif storage_type == dades.STORAGES_TYPES[2]:
            self._save_string(dada)
        elif storage_type == dades.STORAGES_TYPES[3]:
            self.__save_object(dada)
        elif storage_type in (dades.STORAGES_TYPES[4], dades.STORAGES_TYPES[7]):
            self._save_coordinates(dada)
        elif storage_type == dades.STORAGES_TYPES[1]:
            self._save_coordinates_image(dada)
        elif storage_type == dades.STORAGES_TYPES[10]:
            self._save_coordinates_values_images(dada)

    def __save_object(self, data: dades.Data):
        """ Pickle object
        """
        path, name = self._create_folders_for_data(data)
        # os.path.join instead of manual "/" concatenation keeps this portable.
        with open(os.path.join(path, name + '.pickle'), 'wb') as handle:
            pickle.dump(data.data, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __save_results_batch(self, datas: List[dades.Data]):
        """ Save data of the experiment.
        Saves a list of multiples data.
        Args:
            datas (List of data):
        """
        # A plain loop (instead of a side-effect list comprehension) makes the
        # intent explicit.
        for dat in datas:
            self.save_result(dat)

    def _save_coordinates_values_images(self, datas: dades.Data) -> None:
        """ Save image with value for coordinates.
        Expects three types of data. The first of all the image. An image is a numpy matrix. The
        second one the coordinates. Should be a array with two columns one for every dimension.
        The third one a value for every one of the coordinates.
        Save an image with the values drawed over the original image in the points indicated by the
        coordinates.
        Args:
            datas:
        Returns:
            None
        Raises:
            Value error if the values and the coordinates are not of the same length.
        """
        image, coordinates, values = datas.data
        image = np.copy(image)
        if len(coordinates) != len(values):
            raise ValueError("Coordinates and values should have the same length.")
        # Clamp pixels brighter than the value range so the painted points
        # remain distinguishable after the colormap is applied.
        image[image > values.max()] = values.max() + 5
        curv_img = Experiment._draw_points(image, coordinates, values, 0).astype(np.uint8) * 255
        curv_img = Experiment.__apply_custom_colormap(curv_img)
        self.__save_img(datas, curv_img)

    def add_text(self, text: str) -> None:
        """
        Add a text into the resume file.
        Args:
            text: String with the text to add
        """
        if self._extra_text is None:
            self._extra_text = text
        else:
            self._extra_text = self._extra_text + "\n" + text

    def _save_coordinates_image(self, data: dades.Data) -> None:
        """ Draw the given coordinates over the image and save the result.
        Args:
            data: Data whose payload is an (image, coordinates) pair.
        """
        image, coordinates = data.data
        res_image = Experiment._draw_points(image, coordinates, values=image.max() // 2, side=2)
        self.__save_img(data, res_image)

    def _save_coordinates(self, data: dades.Data) -> None:
        """ Save an array of coordinates as a CSV file.
        Args:
            data: Data whose payload is a numpy array of coordinates.
        Raises:
            ValueError if the payload is not a numpy array.
        """
        dat = data.data
        if not isinstance(dat, np.ndarray):
            raise ValueError("Not a valid data for the coordinates.")
        path, name = self._create_folders_for_data(data)
        np.savetxt(os.path.join(path, name + ".csv"), dat, delimiter=",")

    def _save_data_img(self, data: dades.Data) -> None:
        """ Save the image.
        The image is saved on the path result of the combination of the global path of the class
        and the local one set in the data parameter.
        Args:
            data (dades.Data):
        """
        self.__save_img(data, data.data)

    def __save_img(self, data: dades.Data, image: np.ndarray):
        """Write *image* into the data's folder, appending ".png" when the
        name has no three-letter extension."""
        path, name = self._create_folders_for_data(data)
        # Raw string: "\." in a normal literal is an invalid escape sequence
        # (DeprecationWarning on modern Python).
        if not re.match(r".*\..{3}$", name):
            name = name + ".png"
        cv2.imwrite(os.path.join(path, name), image)

    def _save_string(self, data: dades.Data) -> None:
        """ Save a text payload into the data's folder.
        Args:
            data: Data whose payload is the string to write.
        """
        # BUG FIX: the previous implementation discarded the file name and
        # opened the *directory* path, which raises IsADirectoryError. Write
        # to a file inside the folder instead, defaulting to a .txt suffix.
        path, name = self._create_folders_for_data(data)
        if not re.match(r".*\..{3}$", name):
            name = name + ".txt"
        with open(os.path.join(path, name), "w") as text_file:
            text_file.write(data.data)

    def _create_folders_for_data(self, data: dades.Data) -> Tuple[str, str]:
        """ Create recursively the folder tree.
        Args:
            data:
        Returns:
            Tuple (folder path, file name). When the data has no name, a
            sequential number based on the files already present is used.
        """
        path = os.path.join(self._path, data.path)
        Experiment._create_folder(path)
        name = data.name
        if name is None:
            files = list(glob.iglob(os.path.join(path, "*")))
            name = str(len(files))
        return path, name

    @staticmethod
    def _create_folder(path):
        """ Create recursively the folder tree.
        Args:
            path:
        Returns:
            The created (or already existing) path.
        """
        if not os.path.exists(path):
            os.makedirs(path)
        return path

    @staticmethod
    def _draw_points(img, points, values, side=0):
        """ Draw the value in the points position on the image. The drawing function used
        is a square, the side is the length of the square
        Args:
            img:
            points: Iterable of (x, y) coordinates.
            values: Either one value per point or a scalar for all points.
            side: Half-side of the painted square; 0 paints a single pixel.
        Returns:
            A float32 copy of *img* with the values painted in.
        """
        mask = np.copy(img)
        mask = mask.astype(np.float32)
        for i, point in enumerate(points):
            # Per-point value when an iterable is given, scalar otherwise.
            if isinstance(values, Iterable):
                val = values[i]
            else:
                val = values
            if side == 0:
                mask[point[1], point[0]] = val
            else:
                mask[int(point[1] - side): int(point[1] + side),
                     int(point[0] - side): int(point[0] + side)] = val
        return mask

    @staticmethod
    def __apply_custom_colormap(image_gray, cmap=plt.get_cmap('viridis')):
        """ Applies a CMAP from matplotlib to a gray-scale image.
        Args:
            image_gray: uint8 grayscale image (2-D, or 3-D with one channel).
            cmap: Matplotlib colormap to apply.
        Returns:
            BGR uint8 image with the colormap applied.
        """
        assert image_gray.dtype == np.uint8, 'must be np.uint8 image'
        if image_gray.ndim == 3:
            image_gray = image_gray.squeeze(-1)
        # Initialize the matplotlib color map
        sm = plt.cm.ScalarMappable(cmap=cmap)
        # Obtain linear color range
        color_range = sm.to_rgba(np.linspace(0, 1, 256))[:, 0:3]  # color range RGBA => RGB
        color_range = (color_range * 255.0).astype(np.uint8)  # [0,1] => [0,255]
        color_range = np.squeeze(
            np.dstack([color_range[:, 2], color_range[:, 1], color_range[:, 0]]),
            0)  # RGB => BGR
        # Apply colormap for each channel individually
        channels = [cv2.LUT(image_gray, color_range[:, i]) for i in range(3)]
        return np.dstack(channels)

    def export(self) -> dict:
        """ Exports experiment to a dict.
        Returns:
            Experiment info
        """
        info = {'path': self.__root_path, 'description': self.__description,
                'num_exp': self._num_exp, 'random_state': int(self.__random_state),
                'end_time': self._end_time, 'start_time': self._start_time, 'params': self.params,
                'database_name': self.__database_name}
        if self.__params is not None:
            info['params'] = self.__params
        if self.__database is not None:
            info['id_database'] = self.__database_object.exp_id
        return info

    @staticmethod
    def import_from_json(path: str, logger):
        """ Import Experiment info from json file.
        Imports the Experiment information previously exported with the respective function.
        Args:
            path (str): Path to the file containing the info.
            logger (logger): Python logger object
        Returns:
            Experiment object with the information found in the file (path).
        """
        with open(path, "r") as infile:
            data = json.load(infile)
        exp = Experiment(path=data['path'], num_exp=data['num_exp'], params=data['params'],
                         explanation=data['description'], database_name=data['database_name'],
                         logger=logger)
        if 'id_database' in data:
            db_exp = database.Experiment.get(exp_id=data['id_database'])
            exp.db_object = db_exp
        return exp
| 28.548673 | 99 | 0.588593 | 15,445 | 0.957414 | 0 | 0 | 4,282 | 0.265435 | 0 | 0 | 5,334 | 0.330647 |
641d5ed927aa7dc57c1e388779384eb82bc87778 | 507 | py | Python | Leetcode/0498. Diagonal Traverse.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | 1 | 2021-07-15T18:40:26.000Z | 2021-07-15T18:40:26.000Z | Leetcode/0498. Diagonal Traverse.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | null | null | null | Leetcode/0498. Diagonal Traverse.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | null | null | null | import collections
class Solution:
    """LeetCode 498 — return the matrix elements in zig-zag diagonal order."""

    def findDiagonalOrder(self, matrix: list[list[int]]) -> list[int]:
        # Group values by diagonal index (row + col). Scanning rows top to
        # bottom keeps the dict's insertion order sorted by diagonal.
        diagonals = {}
        for row_idx, row in enumerate(matrix):
            for col_idx, value in enumerate(row):
                diagonals.setdefault(row_idx + col_idx, []).append(value)
        traversal = []
        for diag_idx, values in diagonals.items():
            # Even diagonals are walked bottom-left to top-right: reverse them.
            if diag_idx % 2 == 0:
                traversal.extend(reversed(values))
            else:
                traversal.extend(values)
        return traversal
| 29.823529 | 70 | 0.524655 | 485 | 0.956607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
641df5fb5db24eb21cd2895f9c79ad3ad2b58d9b | 321 | py | Python | src/mars_profiling/__init__.py | wjsi/mars-profiling | 1accb00c90da67b46ad98ea1592fecc524625454 | [
"MIT"
] | 1 | 2021-01-21T09:11:58.000Z | 2021-01-21T09:11:58.000Z | src/mars_profiling/__init__.py | wjsi/mars-profiling | 1accb00c90da67b46ad98ea1592fecc524625454 | [
"MIT"
] | null | null | null | src/mars_profiling/__init__.py | wjsi/mars-profiling | 1accb00c90da67b46ad98ea1592fecc524625454 | [
"MIT"
] | 1 | 2020-12-30T08:32:29.000Z | 2020-12-30T08:32:29.000Z | """Main module of pandas-profiling.
.. include:: ../../README.md
"""
from mars_profiling.config import Config, config
from mars_profiling.controller import pandas_decorator
from mars_profiling.profile_report import ProfileReport
from mars_profiling.version import __version__
clear_config = ProfileReport.clear_config
| 26.75 | 55 | 0.82243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.214953 |
641dfd3f82c50b129dcf2d935fc5c8f6104d2c1a | 161 | py | Python | revscoring/revscoring/languages/features/dictionary/tests/test_util.py | yafeunteun/wikipedia-spam-classifier | fca782b39b287fbc0b2dd54f8e2bf33c6d3bc519 | [
"MIT"
] | 2 | 2016-10-26T18:58:53.000Z | 2017-06-22T20:11:20.000Z | revscoring/revscoring/languages/features/dictionary/tests/test_util.py | yafeunteun/wikipedia-spam-classifier | fca782b39b287fbc0b2dd54f8e2bf33c6d3bc519 | [
"MIT"
] | null | null | null | revscoring/revscoring/languages/features/dictionary/tests/test_util.py | yafeunteun/wikipedia-spam-classifier | fca782b39b287fbc0b2dd54f8e2bf33c6d3bc519 | [
"MIT"
] | null | null | null | from nose.tools import eq_
from ..util import utf16_cleanup
def test_utf16_cleanup():
    # chr(2 ** 16) is U+10000, the first code point outside the BMP; the
    # cleanup presumably replaces it with U+FFFD (the replacement character).
    cleaned = utf16_cleanup("Foobar" + chr(2 ** 16))
    eq_(cleaned, "Foobar\uFFFD")
| 17.888889 | 47 | 0.677019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.136646 |
641e664d10bd57eb59a8d00d6a45b411c67df279 | 128 | py | Python | whereToGo/src/sensors/_conf_sensors.py | k323r/whereToGo | 5ec94a49e818c6a2acfb08c66755100c26c42f22 | [
"MIT"
] | null | null | null | whereToGo/src/sensors/_conf_sensors.py | k323r/whereToGo | 5ec94a49e818c6a2acfb08c66755100c26c42f22 | [
"MIT"
] | null | null | null | whereToGo/src/sensors/_conf_sensors.py | k323r/whereToGo | 5ec94a49e818c6a2acfb08c66755100c26c42f22 | [
"MIT"
] | null | null | null | import os.path
# Three directory levels above this configuration module — presumably the
# repository root (src/sensors/_conf_sensors.py -> repo); confirm the layout.
_abs_here = os.path.abspath(__file__)
ROOT = os.path.dirname(os.path.dirname(os.path.dirname(_abs_here)))
| 16 | 37 | 0.59375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
641e70a5914ffc3dba0ea328a0529e8b586a37b9 | 2,533 | py | Python | tools/fileinfo/features/eziriz-packer-detection/test.py | stepanek-m/retdec-regression-tests | 12b834b14ede2826fec451368fa8192ab00ddadf | [
"MIT"
] | null | null | null | tools/fileinfo/features/eziriz-packer-detection/test.py | stepanek-m/retdec-regression-tests | 12b834b14ede2826fec451368fa8192ab00ddadf | [
"MIT"
] | null | null | null | tools/fileinfo/features/eziriz-packer-detection/test.py | stepanek-m/retdec-regression-tests | 12b834b14ede2826fec451368fa8192ab00ddadf | [
"MIT"
] | null | null | null | from regression_tests import *
class Eziriz42Test(Test):
    settings = TestSettings(
        tool='fileinfo',
        args='--json --verbose',
        input=['x86-pe-ff10e014c94cbc89f9e653bc647b6d5a', 'x86-pe-d5a674ff381b95f36f3f4ef3e5a8d0c4-eziriz42']
    )

    def test_fileinfo_json_output_is_correctly_parsed(self):
        """Check the PE format, the empty CLR data directory entry and the
        detected .NET Reactor 4.2 packer/language."""
        assert self.fileinfo.succeeded
        out = self.fileinfo.output
        self.assertEqual(out['fileFormat'], 'PE')
        self.assertEqual(out['dataDirectories']['numberOfDataDirectories'], '16')
        clr_entry = out['dataDirectories']['dataDirectoryEntries'][14]
        self.assertEqual(clr_entry['index'], '14')
        self.assertEqual(clr_entry['address'], '0')
        self.assertEqual(clr_entry['size'], '0')
        self.assertEqual(clr_entry['type'], 'CLR runtime header')
        self.assertEqual(out['tools'][0]['name'], 'Eziriz .NET Reactor')
        self.assertEqual(out['tools'][0]['version'], '4.2')
        self.assertEqual(out['languages'][0]['name'], 'CIL/.NET')
        self.assertTrue(out['languages'][0]['bytecode'])
class Eziriz50Test(Test):
    settings = TestSettings(
        tool='fileinfo',
        args='--json --verbose',
        input='x86-pe-08f9c6c1cfb53ece69025050c95fcd5e-eziriz5'
    )

    def test_fileinfo_json_output_is_correctly_parsed(self):
        """The CLR data directory must be present (non-zero address and size)
        and the .NET Reactor 4.8-5.0 packer must be detected."""
        assert self.fileinfo.succeeded
        self.assertEqual(self.fileinfo.output['fileFormat'], 'PE')
        self.assertEqual(self.fileinfo.output['dataDirectories']['numberOfDataDirectories'], '15')
        self.assertEqual(self.fileinfo.output['dataDirectories']['dataDirectoryEntries'][14]['index'], '14')
        # BUG FIX: the JSON values are strings (see the '0' comparisons in
        # Eziriz42Test), so comparing against the integer 0 was always True
        # and these assertions could never fail. Compare to the string '0'.
        self.assertTrue(self.fileinfo.output['dataDirectories']['dataDirectoryEntries'][14]['address'] != '0')
        self.assertTrue(self.fileinfo.output['dataDirectories']['dataDirectoryEntries'][14]['size'] != '0')
        self.assertEqual(self.fileinfo.output['dataDirectories']['dataDirectoryEntries'][14]['type'], 'CLR runtime header')
        self.assertEqual(self.fileinfo.output['tools'][0]['name'], 'Eziriz .NET Reactor')
        self.assertEqual(self.fileinfo.output['tools'][0]['version'], '4.8 - 5.0')
        self.assertEqual(self.fileinfo.output['languages'][0]['name'], 'CIL/.NET')
        self.assertTrue(self.fileinfo.output['languages'][0]['bytecode'])
| 57.568182 | 123 | 0.69167 | 2,498 | 0.986182 | 0 | 0 | 0 | 0 | 0 | 0 | 954 | 0.376629 |
641e9e0dce5e1416f0e6f8b2e384e21e197b3478 | 160 | py | Python | Codigo_EngDados/exe03.py | SouzaMarcel0/Exercicios_Python | d0c75292b7c725faeaf49db161b7d1a0f71a9418 | [
"MIT"
] | null | null | null | Codigo_EngDados/exe03.py | SouzaMarcel0/Exercicios_Python | d0c75292b7c725faeaf49db161b7d1a0f71a9418 | [
"MIT"
] | null | null | null | Codigo_EngDados/exe03.py | SouzaMarcel0/Exercicios_Python | d0c75292b7c725faeaf49db161b7d1a0f71a9418 | [
"MIT"
] | null | null | null |
# WARNING(security): hard-coded MongoDB credentials committed to source
# control in the connection string below — rotate this password and load the
# URI from an environment variable or a config file instead.
client = pymongo.MongoClient("mongodb+srv://usermgs:udUnpg6aJnCp9d2W@cluster0.2ivys.mongodb.net/baseteste?retryWrites=true&w=majority")
# NOTE(review): uses the database named "test" — presumably a sandbox; confirm.
db = client.test
| 16 | 135 | 0.78125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.65625 |
641ed17899fa236582e1997b155d0d90fa4f025d | 1,340 | py | Python | clean_crime_file.py | sachinmyneni/fundata | 20d8cd26f08803627debfd81077d34850ff42ab9 | [
"MIT"
] | null | null | null | clean_crime_file.py | sachinmyneni/fundata | 20d8cd26f08803627debfd81077d34850ff42ab9 | [
"MIT"
] | null | null | null | clean_crime_file.py | sachinmyneni/fundata | 20d8cd26f08803627debfd81077d34850ff42ab9 | [
"MIT"
] | null | null | null | import pandas as pd
import re
import logging
def badrow(address: str, city: str) -> bool:
    """Return True when the city name (the part of *city* before the first
    underscore, e.g. "Dallas" from "Dallas_TX") does not appear anywhere in
    *address*, compared case-insensitively."""
    city_name = city.split('_', 1)[0].lower()
    return city_name not in address.lower()
# Console logging: INFO and above, with a compact formatter, attached to the
# root logger so module-level logging.* calls below are visible.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
crime_file = './spotcrime.csv.2'
try:
    spotcrime_df = pd.read_csv(crime_file, header=0)
    print(spotcrime_df.head())
except FileNotFoundError:
    logging.fatal(f"{crime_file} not found.")
    # Bug fix: without exiting here the script continued and crashed with a
    # NameError because spotcrime_df was never assigned.
    raise SystemExit(1)
except pd.errors.EmptyDataError:
    logging.fatal(f"{crime_file} had no data.")
    raise SystemExit(1)
print(f"Shape before filter: {spotcrime_df.shape}")
# Keep only rows whose Place prefix actually occurs in the Address, i.e. the
# rows that are NOT flagged by the badrow() helper defined above.
df_new = spotcrime_df[spotcrime_df.apply(lambda x: not badrow(x['Address'], x['Place']), axis=1)]
print(f"Shape after filter: {df_new.shape}")
df_new.to_csv('sc_cleaned.csv', header=True, index=False)
| 34.358974 | 117 | 0.727612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 649 | 0.484328 |
641eff21d70370ab332090e7949322ad7a153190 | 13,743 | py | Python | flash/data/data_module.py | edgarriba/lightning-flash | 62472d7615d95990df92a8a2f92f80af1ab737ac | [
"Apache-2.0"
] | 1 | 2021-05-03T06:42:34.000Z | 2021-05-03T06:42:34.000Z | flash/data/data_module.py | edgarriba/lightning-flash | 62472d7615d95990df92a8a2f92f80af1ab737ac | [
"Apache-2.0"
] | null | null | null | flash/data/data_module.py | edgarriba/lightning-flash | 62472d7615d95990df92a8a2f92f80af1ab737ac | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
from typing import Any, Callable, Dict, Optional, Tuple, Union
import pytorch_lightning as pl
import torch
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataset import Subset
from flash.data.auto_dataset import AutoDataset
from flash.data.data_pipeline import DataPipeline, Postprocess, Preprocess
class DataModule(pl.LightningDataModule):
    """Basic DataModule class for all Flash tasks.

    Wraps up to four datasets (train/val/test/predict) and exposes the
    matching ``*_dataloader`` hooks only for the datasets actually provided.

    Args:
        train_dataset: Dataset for training. Defaults to None.
        val_dataset: Dataset for validating model performance during training. Defaults to None.
        test_dataset: Dataset to test model performance. Defaults to None.
        predict_dataset: Dataset to predict model performance. Defaults to None.
        batch_size: The batch size to be used by the DataLoader. Defaults to 1.
        num_workers: The number of workers to use for parallelized loading.
            Defaults to None which equals the number of available CPU threads,
            or 0 for Darwin platform.
    """
    # Subclasses override these to plug in task-specific pre/post-processing.
    preprocess_cls = Preprocess
    postprocess_cls = Postprocess
    def __init__(
        self,
        train_dataset: Optional[Dataset] = None,
        val_dataset: Optional[Dataset] = None,
        test_dataset: Optional[Dataset] = None,
        predict_dataset: Optional[Dataset] = None,
        batch_size: int = 1,
        num_workers: Optional[int] = None,
    ) -> None:
        super().__init__()
        self._train_ds = train_dataset
        self._val_ds = val_dataset
        self._test_ds = test_dataset
        self._predict_ds = predict_dataset
        # Only expose the dataloader hooks for the stages that have data,
        # so the absent stages are simply not run.
        if self._train_ds:
            self.train_dataloader = self._train_dataloader
        if self._val_ds:
            self.val_dataloader = self._val_dataloader
        if self._test_ds:
            self.test_dataloader = self._test_dataloader
        if self._predict_ds:
            self.predict_dataloader = self._predict_dataloader
        self.batch_size = batch_size
        # TODO: figure out best solution for setting num_workers
        if num_workers is None:
            num_workers = 0 if platform.system() == "Darwin" else os.cpu_count()
        self.num_workers = num_workers
        # Instance-level overrides for the class-level *_cls defaults;
        # populated e.g. by from_load_data_inputs().
        self._preprocess = None
        self._postprocess = None
        # this may also trigger data preloading
        self.set_running_stages()
    @staticmethod
    def get_dataset_attribute(dataset: torch.utils.data.Dataset, attr_name: str, default: Optional[Any] = None) -> Any:
        """Read *attr_name* from *dataset*, looking through a Subset wrapper."""
        if isinstance(dataset, Subset):
            return getattr(dataset.dataset, attr_name, default)
        return getattr(dataset, attr_name, default)
    @staticmethod
    def set_dataset_attribute(dataset: torch.utils.data.Dataset, attr_name: str, value: Any) -> None:
        """Set *attr_name* on *dataset*, looking through a Subset wrapper."""
        if isinstance(dataset, Subset):
            dataset = dataset.dataset
        setattr(dataset, attr_name, value)
    def set_running_stages(self):
        """Tag each provided dataset with the RunningStage it will serve."""
        if self._train_ds:
            self.set_dataset_attribute(self._train_ds, 'running_stage', RunningStage.TRAINING)
        if self._val_ds:
            self.set_dataset_attribute(self._val_ds, 'running_stage', RunningStage.VALIDATING)
        if self._test_ds:
            self.set_dataset_attribute(self._test_ds, 'running_stage', RunningStage.TESTING)
        if self._predict_ds:
            self.set_dataset_attribute(self._predict_ds, 'running_stage', RunningStage.PREDICTING)
    def _resolve_collate_fn(self, dataset: Dataset, running_stage: RunningStage) -> Optional[Callable]:
        # AutoDatasets collate through the data pipeline's worker preprocessor;
        # any other dataset falls back to the DataLoader default (None).
        if isinstance(dataset, AutoDataset):
            return self.data_pipeline.worker_preprocessor(running_stage)
    def _train_dataloader(self) -> DataLoader:
        """Training DataLoader: shuffled, last incomplete batch dropped."""
        train_ds: Dataset = self._train_ds() if isinstance(self._train_ds, Callable) else self._train_ds
        return DataLoader(
            train_ds,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
            pin_memory=True,
            drop_last=True,
            collate_fn=self._resolve_collate_fn(train_ds, RunningStage.TRAINING)
        )
    def _val_dataloader(self) -> DataLoader:
        """Validation DataLoader (no shuffling)."""
        val_ds: Dataset = self._val_ds() if isinstance(self._val_ds, Callable) else self._val_ds
        return DataLoader(
            val_ds,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            collate_fn=self._resolve_collate_fn(val_ds, RunningStage.VALIDATING)
        )
    def _test_dataloader(self) -> DataLoader:
        """Test DataLoader (no shuffling)."""
        test_ds: Dataset = self._test_ds() if isinstance(self._test_ds, Callable) else self._test_ds
        return DataLoader(
            test_ds,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            collate_fn=self._resolve_collate_fn(test_ds, RunningStage.TESTING)
        )
    def _predict_dataloader(self) -> DataLoader:
        """Prediction DataLoader; batch size is capped by the dataset length."""
        predict_ds: Dataset = self._predict_ds() if isinstance(self._predict_ds, Callable) else self._predict_ds
        return DataLoader(
            predict_ds,
            batch_size=min(self.batch_size,
                           len(predict_ds) if len(predict_ds) > 0 else 1),
            num_workers=self.num_workers,
            pin_memory=True,
            collate_fn=self._resolve_collate_fn(predict_ds, RunningStage.PREDICTING)
        )
    def generate_auto_dataset(self, *args, **kwargs):
        """Create an AutoDataset via the data pipeline; None when no data given."""
        if all(a is None for a in args) and len(kwargs) == 0:
            return None
        return self.data_pipeline._generate_auto_dataset(*args, **kwargs)
    @property
    def preprocess(self) -> Preprocess:
        # Instance override if set, otherwise a fresh default instance.
        return self._preprocess or self.preprocess_cls()
    @property
    def postprocess(self) -> Postprocess:
        # Instance override if set, otherwise a fresh default instance.
        return self._postprocess or self.postprocess_cls()
    @property
    def data_pipeline(self) -> DataPipeline:
        # Built on demand from the current pre/post-processing objects.
        return DataPipeline(self.preprocess, self.postprocess)
    @staticmethod
    def _check_transforms(transform: Dict[str, Union[Module, Callable]]) -> Dict[str, Union[Module, Callable]]:
        """Validate that *transform* is a dict and return it unchanged."""
        if not isinstance(transform, dict):
            raise MisconfigurationException(
                "Transform should be a dict. Here are the available keys "
                f"for your transforms: {DataPipeline.PREPROCESS_FUNCS}."
            )
        return transform
    @classmethod
    def autogenerate_dataset(
        cls,
        data: Any,
        running_stage: RunningStage,
        whole_data_load_fn: Optional[Callable] = None,
        per_sample_load_fn: Optional[Callable] = None,
        data_pipeline: Optional[DataPipeline] = None,
    ) -> AutoDataset:
        """
        This function is used to generate an ``AutoDataset`` from a ``DataPipeline`` if provided
        or from the provided ``whole_data_load_fn``, ``per_sample_load_fn`` functions directly
        """
        # Default loaders are resolved on the preprocess class, picking the
        # most specific hook for the given running stage.
        if whole_data_load_fn is None:
            whole_data_load_fn = getattr(
                cls.preprocess_cls,
                DataPipeline._resolve_function_hierarchy('load_data', cls.preprocess_cls, running_stage, Preprocess)
            )
        if per_sample_load_fn is None:
            per_sample_load_fn = getattr(
                cls.preprocess_cls,
                DataPipeline._resolve_function_hierarchy('load_sample', cls.preprocess_cls, running_stage, Preprocess)
            )
        return AutoDataset(data, whole_data_load_fn, per_sample_load_fn, data_pipeline, running_stage=running_stage)
    @staticmethod
    def train_val_test_split(
        dataset: torch.utils.data.Dataset,
        train_split: Optional[Union[float, int]] = None,
        val_split: Optional[Union[float, int]] = None,
        test_split: Optional[Union[float, int]] = None,
        seed: Optional[int] = 1234,
    ) -> Tuple[Dataset, Optional[Dataset], Optional[Dataset]]:
        """Returns split Datasets based on train, valid & test split parameters

        Args:
            dataset: Dataset to be split.
            train_split: If Float, ratio of data to be contained within the train dataset. If Int,
                number of samples to be contained within train dataset.
            val_split: If Float, ratio of data to be contained within the validation dataset. If Int,
                number of samples to be contained within the validation dataset.
            test_split: If Float, ratio of data to be contained within the test dataset. If Int,
                number of samples to be contained within test dataset.
            seed: Used for the train/val splits when val_split is not None.
        """
        n = len(dataset)
        # Resolve each split to an absolute sample count (0 when disabled).
        if test_split is None:
            _test_length = 0
        elif isinstance(test_split, float):
            _test_length = int(n * test_split)
        else:
            _test_length = test_split
        if val_split is None:
            _val_length = 0
        elif isinstance(val_split, float):
            _val_length = int(n * val_split)
        else:
            _val_length = val_split
        # Train takes the remainder unless given explicitly.
        if train_split is None:
            _train_length = n - _val_length - _test_length
        elif isinstance(train_split, float):
            _train_length = int(n * train_split)
        else:
            _train_length = train_split
        # A seeded generator makes the random split reproducible.
        if seed:
            generator = torch.Generator().manual_seed(seed)
        else:
            generator = None
        train_ds, val_ds, test_ds = torch.utils.data.random_split(
            dataset, [_train_length, _val_length, _test_length], generator
        )
        # Splits that were not requested are reported as None, not as empty
        # zero-length subsets.
        if val_split is None:
            val_ds = None
        if test_split is None:
            test_ds = None
        return train_ds, val_ds, test_ds
    @classmethod
    def _generate_dataset_if_possible(
        cls,
        data: Optional[Any],
        running_stage: RunningStage,
        whole_data_load_fn: Optional[Callable] = None,
        per_sample_load_fn: Optional[Callable] = None,
        data_pipeline: Optional[DataPipeline] = None
    ) -> Optional[AutoDataset]:
        """Build an AutoDataset for *data*, or return None when data is None."""
        if data is None:
            return
        if data_pipeline:
            return data_pipeline._generate_auto_dataset(data, running_stage=running_stage)
        return cls.autogenerate_dataset(data, running_stage, whole_data_load_fn, per_sample_load_fn, data_pipeline)
    @classmethod
    def from_load_data_inputs(
        cls,
        train_load_data_input: Optional[Any] = None,
        val_load_data_input: Optional[Any] = None,
        test_load_data_input: Optional[Any] = None,
        predict_load_data_input: Optional[Any] = None,
        preprocess: Optional[Preprocess] = None,
        postprocess: Optional[Postprocess] = None,
        **kwargs,
    ) -> 'DataModule':
        """
        This function is a helper to generate a ``DataModule`` from a ``DataPipeline``.

        Args:
            cls: ``DataModule`` subclass
            train_load_data_input: Data to be received by the ``train_load_data`` function from this ``Preprocess``
            val_load_data_input: Data to be received by the ``val_load_data`` function from this ``Preprocess``
            test_load_data_input: Data to be received by the ``test_load_data`` function from this ``Preprocess``
            predict_load_data_input: Data to be received by the ``predict_load_data`` function from this ``Preprocess``
            kwargs: Any extra arguments to instantiate the provided ``DataModule``
        """
        # trick to get data_pipeline from empty DataModule
        if preprocess or postprocess:
            data_pipeline = DataPipeline(
                preprocess or cls(**kwargs).preprocess,
                postprocess or cls(**kwargs).postprocess,
            )
        else:
            data_pipeline = cls(**kwargs).data_pipeline
        train_dataset = cls._generate_dataset_if_possible(
            train_load_data_input, running_stage=RunningStage.TRAINING, data_pipeline=data_pipeline
        )
        val_dataset = cls._generate_dataset_if_possible(
            val_load_data_input, running_stage=RunningStage.VALIDATING, data_pipeline=data_pipeline
        )
        test_dataset = cls._generate_dataset_if_possible(
            test_load_data_input, running_stage=RunningStage.TESTING, data_pipeline=data_pipeline
        )
        predict_dataset = cls._generate_dataset_if_possible(
            predict_load_data_input, running_stage=RunningStage.PREDICTING, data_pipeline=data_pipeline
        )
        datamodule = cls(
            train_dataset=train_dataset,
            val_dataset=val_dataset,
            test_dataset=test_dataset,
            predict_dataset=predict_dataset,
            **kwargs
        )
        # Share the pipeline's processing objects with the new DataModule.
        datamodule._preprocess = data_pipeline._preprocess_pipeline
        datamodule._postprocess = data_pipeline._postprocess_pipeline
        return datamodule
| 39.950581 | 119 | 0.663974 | 12,640 | 0.919741 | 0 | 0 | 7,701 | 0.560358 | 0 | 0 | 3,387 | 0.246453 |
641f16f4dbc51fe04590691db0086960375589f4 | 4,118 | py | Python | image_generation/data/testing.py | BScarleth/clevr-dataset-generation-mask | efa11d69b2fc21a2007205cac6a4b0122149af7d | [
"BSD-3-Clause"
] | null | null | null | image_generation/data/testing.py | BScarleth/clevr-dataset-generation-mask | efa11d69b2fc21a2007205cac6a4b0122149af7d | [
"BSD-3-Clause"
] | null | null | null | image_generation/data/testing.py | BScarleth/clevr-dataset-generation-mask | efa11d69b2fc21a2007205cac6a4b0122149af7d | [
"BSD-3-Clause"
] | null | null | null | import cv2
#image = cv2.imread('/home/brenda/Documentos/independent_study/clevr-dataset-gen/output/images/CLEVR_new_000000.png')
#image = cv2.imread('/home/brenda/Escritorio/tmpps5tswcu.png')
# Load the rendered test image; earlier candidate files are kept commented
# out above.
image = cv2.imread('/home/brenda/Escritorio/tmp47s462az.png')
print("imagen: ", image.shape, " ", image.shape[0]* image.shape[1])
#row, columns X,Y
# Mark the five annotated object centres (cv2.circle takes (x, y) points).
cv2.circle(image, (57, 96), 5, (0, 255, 0))
cv2.circle(image, (152, 169), 5, (0, 0, 255))
cv2.circle(image, (217, 141), 5, (255, 0, 0))
cv2.circle(image, (264, 120), 5, (255, 0, 0))
cv2.circle(image, (125, 84), 5, (255, 0, 0))
# Flat pixel indices counted back from the end of the buffer:
# index = 76800 - (row * 320 + col). Assumes a 320x240 (= 76800 px) image —
# the loaded file may differ; verify before relying on these offsets.
pixel_center1 = 76800 - ((96 * 320) + 57)
pixel_center2 = 76800 - ((169 * 320) + 152)
pixel_center3 = 76800 - ((141 * 320) + 217)
pixel_center4 = 76800 - ((120 * 320) + 264)
pixel_center5 = 76800 - ((84 * 320) + 125)
#image[96][57] = [255, 116, 140] X Y
#image[169][152] = [255, 116, 140]
#image[141][217] = [255, 116, 140]
#image[120][264] = [255, 116, 140]
#image[84][125] = [255, 116, 140]
#w and h column 124 row 84
#(3,5) would lie in column number 3 and row number 5.
#index = (84 * w320) + 124
############ index = (row * width) + column //// 122 * 320 + 245
#320 es largo
#primero ancho y luego alto
#guardado como 245 (ancho. col), 122 (alto, row)
#pixel_center = (obj["pixel_coords"][1] * height) + obj["pixel_coords"][0]
"""
pixel_center1 = (96 * 320) + 57
pixel_center2 = (169 * 320) + 152
pixel_center3 = (141 * 320) + 217
pixel_center4 = (120 * 320) + 264
pixel_center5 = (84 * 320) + 125
"""
"""
pixel_center1 = (96 * 240) + 57
pixel_center2 = (169 * 240) + 152
pixel_center3 = (141 * 240) + 217
pixel_center4 = (120 * 240) + 264
pixel_center5 = (84 * 240) + 125
"""
"""
pixel_center1 = (57 * 240) + 96
pixel_center2 = (152 * 240) + 169
pixel_center3 = (217 * 240) + 141
pixel_center4 = (264 * 240) + 120
pixel_center5 = (125 * 240) + 84
"""
"""
pixel_center1 = (57 * 320) + 96
pixel_center2 = (152 * 320) + 169
pixel_center3 = (217 * 320) + 141
pixel_center4 = (264 * 320) + 120
pixel_center5 = (125 * 320) + 84
"""
#print("center: ", pixel_center1)
#print("center: ", pixel_center2)
#print("center: ", pixel_center3)
#print("center: ", pixel_center4)
#print("center: ", pixel_center5)
#cv2.imshow('Test image', image)
# (3,3) 3,0 3,1 3,2 3,3 3,4
# (2,2) 2,0 2,1 2,2 2,3 2,4
# (1,1) 1,0 1,1 1,2 1,3 1,4
# (0,0) 0,0 0,1 0,2 0,3 0,4
# 0,0 0,1 0,2 0,3 0,4
# 1,0 1,1 1,2 1,3 1,4
# 2,0 2,1 2,2 2,3 2,4
# 3,0 3,1 3,2 3,3 3,4
#pixel_center = (args.width * args.height) - (obj["pixel_coords"][1] * args.width) + obj["pixel_coords"][0]
# `colores` is created for the colour-counting experiments below, which are
# currently commented out, so it stays empty here.
colores = {}
# Scratch experiment: walk 20 (r, c) cells — rows r = 3 down to 0, columns
# c = 0..4 — printing each cell as it is visited.
c = 0
r = 3
for i in range(20):
    if c >= 5:
        r -= 1
        c = 0
    print(r," --> ",c)
    c+=1
"""
counter = 0
for i, row in enumerate(image):
# get the pixel values by iterating
for j, pixel in enumerate(image):
if counter == pixel_center1 or counter == pixel_center2 or counter == pixel_center3 or counter == pixel_center4:
print("COLOR PINTADO", counter)
image[i][j] = [255, 116, 140]
counter += 1
"""
#print("COUNTER", counter)
'''
for i, row in enumerate(image):
# get the pixel values by iterating
for j, pixel in enumerate(image):
if (i == j or i + j == image.shape[0]):
#print("imprimir: ", list(image[i][j]))
if (str(image[i][j][0])+"-"+str(image[i][j][1])+"-"+str(image[i][j][2])) in colores:
colores[ (str(image[i][j][0])+"-"+str(image[i][j][1])+"-"+str(image[i][j][2])) ] +=1
else:
colores[ (str(image[i][j][0])+"-"+str(image[i][j][1])+"-"+str(image[i][j][2])) ] = 1
# update the pixel value to black
for i, row in enumerate(image):
# get the pixel values by iterating
for j, pixel in enumerate(image):
if (i == j or i + j == image.shape[0]):
#print("imprimir: ", list(image[i][j]
if (str(image[i][j][0])+"-"+str(image[i][j][1])+"-"+str(image[i][j][2])) == '64-64-64':
image[i][j] = [255, 255, 0]
'''
# Show the annotated image until a key is pressed, then tear the window down.
# With the colour-counting loops above commented out, `colores` prints as {}.
cv2.imshow('Test image', image)
#170, 250
print(colores)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 26.915033 | 120 | 0.573337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,322 | 0.806702 |
642258fc8f2651e821a5bcb66a823371ffdb8d3d | 213 | py | Python | C1.py | dpocheng/SIMPLESEM-Compiler-Semantic | 9725968a4d8a78ed95e617ad3dbcdb4fc41bd53d | [
"Apache-2.0"
] | null | null | null | C1.py | dpocheng/SIMPLESEM-Compiler-Semantic | 9725968a4d8a78ed95e617ad3dbcdb4fc41bd53d | [
"Apache-2.0"
] | null | null | null | C1.py | dpocheng/SIMPLESEM-Compiler-Semantic | 9725968a4d8a78ed95e617ad3dbcdb4fc41bd53d | [
"Apache-2.0"
] | null | null | null | a, b, c = 3, 1, -1
# NOTE(review): Python 2 syntax (`print x` statements) — this file will not
# run under Python 3 as-is. Also, once `a` reaches 0 the `if a == 0` branch
# prints `b` without ever changing `a`, so `while a > c` (c == -1) never
# terminates; confirm whether this non-terminating behaviour is intentional
# (the file appears to be a test input for the SIMPLESEM compiler project).
def main():
    global a, b
    # Accumulate a + (a-1) + ... + 1 into b while counting a down.
    while a > c:
        if a == 0:
            print b
        else:
            b = b + a
            a = a - 1
    print a
    print b
    print c
main()
| 15.214286 | 22 | 0.328638 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6423b272fd3f08098ebf8d314748cf75a6207bb0 | 1,157 | py | Python | accounts/accounts/doctype/purchase_invoice/purchase_invoice.py | sumaiya2908/Accounting-App | 4de21b6a5d69ab422483303c4a743f3e323783a3 | [
"MIT"
] | null | null | null | accounts/accounts/doctype/purchase_invoice/purchase_invoice.py | sumaiya2908/Accounting-App | 4de21b6a5d69ab422483303c4a743f3e323783a3 | [
"MIT"
] | null | null | null | accounts/accounts/doctype/purchase_invoice/purchase_invoice.py | sumaiya2908/Accounting-App | 4de21b6a5d69ab422483303c4a743f3e323783a3 | [
"MIT"
] | null | null | null | # Copyright (c) 2021, Summayya and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
from ..gl_entry.gl_entry import create_gl_entry
class PurchaseInvoice(Document):
    """Purchase Invoice doctype controller.

    Recomputes totals and status on every validate, and writes a
    general-ledger entry on submit which is reversed (accounts swapped)
    on cancel.
    """

    def validate(self):
        # Frappe hook: runs before the document is saved.
        self.set_status()
        self.set_total_amount()

    def set_total_amount(self):
        """Recompute invoice totals from the child 'item' table."""
        # sum() over an empty item table already yields 0, so the previous
        # zero-initialisation dead stores were removed.
        self.amount = sum(item.amount for item in self.item)
        self.total_quantity = sum(item.quantity for item in self.item)

    def set_status(self):
        '''
        Draft: 0
        Submitted: 1, Paid or Unpaid or Overdue
        Cancelled: 2
        '''
        # A brand-new amended document starts back in Draft.
        if self.is_new():
            if self.get('amended_form'):
                self.status = 'Draft'
                return
        # Freshly submitted invoices start out Unpaid.
        if self.docstatus == 1:
            self.status = 'Unpaid'

    def on_submit(self):
        # Write the general-ledger entry for this invoice.
        create_gl_entry(self, "Purchase Invoice",
                        self.credit_account, self.debit_account)

    def on_cancel(self):
        # Write the reversing entry: same accounts, opposite order
        # relative to on_submit.
        create_gl_entry(self, "Purchase Invoice",
                        self.debit_account, self.credit_account)
6425b9d5a46f13b1d3e986f11cbc61c7dd984f83 | 499 | py | Python | setup.py | dgaston/kvasir | 5e938193a936944c765b8dd78600acc258fedbab | [
"MIT"
] | null | null | null | setup.py | dgaston/kvasir | 5e938193a936944c765b8dd78600acc258fedbab | [
"MIT"
] | null | null | null | setup.py | dgaston/kvasir | 5e938193a936944c765b8dd78600acc258fedbab | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging metadata for the Kvasir OpenShift app (setuptools).
# NOTE(review): the run-time dependencies are unpinned; consider adding
# version specifiers so deployments are reproducible.
setup(name='Kvasir',
      version='0.9',
      description='Kvasir OpenShift App',
      author='Dan Gaston',
      author_email='admin@deaddriftbio.com',
      url='https://www.python.org/community/sigs/current/distutils-sig',
      install_requires=['Flask', 'MarkupSafe', 'Flask-MongoEngine', 'Flask-Script',
                        'numpy', 'Flask-OpenID', 'Cython', 'Flask-Login', 'Flask-Mail',
                        'pytz', 'pyzmq', 'scipy', 'gemini'],
      )
| 38.384615 | 87 | 0.593186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.533066 |
6426477b8ed07962bc5bc2e8ce0ab2a16b7c39da | 1,151 | py | Python | stoich.py | jonasoh/thermosampler | 013e55cef255b89b05c4f0e76bec9512d900f516 | [
"MIT"
] | 2 | 2021-06-11T20:35:20.000Z | 2021-08-19T09:51:17.000Z | stoich.py | jonasoh/thermosampler | 013e55cef255b89b05c4f0e76bec9512d900f516 | [
"MIT"
] | 8 | 2021-07-02T08:14:52.000Z | 2021-11-19T15:56:48.000Z | stoich.py | jonasoh/thermosampler | 013e55cef255b89b05c4f0e76bec9512d900f516 | [
"MIT"
] | 1 | 2021-11-19T09:45:03.000Z | 2021-11-19T09:45:03.000Z | #!/usr/bin/env python3
# Import modules
import sys, os
import pandas as pd
import collections
import itertools
import argparse
import re
# Import functions
from mdf import parse_equation, read_reactions
# Define functions
def sWrite(string):
    """Write *string* to stdout without a trailing newline and flush at once."""
    print(string, end='', flush=True)
def sError(string):
    """Write *string* to stderr without a trailing newline and flush at once."""
    print(string, end='', file=sys.stderr, flush=True)
# Main code block
def main(reaction_file, outfile_name, proton_name='C00080'):
    """Parse reactions from *reaction_file* and write the stoichiometric
    matrix to *outfile_name* as tab-separated values.

    Args:
        reaction_file: Path of the text file listing the reactions.
        outfile_name: Destination path for the tab-delimited matrix.
        proton_name: Identifier for protons, forwarded to read_reactions.
    """
    # Load stoichiometric matrix. A context manager closes the input file
    # promptly (the original left the handle open until garbage collection).
    with open(reaction_file, 'r') as handle:
        S_pd = read_reactions(handle.read(), proton_name)
    # Write stoichiometric matrix to outfile
    S_pd.to_csv(outfile_name, "\t")
if __name__ == "__main__":
    # Command-line entry point: stoich.py <reactions> <outfile> [-H PROTON]
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'reactions', type=str,
        help='Load reactions.'
    )
    parser.add_argument(
        'outfile', type=str,
        help='Write stoichiometric matrix in tab-delimited format.'
    )
    parser.add_argument(
        '-H', '--proton_name', default='C00080',
        help='Name used to identify protons.'
    )
    args = parser.parse_args()
    main(args.reactions, args.outfile, args.proton_name)
| 24.489362 | 71 | 0.682884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.290182 |
6427067cb0803864cbac859e6090f9b18cef3bcf | 1,090 | py | Python | engine/vector.py | dbrizov/PyTetris | f783526a37944452607d86c5def28dbaf3cd4481 | [
"MIT"
] | 3 | 2018-07-19T11:59:02.000Z | 2021-12-19T15:05:02.000Z | engine/vector.py | dbrizov/PyTetris | f783526a37944452607d86c5def28dbaf3cd4481 | [
"MIT"
] | null | null | null | engine/vector.py | dbrizov/PyTetris | f783526a37944452607d86c5def28dbaf3cd4481 | [
"MIT"
] | null | null | null | import math
class Vector2(tuple):
    """Immutable 2D vector backed by a tuple.

    Supports +, - with other vectors, scalar * and /, value equality, and
    the usual geometric properties (magnitude, squared magnitude,
    normalization).
    """

    def __new__(cls, x, y):
        return tuple.__new__(cls, (x, y))

    @property
    def x(self):
        return self[0]

    @property
    def y(self):
        return self[1]

    def __add__(self, vector):
        return Vector2(self.x + vector.x, self.y + vector.y)

    def __sub__(self, vector):
        return Vector2(self.x - vector.x, self.y - vector.y)

    def __mul__(self, scalar):
        return Vector2(self.x * scalar, self.y * scalar)

    def __truediv__(self, scalar):
        return Vector2(self.x / scalar, self.y / scalar)

    def __eq__(self, other):
        # Bug fix: defer to the interpreter for non-Vector2 operands
        # (falls back to tuple comparison) instead of raising
        # AttributeError on `other.x`.
        if isinstance(other, Vector2):
            return self.x == other.x and self.y == other.y
        return NotImplemented

    # Bug fix: defining __eq__ implicitly sets __hash__ to None, which made
    # Vector2 unhashable even though it subclasses tuple. Restore tuple's
    # hash; it is consistent with __eq__ (equal vectors have equal tuples).
    __hash__ = tuple.__hash__

    def __str__(self):
        return "({0:.2f}, {1:.2f})".format(self.x, self.y)

    @property
    def magnitude(self):
        return math.sqrt(self.x * self.x + self.y * self.y)

    @property
    def magnitude_sqr(self):
        return self.x * self.x + self.y * self.y

    @property
    def normalized(self):
        # Unit-length vector in the same direction; undefined (ZeroDivisionError)
        # for the zero vector, matching the original behaviour.
        return Vector2(self.x / self.magnitude, self.y / self.magnitude)


# Canonical zero vector, shared by all users of the class.
Vector2.ZERO = Vector2(0, 0)
| 22.708333 | 72 | 0.590826 | 1,044 | 0.957798 | 0 | 0 | 387 | 0.355046 | 0 | 0 | 20 | 0.018349 |
64286b89fc025e5e539006ca29420f5163f9fba7 | 19,894 | py | Python | cytomine-applications/landmark_model_builder/validation.py | Cytomine-ULiege/Cytomine-python-datamining | 16db995f175e8972b8731a8df9391625e1920288 | [
"Apache-2.0"
] | null | null | null | cytomine-applications/landmark_model_builder/validation.py | Cytomine-ULiege/Cytomine-python-datamining | 16db995f175e8972b8731a8df9391625e1920288 | [
"Apache-2.0"
] | null | null | null | cytomine-applications/landmark_model_builder/validation.py | Cytomine-ULiege/Cytomine-python-datamining | 16db995f175e8972b8731a8df9391625e1920288 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2015. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Vandaele Rémy <remy.vandaele@ulg.ac.be>"
__contributors__ = ["Marée Raphaël <raphael.maree@ulg.ac.be>"]
__copyright__ = "Copyright 2010-2015 University of Liège, Belgium, http://www.cytomine.be/"
from multiprocessing import Pool
import numpy as np
import scipy.ndimage as snd
from cytomine import models
from scipy import misc
from shapely.geometry import Point
from sklearn.ensemble import ExtraTreesClassifier
from download import *
"""
This function extracts the coordinates of a given term from an offline
cytomine images/coordinates repository.
"""
def getcoords(repository, termid):
    """Collect the coordinates of one term from an offline repository.

    Every ``<imageid>.txt`` file in *repository* holds space-separated
    lines: ``termid x y xp yp``. Lines whose term equals *termid* are
    gathered across all files.

    Returns:
        Five parallel numpy arrays: pixel x, pixel y, the two floating
        point (normalized) coordinates, and the image identifier of each
        match.
    """
    if not repository.endswith('/'):
        repository = repository + '/'
    x = []
    y = []
    xp = []
    yp = []
    im = []
    for f in os.listdir(repository):
        if f.endswith('.txt'):
            # The filename minus its extension is the numeric image id.
            # Bug fix: the original used f.rstrip('.txt'), which strips any
            # trailing '.', 't' or 'x' characters, not the literal suffix.
            imageid = int(f[:-len('.txt')])
            # Text mode + context manager: works on Python 3 (the original
            # opened in 'rb' and mixed bytes with str operations) and
            # guarantees the handle is closed.
            with open(repository + f, 'r') as handle:
                for line in handle:
                    v = line.rstrip().split(' ')
                    if int(v[0]) == termid:
                        x.append(int(float(v[1])))
                        y.append(int(float(v[2])))
                        xp.append(float(v[3]))
                        yp.append(float(v[4]))
                        im.append(imageid)
    return np.array(x), np.array(y), np.array(xp), np.array(yp), np.array(im)
"""
This function returns a 8 bit grey-value image given its identifier in the
offline cytomine repository.
"""
def readimage(repository, idimage, image_type='jpg'):
    """Read image <idimage>.<image_type> from the repository and return it
    as a standardized grey-level array (zero mean, unit variance, float64).

    Args:
        repository: Directory holding the image files.
        idimage: Numeric identifier; the filename stem.
        image_type: One of 'png', 'bmp', 'jpg'.

    Raises:
        ValueError: if image_type is unsupported. (The original silently
            fell through the elif chain and crashed later with an
            UnboundLocalError.)
    """
    if not repository.endswith('/'):
        repository = repository + '/'
    if image_type not in ('png', 'bmp', 'jpg'):
        raise ValueError("Unsupported image type: %s" % image_type)
    IM = misc.imread('%s%d.%s' % (repository, idimage, image_type), flatten=True)
    IM = np.double(IM)
    IM = IM - np.mean(IM)
    IM = IM / np.std(IM)
    return IM
"""
Given the classifier clf, this function will try to find the landmark on the
image current
"""
def searchpoint(repository, current, clf, mx, my, cm, depths, window_size, image_type,
                npred):
    """Predict the landmark position on image *current*.

    Draws *npred* candidate positions from a 2D gaussian whose mean
    (mx, my) and covariance cm are expressed relative to the image size,
    scores every candidate with classifier *clf*, and returns the median
    position of the best-scoring candidates as
    (x, y, y mirrored to a bottom-origin coordinate system).
    """
    simage = readimage(repository, current, image_type)
    (height, width) = simage.shape
    # Scale the relative gaussian samples to pixel coordinates.
    P = np.random.multivariate_normal([mx, my], cm, npred)
    x_v = np.round(P[:, 0] * width)
    y_v = np.round(P[:, 1] * height)
    height = height - 1
    width = width - 1
    n = len(x_v)
    pos = 0
    maxprob = -1
    maxx = []
    maxy = []
    # maximum number of points considered at once in order to not overload the
    # memory.
    step = 100000
    # Clamp every candidate inside the image bounds.
    for index in range(len(x_v)):
        xv = x_v[index]
        yv = y_v[index]
        if (xv < 0):
            x_v[index] = 0
        if (yv < 0):
            y_v[index] = 0
        if (xv > width):
            x_v[index] = width
        if (yv > height):
            y_v[index] = height
    # Score the candidates chunk by chunk, keeping every position that
    # reaches the best probability seen so far (ties accumulate).
    while (pos < n):
        xp = np.array(x_v[pos:min(n, pos + step)])
        yp = np.array(y_v[pos:min(n, pos + step)])
        DATASET = build_dataset_image(simage, window_size, xp, yp, depths)
        pred = clf.predict_proba(DATASET)
        # Keep only the probability of the second class (label 1, the
        # "is the landmark" class produced by dataset_image_rot).
        pred = pred[:, 1]
        maxpred = np.max(pred)
        if (maxpred >= maxprob):
            positions = np.where(pred == maxpred)
            positions = positions[0]
            xsup = xp[positions]
            ysup = yp[positions]
            if (maxpred > maxprob):
                # Strictly better score: restart the tie list.
                maxprob = maxpred
                maxx = xsup
                maxy = ysup
            else:
                # Same best score: extend the tie list.
                maxx = np.concatenate((maxx, xsup))
                maxy = np.concatenate((maxy, ysup))
        pos = pos + step
    # Median over the tied best candidates; the third value mirrors y for a
    # coordinate system whose origin is at the bottom of the image.
    return np.median(maxx), np.median(maxy), height - np.median(maxy)
"""
0-padding of an image IM of wp pixels on all sides
"""
def makesize(IM, wp):
    """Zero-pad *IM* with a border of *wp* pixels on every side.

    The result is always float64 (matching np.zeros), whatever dtype the
    input array has.
    """
    as_float = np.asarray(IM, dtype=np.float64)
    return np.pad(as_float, wp, mode='constant')
"""
Build the dataset on a single image
"""
def build_dataset_image(IM, wp, x_v, y_v, depths):
    """Extract one feature row per (x, y) position of image IM.

    For every zoom factor in *depths*, a (2*wp x 2*wp) window anchored at
    the rescaled position is cut out of the zero-padded, zoomed image; the
    windows of all depths are flattened and concatenated into one row of
    the returned matrix.
    NOTE(review): uses xrange, i.e. this function is Python 2 only.
    """
    swp = (2 * wp) ** 2
    # wp1 and REP are computed but never used afterwards.
    wp1 = wp + 1
    ndata = len(x_v)
    dwp = 2 * wp
    ndepths = len(depths)
    DATASET = np.zeros((ndata, swp * ndepths))
    REP = np.zeros(ndata)
    images = {}
    # One wp-padded copy of the image per zoom factor.
    for z in xrange(ndepths):
        images[z] = makesize(snd.zoom(IM, depths[z]), wp)
    # Pre-scale all coordinates for every depth.
    X = [[int(x * depths[z]) for x in x_v] for z in xrange(ndepths)]
    Y = [[int(y * depths[z]) for y in y_v] for z in xrange(ndepths)]
    cub = np.zeros((ndepths, dwp, dwp))
    for j in xrange(ndata):
        x = x_v[j]
        y = y_v[j]
        for z in xrange(ndepths):
            im = images[z]
            # Thanks to the wp-pixel padding the slice never leaves the image.
            cub[z, :, :] = im[Y[z][j]:Y[z][j] + dwp, X[z][j]:X[z][j] + dwp]
        DATASET[j, :] = cub.flatten()
    return DATASET
def rotate_coordinates(repository, num, x, y, angle, image_type):
    """Rotate image *num* by -*angle* degrees and map the coordinate arrays
    (x, y) into the rotated image's frame.

    Returns (rotated x list, rotated y list, rotated image). The rotated
    canvas may have a different size, hence the re-anchoring on the new
    half-sizes below.
    """
    image = readimage(repository, num, image_type)
    image_rotee = snd.rotate(image, -angle)
    (h, w) = image.shape
    (hr, wr) = image_rotee.shape
    angle_rad = angle * (np.pi / 180.)
    c = np.cos(angle_rad)
    s = np.sin(angle_rad)
    # Express the points relative to the original image centre...
    x = x - (w / 2.)
    y = y - (h / 2.)
    # ...rotate, then re-anchor on the rotated image centre.
    xr = ((x * c) - (y * s)) + (wr / 2.)
    yr = ((x * s) + (y * c)) + (hr / 2.)
    return xr.tolist(), yr.tolist(), image_rotee
def dataset_image_rot(repository, Xc, Yc, R, RMAX, proportion, step, i, ang, depths, window_size, image_type):
    """Build a rotated training dataset for image *i*.

    Positive samples (label 1): every grid point of spacing *step* within
    radius R of the landmark (Xc, Yc). Negative samples (label 0):
    proportion * n random points at distance in [R, RMAX] from the
    landmark. The image and all sample coordinates are then rotated by
    *ang* degrees before feature extraction.

    Returns (feature matrix, 0/1 labels, image id per sample).
    """
    x_v = []
    y_v = []
    REP = []
    IMGS = []
    deuxpi = 2. * np.pi
    # Positive samples: grid points inside the disc of radius R.
    # (int() replaces the deprecated/removed np.int alias.)
    for x in np.arange(int(Xc) - R, int(Xc) + R + 1, step):
        for y in np.arange(int(Yc) - R, int(Yc) + R + 1, step):
            if (np.linalg.norm([Xc - x, Yc - y]) <= R):
                x_v.append(x)
                y_v.append(y)
                REP.append(1)
                IMGS.append(i)
    n = len(x_v)
    image = readimage(repository, i, image_type)
    (height, width) = image.shape
    height = height - 1
    width = width - 1
    # Negative samples: random points in the ring between radii R and RMAX,
    # clamped to the image bounds.
    for t in range(int(round(proportion * n))):
        angle = np.random.ranf() * deuxpi
        r = R + (np.random.ranf() * (RMAX - R))
        tx = int(r * np.cos(angle))
        ty = int(r * np.sin(angle))
        x_v.append(min(width, Xc + tx))
        y_v.append(min(height, Yc + ty))
        REP.append(0)
        IMGS.append(i)
    # Bug fix: rotate by the requested angle `ang` (degrees). The original
    # passed `angle` — the leftover radian value of the last negative
    # sample — and raised a NameError whenever no negative sample was drawn.
    (x_r, y_r, im) = rotate_coordinates(repository, i, np.round(np.array(x_v)), np.round(np.array(y_v)), ang,
                                        image_type)
    (hr, wr) = im.shape
    hr = hr - 1
    wr = wr - 1
    x_r = np.round(x_r)
    y_r = np.round(y_r)
    # Clamp the rotated coordinates inside the rotated image.
    for index in range(len(x_r)):
        xr = x_r[index]
        yr = y_r[index]
        if (xr < 0):
            x_r[index] = 0
        if (yr < 0):
            y_r[index] = 0
        if (xr > wr):
            x_r[index] = wr
        if (yr > hr):
            y_r[index] = hr
    return build_dataset_image(im, window_size, x_r, y_r, depths), REP, IMGS
def mp_helper_rot(job_args):
    # Pool.map hands each worker a single argument; unpack the tuple into
    # dataset_image_rot's parameter list.
    return dataset_image_rot(*job_args)
def build_datasets_rot_mp(repository, imc, Xc, Yc, R, RMAX, proportion, step, ang, window_size, depths, nimages,
                          image_type, njobs):
    """Build the rotated training datasets for *nimages* images in parallel.

    Each image is assigned an independent random rotation angle drawn
    uniformly from [-ang, ang]. Work is fanned out over *njobs* worker
    processes.

    Returns:
        The list of per-image (dataset, labels, image ids) tuples produced
        by dataset_image_rot.
    """
    # (The old TOTDATA/TOTREP/IMGS/X/Y/deuxpi locals were dead code and
    # have been removed.)
    p = Pool(njobs)
    job_args = [(
        repository, Xc[i], Yc[i], R, RMAX, proportion, step, imc[i], (np.random.ranf() * 2 * ang) - ang, depths,
        window_size, image_type) for i in range(nimages)]
    T = p.map(mp_helper_rot, job_args)
    p.close()
    p.join()
    return T
def str2bool(v):
    """Interpret a command-line string as a boolean flag (case-insensitive)."""
    truthy = {"yes", "true", "t", "1"}
    return v.lower() in truthy
if __name__ == "__main__":
parameters = {
'cytomine_host': '',
'cytomine_public_key': '',
'cytomine_private_key': '',
'cytomine_id_software': 0,
'cytomine_base_path': '',
'cytomine_working_path': None,
'cytomine_id_term': None,
'cytomine_id_project': None,
'image_type': '',
'model_njobs': None,
'model_R': None,
'model_RMAX': None,
'model_P': None,
'model_npred': None,
'model_ntrees': None,
'model_ntimes': None,
'model_angle': None,
'model_depth': None,
'model_step': None,
'model_wsize': None,
'verbose': False
}
p = optparse.OptionParser(description='Cytomine Landmark Detection : Model building',
prog='Cytomine Landmark Detector : Model builder', version='0.1')
p.add_option('--cytomine_host', type="string", default='beta.cytomine.be', dest="cytomine_host",
help="The Cytomine host (eg: beta.cytomine.be, localhost:8080)")
p.add_option('--cytomine_public_key', type="string", default='XXX', dest="cytomine_public_key",
help="Cytomine public key")
p.add_option('--cytomine_private_key', type="string", default='YYY', dest="cytomine_private_key",
help="Cytomine private key")
p.add_option('--cytomine_id_software', type="int", dest="cytomine_id_software",
help="The Cytomine software identifier")
p.add_option('--cytomine_base_path', type="string", default='/api/', dest="cytomine_base_path",
help="Cytomine base path")
p.add_option('--cytomine_working_path', default="/tmp/", type="string", dest="cytomine_working_path",
help="The working directory (eg: /tmp)")
p.add_option('--cytomine_id_project', type="int", dest="cytomine_id_project",
help="The Cytomine project identifier")
p.add_option('--cytomine_id_term', type='int', dest='cytomine_id_term',
help="The identifier of the term to create a detection model for")
p.add_option('--image_type', type='string', default='jpg', dest='image_type',
help="The type of the images that will be used (jpg, bmp, png,...)")
p.add_option('--model_njobs', type='int', default=1, dest='model_njobs',
help="The number of processors used for model building")
p.add_option('--model_R', type='int', default=6, dest='model_R', help="Max distance for extracting landmarks")
p.add_option('--model_RMAX', type='int', default=200, dest='model_RMAX',
help="Max distance for extracting non-landmarks")
p.add_option('--model_P', type='float', default=3, dest='model_P', help="Proportion of non-landmarks")
p.add_option('--model_npred', type='int', default=50000, dest='model_npred',
help="Number of pixels extracted for prediction")
p.add_option('--model_ntrees', type='int', default=50, dest='model_ntrees', help="Number of trees")
p.add_option('--model_ntimes', type='int', default=3, dest='model_ntimes',
help="Number of rotations to apply to the image")
p.add_option('--model_angle', type='float', default=30, dest='model_angle', help="Max angle for rotation")
p.add_option('--model_depth', type='int', default=5, dest='model_depth', help="Number of resolutions to use")
p.add_option('--model_step', type='int', default=1, dest='model_step',
help="Landmark pixels will be extracted in a grid (x-R:step:x+r,y-R:step:y+R) around the landmark")
p.add_option('--model_wsize', type='int', default=8, dest='model_wsize', help="Window size")
p.add_option('--validation_K', type='int', default=10, dest='validation_K', help="K for K-fold cross validation")
p.add_option('--verbose', type="string", default="0", dest="verbose", help="Turn on (1) or off (0) verbose mode")
options, arguments = p.parse_args(args=sys.argv)
parameters['cytomine_host'] = options.cytomine_host
parameters['cytomine_public_key'] = options.cytomine_public_key
parameters['cytomine_private_key'] = options.cytomine_private_key
parameters['cytomine_id_software'] = options.cytomine_id_software
parameters['cytomine_base_path'] = options.cytomine_base_path
parameters['cytomine_working_path'] = options.cytomine_working_path
parameters['cytomine_id_term'] = options.cytomine_id_term
parameters['cytomine_id_project'] = options.cytomine_id_project
parameters['image_type'] = options.image_type
parameters['model_njobs'] = options.model_njobs
parameters['model_R'] = options.model_R
parameters['model_RMAX'] = options.model_RMAX
parameters['model_P'] = options.model_P
parameters['model_npred'] = options.model_npred
parameters['model_ntrees'] = options.model_ntrees
parameters['model_ntimes'] = options.model_ntimes
parameters['model_angle'] = options.model_angle
parameters['model_depth'] = options.model_depth
parameters['model_step'] = options.model_step
parameters['model_wsize'] = options.model_wsize
parameters['validation_K'] = options.validation_K
parameters['verbose'] = str2bool(options.verbose)
if (not parameters['cytomine_working_path'].endswith('/')):
parameters['cytomine_working_path'] = parameters['cytomine_working_path'] + '/'
cytomine_connection = cytomine.Cytomine(parameters['cytomine_host'], parameters['cytomine_public_key'],
parameters['cytomine_private_key'],
base_path=parameters['cytomine_base_path'],
working_path=parameters['cytomine_working_path'],
verbose=parameters['verbose'])
current_user = cytomine_connection.get_current_user()
run_by_user_job = False
if current_user.algo == False:
user_job = cytomine_connection.add_user_job(parameters['cytomine_id_software'],
parameters['cytomine_id_project'])
cytomine_connection.set_credentials(str(user_job.publicKey), str(user_job.privateKey))
else:
user_job = current_user
run_by_user_job = True
job = cytomine_connection.get_job(user_job.job)
job = cytomine_connection.update_job_status(job, status=job.RUNNING, progress=0,
status_comment="Beginning validation...")
download_images(cytomine_connection, parameters['cytomine_id_project'])
download_annotations(cytomine_connection, parameters['cytomine_id_project'], parameters['cytomine_working_path'])
repository = parameters['cytomine_working_path'] + str(parameters['cytomine_id_project']) + '/'
txt_repository = parameters['cytomine_working_path'] + '%d/txt/' % parameters['cytomine_id_project']
depths = 1. / (2. ** np.arange(parameters['model_depth']))
image_type = parameters['image_type']
(xc, yc, xr, yr, imc) = getcoords(txt_repository, parameters['cytomine_id_term'])
nimages = np.max(xc.shape)
mx = np.mean(xr)
my = np.mean(yr)
P = np.zeros((2, nimages))
P[0, :] = xr
P[1, :] = yr
cm = np.cov(P)
passe = False
for times in range(parameters['model_ntimes']):
if (times == 0):
rangrange = 0
else:
rangrange = parameters['model_angle']
T = build_datasets_rot_mp(repository, imc, xc, yc, parameters['model_R'], parameters['model_RMAX'],
parameters['model_P'], parameters['model_step'], rangrange, parameters['model_wsize'],
depths, nimages, parameters['image_type'], parameters['model_njobs'])
for i in range(len(T)):
(data, rep, img) = T[i]
(height, width) = data.shape
if (not passe):
passe = True
DATA = np.zeros((height * (len(T) + 100) * parameters['model_ntimes'], width))
REP = np.zeros(height * (len(T) + 100) * parameters['model_ntimes'])
IMG = np.zeros(height * (len(T) + 100) * parameters['model_ntimes'])
b = 0
be = height
DATA[b:be, :] = data
REP[b:be] = rep
IMG[b:be] = img
b = be
be = be + height
REP = REP[0:b]
DATA = DATA[0:b, :]
IMG = IMG[0:b]
erreur = []
g = np.random.randint(0, parameters['validation_K'], nimages)
groupes = np.zeros(IMG.shape)
G = {}
for i in range(parameters['validation_K']):
G[i] = []
for i in range(nimages):
t = np.where(IMG == imc[i])
t = t[0]
groupes[t] = g[i]
G[g[i]].append(imc[i])
Xh = {}
Yh = {}
for i in range(nimages):
Xh[imc[i]] = xc[i]
Yh[imc[i]] = yc[i]
for k in range(parameters['validation_K']):
t = np.where(groupes != k)
t = t[0]
TRDATA = DATA[t, :]
TRREP = REP[t]
clf = ExtraTreesClassifier(n_jobs=parameters['model_njobs'], n_estimators=parameters['model_ntrees'])
clf = clf.fit(TRDATA, TRREP)
for j in G[k]:
(x, y, yp) = searchpoint(repository, j, clf, mx, my, cm, depths, parameters['model_wsize'],
parameters['image_type'], parameters['model_npred'])
circle = Point(x, yp)
location = circle.wkt
new_annotation = cytomine_connection.add_annotation(location, j)
cytomine_connection.add_annotation_term(new_annotation.id, term=parameters['cytomine_id_term'],
expected_term=parameters['cytomine_id_term'], rate=1.0,
annotation_term_model=models.AlgoAnnotationTerm)
er = np.linalg.norm([x - Xh[j], y - Yh[j]])
print j, x, y, Xh[j], Yh[j], er
erreur.append(er)
moy = np.mean(erreur)
ste = np.std(erreur)
ec95 = 1.96 * (np.std(erreur) / np.sqrt(nimages))
print parameters['cytomine_id_term'], moy - ec95, moy, moy + ec95
job_parameters = {}
job_parameters['landmark_term'] = parameters['cytomine_id_term']
job_parameters['landmark_r'] = parameters['model_R']
job_parameters['landmark_rmax'] = parameters['model_RMAX']
job_parameters['landmark_p'] = parameters['model_P']
job_parameters['landmark_npred'] = parameters['model_npred']
job_parameters['landmark_ntimes'] = parameters['model_ntimes']
job_parameters['landmark_alpha'] = parameters['model_angle']
job_parameters['landmark_depth'] = parameters['model_depth']
job_parameters['landmark_window_size'] = parameters['model_wsize']
job_parameters['forest_n_estimators'] = parameters['model_ntrees']
job_parameters['forest_max_features'] = 10
job_parameters['forest_min_samples_split'] = 2
job_parameters['validation_K'] = parameters['validation_K']
job_parameters['validation_result_mean'] = moy
if run_by_user_job == False:
job_parameters_values = cytomine_connection.add_job_parameters(user_job.job, cytomine_connection.get_software(
parameters['cytomine_id_software']), job_parameters)
job = cytomine_connection.update_job_status(job, status=job.TERMINATED, progress=100,
status_comment="Validation done.")
| 38.111111 | 120 | 0.595255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,115 | 0.257061 |
642b0395e141004aacfb61ba31176a5747e34b7b | 8,235 | py | Python | simulador de credito 1.py | Argenta47/talleres_de_algoritmos | cfca1984c69702d48fb165070eb0c60b16977e51 | [
"MIT"
] | null | null | null | simulador de credito 1.py | Argenta47/talleres_de_algoritmos | cfca1984c69702d48fb165070eb0c60b16977e51 | [
"MIT"
] | null | null | null | simulador de credito 1.py | Argenta47/talleres_de_algoritmos | cfca1984c69702d48fb165070eb0c60b16977e51 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Simulador de credito
- Monto del prestamo del carro
- Ingresa tu cuota inicial
- Ingresos mensuales
- Numero de meses del prestamo
- Datos personales
----------------------------------------
Ingresos mensuales
- 908.526 - 1.000.000 >>>>>>>>>>>>>> 20.000.000
- 1.000.000 - 2.000.000 >>>>>>>>>> 25.000.000
- 2.000.000 - 2.500.000 >>>>>>>>>> 30.000.000
- 2.500.000 - 3.000.000 >>>>>>>>>> 40.000.000
- 3.000.000 - 4.500.000 >>>>>>>>>> 60.000.000
- 4.500.000 o mas >>>>>>>>>>>>>>>> 120.000.000
12 Meses - 1.772.488
20.000.000 ->>> 100
21.269.856 ->>> 106.3% /// equivale a un 6.3% de interes total
1.269.856
105.798 de interès cada mes
24 Meses - 936.993
20.000.000 ->>> 100
22.487.832 ->>> 112.4% /// equivale a un 12.4% de interes total
2.500.000
104.659 de interes cada mes
36 meses - 659.710
20.000.000 ->>> 100
23.749.560 ->>> 118.7% /// equivale a un 18.7% de interes total
48 meses - 521.976
20.000.000 ->>> 100
25.054.848 ->>> 125,2% /// equivale a un 25.2% de interes total
60 meses - 440.053
20.000.000 ->>> 100
26.403.180 ->>> 132% /// equivale a un 32% de interes total
72 meses - 386.030
20.000.000 ->>> 100
27.794.160 ->>> 138.9% /// equivale a un 38.9% de interes total
"""
import sys
print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
print("|||||||||||||BIENVENIDO AL SIMULADOR DE CREDITO DE VEHICULO|||||||||||||")
print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
print("||||||||||||||||||compra el vehiculo de tus sueños||||||||||||||||||||||")
print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
dp_nombre = str(input("Por favor digita tu nombre y apellido: "))
dp_edad = int(input("Por favor digita tu edad: "))
if dp_edad < 18:
sys.exit("El credito es solo para mayores de edad")
if dp_edad >= 70:
sys.exit("El credito es solo para personas entre los 18 y 69 años")
if dp_edad > 18 and dp_edad <= 70:
dp_salario = int(input("por favor digita tu salario: "))
if dp_salario < 908526:
sys.exit("Monto no valido, valor minimo de prestamo 1SMLV")
# 908.526 - 1.000.000 >>>>>>>>>>>>>> 20.000.000
if dp_salario > 908526 and dp_salario <= 1000000:
print("El prestamo podra ser desde 1.000.000 hasta 20.000.000 cop")
# 1.000.000 - 2.000.000 >>>>>>>>>> 25.000.000
if dp_salario > 1000000 and dp_salario <= 2000000:
print("El prestamo podra ser desde 1.000.000 hasta 25.000.000 cop")
# 2.000.000 - 2.500.000 >>>>>>>>>> 30.000.000
if dp_salario > 2000000 and dp_salario <= 2500000:
print("El prestamo podra ser desde 1.000.000 hasta 30.000.000 cop")
# 2.500.000 - 3.000.000 >>>>>>>>>> 40.000.000
if dp_salario > 2500000 and dp_salario <= 3000000:
print("El prestamo podra ser desde 1.000.000 hasta 40.000.000 cop")
# 3.000.000 - 4.500.000 >>>>>>>>>> 60.000.000
if dp_salario > 3000000 and dp_salario <= 4500000:
print("El prestamo podra ser desde 1.000.000 hasta 60.000.000 cop")
# 4.500.000 o mas >>>>>>>>>>>>>>>> 120.000.000
if dp_salario > 4500000:
print("El prestamo podra ser desde 1.000.000 hasta 120.000.000 cop")
#####################################################
if dp_salario > 908526:
credito1 = int(input("Ingrese el valor del vehiculo: "))
while credito1 < 1000000:
print("El valor no es valido, inserte un valor desde 1'000.000")
credito1 = int(input("Ingrese el valor del vehiculo: "))
while dp_salario > 908526 and dp_salario <= 1000000 and credito1 > 20000000 or credito1 < 1000000:
print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 20.000.000 cop")
credito1 = int(input("Ingrese el valor del vehiculo: "))
while dp_salario > 1000000 and dp_salario <= 2000000 and credito1 > 25000000 or credito1 < 1000000:
print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 25.000.000 cop")
credito1 = int(input("Ingrese el valor del vehiculo: "))
while dp_salario > 2000000 and dp_salario <= 2500000 and credito1 > 30000000 or credito1 < 1000000:
print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 30.000.000 cop")
credito1 = int(input("Ingrese el valor del vehiculo: "))
while dp_salario > 2500000 and dp_salario <= 3000000 and credito1 > 40000000 or credito1 < 1000000:
print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 40.000.000 cop")
credito1 = int(input("Ingrese el valor del vehiculo: "))
while dp_salario > 3000000 and dp_salario <= 4500000 and credito1 > 60000000 or credito1 < 1000000:
print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 60.000.000 cop")
credito1 = int(input("Ingrese el valor del vehiculo: "))
while dp_salario > 4500000 and credito1 > 120000000 or credito1 < 1000000:
print("Monto no valido - El prestamo podra ser desde 1.000.000 hasta 120.000.000 cop")
credito1 = int(input("Ingrese el valor del vehiculo: "))
if credito1 > 1000000:
cuota_inicial = int(input("Ingrese el valor de su cuota inicial: "))
dinero_a_financiar = credito1 - cuota_inicial
print("El dinero a financiar sera: ", dinero_a_financiar)
if credito1 > 1000000 and dinero_a_financiar > 1:
cuotas = int(input("Ingrese el numero de cutas a financiar (12) (24) (36) (48) (60) (72): "))
while credito1 > 1000000 and dinero_a_financiar > 1 and cuotas != 12 and cuotas != 24 and cuotas != 36 and cuotas != 48 and cuotas != 60 and cuotas != 72:
print("Numero de cuotas no valido")
cuotas = int(input("Ingrese el numero de cutas a financiar (12) (24) (36) (48) (60) (72): "))
print("Señor(a): ",dp_nombre)
if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 12:
interes12 = (dinero_a_financiar * 106.3)/100
print("El valor final aproximado a pagar es: ", round(interes12))
cuota_mensual12 = interes12/12
print("La cuota mensual aproximada es de: ", round(cuota_mensual12))
if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 24:
interes24 = (dinero_a_financiar * 112.4)/100
print("El valor final a pagar es: ", round(interes24))
cuota_mensual24 = interes24/24
print("La cuota mensual es de: ", round(cuota_mensual24))
if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 36:
interes36 = (dinero_a_financiar * 118.7)/100
print("El valor final aproximado a pagar es: ", round(interes36))
cuota_mensual36 = interes36/36
print("La cuota mensual aproximada es de: ", round(cuota_mensual36))
if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 48:
interes48 = (dinero_a_financiar * 125.2)/100
print("El valor final aproximado a pagar es: ", round(interes48))
cuota_mensual48 = interes48/48
print("La cuota mensual aproximada es de: ", round(cuota_mensual48))
if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 60:
interes60 = (dinero_a_financiar * 132)/100
print("El valor final aproximado a pagar es: ", round(interes60))
cuota_mensual60 = interes60/60
print("La cuota mensual aproximada es de: ", round(cuota_mensual60))
if credito1 > 1000000 and dinero_a_financiar > 1 and cuotas == 72:
interes72 = (dinero_a_financiar * 138.9)/100
print("El valor final aproximado a pagar es: ", round(interes72))
cuota_mensual72 = interes72/72
print("La cuota mensual aproximada es de: ", round(cuota_mensual72))
print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
print("||||||||||GRACIAS POR USAR SIMULADOR DE CREDITO DE VEHICULO|||||||||||||")
print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
print("||||||||||||||||||compra el vehiculo de tus sueños|||||||||||||||||||||||")
print("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
"""
Fuentes:
https://www.delftstack.com/es/howto/python/how-to-round-to-two-decimals-in-python/
https://www.delftstack.com/es/howto/python/python-exit-program/
https://www.bancodeoccidente.com.co/occiauto/autogestionado
""" | 37.60274 | 155 | 0.609107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,711 | 0.571723 |
642bbc4b4f8e24532cb2b27107b10dda9e845b52 | 1,527 | py | Python | py/g1/operations/databases/subscribers/g1/operations/databases/subscribers/parts.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | 3 | 2016-01-04T06:28:52.000Z | 2020-09-20T13:18:40.000Z | py/g1/operations/databases/subscribers/g1/operations/databases/subscribers/parts.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | py/g1/operations/databases/subscribers/g1/operations/databases/subscribers/parts.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | import g1.asyncs.agents.parts
import g1.messaging.parts.subscribers
from g1.apps import parameters
from g1.apps import utils
from g1.asyncs.bases import queues
from g1.bases import labels
# For now these are just aliases.
from g1.messaging.parts.subscribers import make_subscriber_params
from .. import subscribers # pylint: disable=relative-beyond-top-level
SUBSCRIBER_LABEL_NAMES = (
# Output.
'queue',
# Private.
('subscriber', g1.messaging.parts.subscribers.SUBSCRIBER_LABEL_NAMES),
)
def define_subscriber(module_path=None, **kwargs):
    """Define a subscriber part under *module_path* and return its labels.

    Falls back to this package's subscriber module path when *module_path*
    is not given.  Extra keyword arguments are forwarded to
    make_subscriber_params.
    """
    path = module_path or subscribers.__name__
    module_labels = labels.make_nested_labels(path, SUBSCRIBER_LABEL_NAMES)
    module_params = parameters.define(path, make_subscriber_params(**kwargs))
    setup_subscriber(module_labels, module_params)
    return module_labels
def setup_subscriber(module_labels, module_params):
    """Register the queue maker and subscriber maker, then wire the part.

    The queue maker produces the output queue; the subscriber maker consumes
    that queue; the underlying messaging subscriber part is set up last with
    the given parameters.
    """
    utils.define_maker(make_queue, {'return': module_labels.queue})
    subscriber_annotations = {
        'queue': module_labels.queue,
        'return': module_labels.subscriber.subscriber,
    }
    utils.define_maker(subscribers.make_subscriber, subscriber_annotations)
    g1.messaging.parts.subscribers.setup_subscriber(
        module_labels.subscriber, module_params
    )
def make_queue(shutdown_queue: g1.asyncs.agents.parts.LABELS.shutdown_queue):
    """Create the subscriber queue and register its close() for shutdown."""
    subscriber_queue = queues.Queue(capacity=32)
    # Closing the queue on shutdown lets the consuming agent exit cleanly.
    shutdown_queue.put_nonblocking(subscriber_queue.close)
    return subscriber_queue
| 26.789474 | 77 | 0.703995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.089718 |
642d1130e6fc3e78e4f3c772b4be2992b13429c7 | 9,247 | py | Python | mhd/python/plot_over_time_CVS_smart.py | FinMacDov/AMR_code | 36e25fab72cd86cdda25e4e70ea3afd584b7dbfd | [
"MIT"
] | null | null | null | mhd/python/plot_over_time_CVS_smart.py | FinMacDov/AMR_code | 36e25fab72cd86cdda25e4e70ea3afd584b7dbfd | [
"MIT"
] | 1 | 2018-08-14T12:29:07.000Z | 2018-08-14T12:29:07.000Z | mhd/python/plot_over_time_CVS_smart.py | FinMacDov/AMR_code | 36e25fab72cd86cdda25e4e70ea3afd584b7dbfd | [
"MIT"
] | null | null | null | import csv
import matplotlib.pyplot as plt
from matplotlib import interactive
from matplotlib import rcParams, cycler
from scipy.interpolate import interp1d
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
import img2vid as i2v
import glob
import numpy as np
import os
"""
Created on Wed May 9 13:12:34 2018
@author: Fionnlagh Mackenzie Dover
"""
def listdir_fullpath(d):
    """Return the entries of directory *d* as paths prefixed with *d*."""
    join = os.path.join
    return [join(d, entry) for entry in os.listdir(d)]
# tanh
timeunit = 68.6985 #s
# c7
#timeunit = 155.658*2#2.14683
F = False
T = True
linewidth = 4
colour_map = F
line_plot_evo = F
vs_cs = F
plot_Te_v2 = F
movie = F
mass_density_sum = F
plasma_beta = T
sound_speed = F
img_save = T
#c7
#location = '/media/fionnlagh/W7_backup/c7test/longrun'
#tanh
location = '/run/user/1001/gvfs/smb-share:server=uosfstore.shef.ac.uk,share=shared/mhd_jet1/User/smp16fm/sims/jet/tanh_sb'
save_loc = '/home/fionnlagh/work/AMR_code/mhd/python/image_testing/'
step = 25
fps = 5
fontsize = 18
labelsize = 14
B_cgs = 60 # G
Guass_2_telsa = 1e-4
B_SI = B_cgs*Guass_2_telsa # tesla
miu0_si = 1.2566370614e-6 # H m^-1
# cgs
runit = 2.3416704877999998E-015
punit = 0.31754922400000002
tunit = 1e6
gamma = 5/3
vunit = 11645084.295622544
# location = '/home/fionnlagh/work/dat/mhd/2d/c7_relax_run_csv'
#location = '/home/fionnlagh/sim_data/jet_simple/slice1'
global file_path
file_path = os.path.abspath(location)
global total_files
total_files = listdir_fullpath(location)
total_files = sorted(total_files)
fname = []
for file in total_files:
if file.endswith("csv"):
fname.append(file)
v2t = []
yt = []
tick_clip = []
pt = []
rhot = []
yt = []
Tet = []
spt = 0
ept = len(fname)
NBpts = 800
tick = np.arange(spt, ept)*timeunit
for sim_time in range(spt, ept, step):
tick_clip.append(sim_time*timeunit)
with open(fname[sim_time]) as csvfile:
reader = csv.DictReader(csvfile)
y = []
rho = []
p = []
v2 = []
for row in reader:
rho.append(row['rho'])
p.append(row['p'])
v2.append(row['v2'])
y.append(row['Y'])
# This allows us to work with the data
rho = np.array(rho).astype(np.float)
p = np.array(p).astype(np.float)
Te = np.divide(p/punit, rho/runit)*tunit
y = np.array(y).astype(np.float)
v2 = np.array(v2).astype(np.float)
# Interpolation of the 1d data set for current time step
p_interp = interp1d(y, p*0.1, kind='linear') # SI
rho_interp = interp1d(y, rho*1000, kind='linear') # SI
v2_interp = interp1d(y, v2*0.01, kind='linear') # SI
Te_interp = interp1d(y, Te, kind='linear') # SI
# Selects data points for interp
yt = np.linspace(y[0], y[-1], num=NBpts, endpoint=True)
# Collect data together
v2t.append(v2_interp(yt))
rhot.append(rho_interp(yt))
pt.append(p_interp(yt))
Tet.append(Te_interp(yt))
vlimit = np.floor(np.max(np.abs(v2t)))
Te_ulimit = np.floor(np.max(Tet)+0.5*np.max(Tet))
Te_llimit = np.floor(np.min(Tet)-0.5*np.min(Tet))
mach = np.sqrt(gamma*(np.array(pt)/np.array(rhot)))
v2t_cs_array = v2t/mach
if colour_map:
H, Tunit = np.meshgrid(yt, np.asarray(tick_clip)/60**2)
# https://matplotlib.org/examples/color/colormaps_reference.html
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, figsize=(18, 20),
dpi=80, facecolor='w', edgecolor='k')
if vs_cs == T:
v_cs_limit = np.floor(np.max(np.abs(v2t_cs_array)))
levels1 = MaxNLocator(nbins=500).tick_values(-v_cs_limit, v_cs_limit)
cmap1 = plt.get_cmap('seismic')
norm1 = BoundaryNorm(levels1, ncolors=cmap1.N)
cf1 = ax1.contourf(Tunit, H, v2t_cs_array, cmap=cmap1, levels=levels1)
fig.colorbar(cf1, ax=ax1, label='$\frac{v_y}{c_s}$ [$\mathrm{m\;s^{-1}}$]') # v2 units
else:
levels1 = MaxNLocator(nbins=500).tick_values(-vlimit, vlimit)
cmap1 = plt.get_cmap('seismic')
norm1 = BoundaryNorm(levels1, ncolors=cmap1.N)
cf1 = ax1.contourf(Tunit, H, v2t, cmap=cmap1, levels=levels1)
fig.colorbar(cf1, ax=ax1, label='$v_y$ [$\mathrm{m\;s^{-1}}$]') # v2 units
cmap2 = plt.get_cmap('hot')
levels2 = MaxNLocator(nbins=100).tick_values(np.min(rhot), np.max(rhot))
norm2 = BoundaryNorm(levels2, ncolors=cmap2.N)
cf2 = ax2.contourf(Tunit, H, rhot, cmap=cmap2, levels=levels2)
fig.colorbar(cf2, ax=ax2, label=r'$\rho$ [$\mathrm{kg\;m^{-3}}$]')
ax2.set_ylabel('$y$ [$\mathrm{Mm}$]', fontsize=14)
cmap3 = plt.get_cmap('coolwarm')
levels3 = MaxNLocator(nbins=100).tick_values(np.min(Tet), np.max(Tet))
norm3 = BoundaryNorm(levels3, ncolors=cmap3.N)
cf3 = ax3.contourf(Tunit, H, Tet, cmap=cmap3, levels=levels3)
fig.colorbar(cf3, ax=ax3, label='Te [$\mathrm{K}$]')
ax3.set_xlabel('Time [hrs]', fontsize=14)
if img_save:
fig.savefig(save_loc+'colour_map_vy_rho_Te', dpi=180)
plt.close(fig)
if line_plot_evo:
spacer = 0
zero_pts = np.zeros(len(v2t))
# Selects every nth row
LIM = [0, 10]
v2t_cs = v2t_cs_array[LIM[0]::LIM[-1]].T
# Make color gradient
N = len(v2t_cs.T)
cmap = plt.cm.coolwarm
rcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N)))
SPACER = 5
gaps = np.linspace(0, len(v2t_cs.T)*SPACER, len(v2t_cs.T))
line = np.full(len(gaps), 7)
zero_line = np.full((len(yt), len(gaps)), gaps)
fig2, ax2 = plt.subplots(figsize=(18, 20))
fontsize = 16
plt.plot(yt, v2t_cs+gaps)
plt.plot(yt, zero_line, linestyle='--')
labels = np.chararray(N, unicode=True, itemsize=5)
yt_label_pos = yt[LIM[0]::LIM[-1]].T
labels[:] = ' '
labels[::4] = (str(-SPACER))
labels[1::4] = ('0.0')
labels[2::4] = (str(SPACER))
plt.yticks(gaps, labels, fontsize=fontsize)
plt.rc('xtick', labelsize=fontsize)
time_stamps = tick[LIM[0]::LIM[-1]]
text_pos_1 = np.full(len(v2t_cs.T), 9)
text_pos_2 = zero_line[0]+1
for i in range(len(text_pos_1)):
ax2.text(text_pos_1[i], text_pos_2[i],
str(round(time_stamps[i], -1))+' s', fontsize=fontsize)
plt.xlabel('y [Mm]', fontsize=fontsize)
plt.ylabel('v2/cs', fontsize=fontsize)
plt.xlim(0, 10)
if plot_Te_v2:
fig, (ax1, ax2) = plt.subplots(2, 1)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
for i in range(len(Tet)):
ax1.set_ylabel('Te [$\mathrm{K}$]')
ax2.set_ylabel('$v_y$ [$\mathrm{m\;s^{-1}}$]')
ax1.semilogy(yt, Tet[i], color='r', linewidth=linewidth)
ax1.semilogy(yt, Tet[0], color='grey', linewidth=linewidth,
linestyle='dashed')
ax1.set_ylim(Te_llimit, Te_ulimit)
ax2.plot(yt, v2t[i], linewidth=linewidth)
ax2.set_ylim(-vlimit-vlimit*0.1, vlimit+vlimit*0.1)
# ax2.set_ylim(-50, 50)
textstr = '%.3f' % round(tick_clip[i]/60**2, 3)+' hrs'
ax2.text(0.75, 0.1, textstr, transform=ax2.transAxes,
fontsize=fontsize,bbox=props)
if img_save:
fig.savefig(save_loc+'/evo/'+'Te_vy_evo'+"{0:04}".format(i), dpi=180)
ax1.clear()
ax2.clear()
plt.close(fig)
if movie:
master_dir = save_loc+'evo'
name = 'Te_vy_evo'
total_files = glob.glob(master_dir+'/'+name+'*.png')
hsize = 586
wsize = 782
sav_path = '/shared/mhd_jet1/User/smp16fm/sims/atmos/c7/vids'
total_files = sorted(total_files)
prefix = name
file_sav_name = name
sav_loc = master_dir
i2v.image2video(filepath=sav_loc, prefix=file_sav_name, in_extension='png',
output_name=file_sav_name+'video', out_extension='avi',
fps=fps, n_loops=1, delete_images=True,
delete_old_videos=True, res=1080, overlay=False, cover_page=False)
if mass_density_sum:
total_mass_den = np.asarray(rhot).sum(axis=1)
plt.plot(np.asarray(tick_clip)/60**2,total_mass_den, linewidth=linewidth)
plt.ylabel('Total mass density [$\mathrm{kg\;m^{-3}}$]', fontsize=fontsize)
plt.xlabel('Time [hrs]', fontsize=fontsize)
plt.tick_params(labelsize=labelsize)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
if img_save:
plt.savefig(save_loc+'total_mass_density', dpi=180)
plt.close()
if plasma_beta:
plt.semilogy(yt, (pt[0]/0.1)/(B_cgs**2/(2*4*np.pi)), linewidth=linewidth)
plt.ylabel(r'$\beta$', fontsize=fontsize)
plt.xlabel('$y$ [$\mathrm{Mm}$]', fontsize=fontsize)
plt.tick_params(labelsize=labelsize)
if img_save:
plt.savefig(save_loc+'plasma_beta_test', dpi=180)
plt.close()
if sound_speed:
plt.semilogy(yt, mach[0], linewidth=linewidth)
plt.ylim(mach[0].min()-0.1*mach[0].min(), mach[0,-1]+0.1*mach[0,-1])
plt.ylabel('$c_S$ [$\mathrm{m\;s^{-1}}$]', fontsize=fontsize)
plt.xlabel('$y$ [$\mathrm{Mm}$]', fontsize=fontsize)
plt.tick_params(labelsize=labelsize)
if img_save:
plt.savefig(save_loc+'sound_speed', dpi=180)
plt.close()
| 32.332168 | 122 | 0.627339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,594 | 0.17238 |
642d2bd6b0ba6f29debf7f63d005d6f4745e8609 | 2,349 | py | Python | python_tools/power.py | HowieWang1/myown-tools | 6ad1fba6d97300c6d15c26d89d5a0c1dc5f9d79d | [
"MIT"
] | 2 | 2021-06-13T13:15:25.000Z | 2021-07-29T00:26:38.000Z | python_tools/power.py | HowieWang1/myown-tools | 6ad1fba6d97300c6d15c26d89d5a0c1dc5f9d79d | [
"MIT"
] | 6 | 2021-06-04T08:05:03.000Z | 2021-07-04T03:50:00.000Z | python_tools/power.py | HowieWang1/myown-tools | 6ad1fba6d97300c6d15c26d89d5a0c1dc5f9d79d | [
"MIT"
] | null | null | null | import os
import tkinter as tk
from telnetlib import Telnet
import ctp.pdu.apc as apc
class PDUPower():
    """Minimal Tk GUI that toggles individual outlets on an APC PDU.

    Four checkboxes select outlets 1-4; the on/off buttons push the chosen
    state to the PDU (via ctp.pdu.apc) and colour each affected checkbox
    green (on) or grey (off) as feedback.
    """

    def __init__(self):
        self.window = tk.Tk()
        # One IntVar per outlet: 1 while the checkbox is ticked, else 0.
        self.pdu1 = tk.IntVar()
        self.pdu2 = tk.IntVar()
        self.pdu3 = tk.IntVar()
        self.pdu4 = tk.IntVar()
        self.p1 = tk.Checkbutton(self.window, text='PDU: 1', variable=self.pdu1, onvalue=1, offvalue=0)
        self.p2 = tk.Checkbutton(self.window, text='PDU: 2', variable=self.pdu2, onvalue=1, offvalue=0)
        self.p3 = tk.Checkbutton(self.window, text='PDU: 3', variable=self.pdu3, onvalue=1, offvalue=0)
        self.p4 = tk.Checkbutton(self.window, text='PDU: 4', variable=self.pdu4, onvalue=1, offvalue=0)

    def pdu(self, powerStatue='off'):
        """Apply *powerStatue* ('on' or 'off') to every selected outlet."""
        selected = [self.pdu1.get(), self.pdu2.get(), self.pdu3.get(), self.pdu4.get()]
        # Outlet numbers are 1-based on the PDU, e.g. "1,3" for outlets 1 and 3.
        cmd = ",".join(str(i + 1) for i, val in enumerate(selected) if val == 1)
        if not cmd:
            # Nothing ticked: sending an empty outlet list used to crash on
            # int('') below, so simply do nothing.
            return
        pdu_cli = apc.APC('10.0.0.253', usr='apc', pwd='apc -c')
        checkboxes = [self.p1, self.p2, self.p3, self.p4]
        # BUGFIX: compare strings with '==', not 'is' — identity of string
        # literals is an implementation detail (SyntaxWarning on CPython 3.8+).
        if powerStatue == 'on':
            pdu_cli.power_on(cmd)
            for i in cmd.split(','):
                checkboxes[int(i) - 1].config(bg='green')
        if powerStatue == 'off':
            pdu_cli.power_off(cmd)
            for i in cmd.split(','):
                checkboxes[int(i) - 1].config(bg='grey')

    def __del__(self):
        # NOTE(review): also used as the explicit "logout" button callback
        # below, not only as a finalizer.
        print('destroy the window')
        self.window.quit()

    def PowerControl(self):
        """Build the window, wire the buttons and enter the Tk main loop."""
        self.window.title('PDU Power')
        self.window.geometry('500x300')
        l = tk.Label(self.window, text='DST PDU Power Control', bg='light blue', font=('Arial', 12), width=30, height=2)
        l.pack()
        self.p1.pack()
        self.p2.pack()
        self.p3.pack()
        self.p4.pack()
        button1 = tk.Button(self.window, text='ready to on', width=10, height=2, command=lambda: self.pdu('on'))
        button2 = tk.Button(self.window, text='ready to off', width=10, height=2, command=lambda: self.pdu('off'))
        button3 = tk.Button(self.window, text='logout', width=10, height=2, command=self.__del__)
        button1.pack()
        button2.pack()
        button3.pack()
        self.window.mainloop()
if __name__ == '__main__':
    # Build the GUI and hand control to the Tk main loop (blocks until the
    # window is closed or quit() is called).
    easyCommand = PDUPower()
    easyCommand.PowerControl()
| 39.813559 | 121 | 0.573436 | 2,159 | 0.919115 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.097488 |
642d2e7d44ed6fcd127074d651ed9e62d6aa3d5f | 450 | py | Python | 33-inheritance.py | MKen212/pymosh | 6db31f181081baf3e4cc3d7228a3480b16bd3cda | [
"MIT"
] | null | null | null | 33-inheritance.py | MKen212/pymosh | 6db31f181081baf3e4cc3d7228a3480b16bd3cda | [
"MIT"
] | null | null | null | 33-inheritance.py | MKen212/pymosh | 6db31f181081baf3e4cc3d7228a3480b16bd3cda | [
"MIT"
] | null | null | null | class Mammal:
    x = 10  # class attribute shared by all instances and subclasses
    def walk(self):
        """Print a walking message."""
        print("Walking")
# class Dog:
# def walk(self):
# print("Walking")
# class Cat:
# def walk(self):
# print("Walking")
class Dog(Mammal):
    """A mammal that can additionally bark; walk() and x are inherited."""
    def bark(self):
        print("Woof, woof!")
class Cat(Mammal):
    # A class body cannot be empty, so `pass` acts as a placeholder;
    # Cat inherits all behaviour from Mammal.
    pass # Just used here as Python does not like empty classes. Pass just passes...
# Demo: the inherited attribute and methods work on both subclasses.
rover = Dog()
print(rover.x)  # 10 — inherited class attribute
rover.bark()
fluffy = Cat()
fluffy.walk()  # inherited from Mammal
643115619018886865b6c08a6d8862db0370f968 | 103 | py | Python | pluribus/poker/evaluation/__init__.py | keithlee96/pluribus-poker-AI | 15e52fe73dd09570e782dd0e7b9069865eb5823d | [
"MIT"
] | 113 | 2020-08-06T15:03:18.000Z | 2022-03-31T01:56:34.000Z | poker_ai/poker/evaluation/__init__.py | fedden/pluribus | 73fb394b26623c897459ffa3e66d7a5cb47e9962 | [
"MIT"
] | null | null | null | poker_ai/poker/evaluation/__init__.py | fedden/pluribus | 73fb394b26623c897459ffa3e66d7a5cb47e9962 | [
"MIT"
] | 42 | 2020-08-17T15:51:30.000Z | 2022-03-31T17:10:44.000Z | from .eval_card import EvaluationCard
from .evaluator import Evaluator
from .lookup import LookupTable
| 25.75 | 37 | 0.854369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
643158e261dedfd6f790d1bd79a39f44e68fc5d7 | 844 | py | Python | dynamicgem/graph_generation/getAS_nx.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | dynamicgem/graph_generation/getAS_nx.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | dynamicgem/graph_generation/getAS_nx.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | import networkx as nx
import numpy as np
import os
# Convert each AS-733 topology snapshot file into a pickled NetworkX graph.
DATA_DIR = 'as-733'
fnames = sorted(os.listdir(DATA_DIR))
# routersD maps raw router identifiers to dense integer node ids that are
# shared across all snapshots, so node numbering is consistent between files.
routersD = {}
routerId = 0
file_sno = 1
for curr_file in fnames:
    with open(DATA_DIR+ '/' + curr_file) as f:
        G = nx.DiGraph()
        # Pre-create the full node range so every snapshot graph has the
        # same node set. NOTE(review): 7716 is a magic count — confirm it
        # matches the total number of distinct routers in the dataset.
        G.add_nodes_from(range(7716))
        for line in f:
            line = line.strip()
            if line.startswith('#'):
                continue
            v_i, v_j = line.split('\t')
            if v_i not in routersD:
                routersD[v_i] = routerId
                routerId += 1
            if v_j not in routersD:
                routersD[v_j] = routerId
                routerId += 1
            # G.add_node(routersD[v_i])
            # G.add_node(routersD[v_j])
            G.add_edge(routersD[v_i], routersD[v_j], weight=1)
    # NOTE(review): the output name says "month_" but inputs appear to be
    # per-snapshot files — confirm the intended naming.
    fname_s = 'as-graphs/month_' + str(file_sno) + '_graph.gpickle'
    print('File %d/%d' % (file_sno, len(fnames)))
    file_sno += 1
    nx.write_gpickle(G, fname_s)
| 24.823529 | 66 | 0.626777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.14218 |
6431a7d544c46cfb92483bf64ab03075de332a4d | 2,477 | py | Python | virtool/validators.py | ReeceHoffmann/virtool | f9befad060fe16fa29fb80124e674ac5a9c4f538 | [
"MIT"
] | 39 | 2016-10-31T23:28:59.000Z | 2022-01-15T00:00:42.000Z | virtool/validators.py | ReeceHoffmann/virtool | f9befad060fe16fa29fb80124e674ac5a9c4f538 | [
"MIT"
] | 1,690 | 2017-02-07T23:39:48.000Z | 2022-03-31T22:30:44.000Z | virtool/validators.py | ReeceHoffmann/virtool | f9befad060fe16fa29fb80124e674ac5a9c4f538 | [
"MIT"
] | 25 | 2017-02-08T18:25:31.000Z | 2021-09-20T22:55:25.000Z | import re
from email_validator import validate_email, EmailSyntaxError
from virtool.users.utils import PERMISSIONS
RE_HEX_COLOR = re.compile("^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$")
def strip(value: str) -> str:
    """Remove leading and trailing whitespace from ``value``.

    Used as a coercion hook in Cerberus validators to normalize incoming
    string fields.

    :param value: the string to normalize
    :return: ``value`` with flanking whitespace removed
    """
    stripped = value.strip()
    return stripped
def is_permission_dict(field: str, value: dict, error: callable):
    """Cerberus check that every key of ``value`` is a known permission.

    When one or more keys are missing from :data:`PERMISSIONS`, the message
    "keys must be valid permissions" is reported through ``error``.

    :param field: permissions field being validated
    :param value: candidate permissions dictionary
    :param error: the calling validator's ``_error`` method
    """
    unknown = [key for key in value if key not in PERMISSIONS]

    if unknown:
        error(field, "keys must be valid permissions")
def has_unique_segment_names(field: str, value: list, error: callable):
    """Cerberus check that all segment names in ``value`` are distinct.

    When two or more segments share a name, the message
    "list contains duplicate names" is reported through ``error``.

    :param field: field being validated
    :param value: list of segment dicts, each carrying a "name" key
    :param error: the calling validator's ``_error`` method
    """
    names = [segment["name"] for segment in value]

    if len(set(names)) != len(names):
        error(field, "list contains duplicate names")
def is_valid_hex_color(field: str, value: str, error: callable):
    """Cerberus check that ``value`` is a hex color such as ``#aabbcc``.

    The value is matched against :data:`RE_HEX_COLOR` (``#`` followed by 3
    or 6 hex digits); on failure the message
    "This is not a valid Hexadecimal color" is reported through ``error``.

    :param field: color field being validated
    :param value: candidate color string
    :param error: the calling validator's ``_error`` method
    """
    if RE_HEX_COLOR.match(value) is None:
        error(field, "This is not a valid Hexadecimal color")
def is_valid_email(field: str, value: str, error: callable):
    """
    Cerberus check that ``value`` parses as an email address.

    Delegates to :func:`email_validator.validate_email`; on a syntax error
    the message "Not a valid email" is reported through ``error``.

    :param field: email field being validated
    :param value: candidate email string
    :param error: the calling validator's ``_error`` method
    """
    try:
        validate_email(value)
    except EmailSyntaxError:
        error(field, "Not a valid email")
| 32.168831 | 113 | 0.705289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,682 | 0.676861 |
643356b0b42e24450c9db6f057bc342c9400746c | 116 | py | Python | guildwars2/guild/__init__.py | n1tr0-5urf3r/GW2Bot | 788e94926766ac16661211cb528be47629b0ad07 | [
"MIT"
] | 75 | 2017-05-06T18:53:34.000Z | 2022-01-10T08:02:48.000Z | guildwars2/guild/__init__.py | n1tr0-5urf3r/GW2Bot | 788e94926766ac16661211cb528be47629b0ad07 | [
"MIT"
] | 81 | 2017-05-07T18:04:06.000Z | 2021-12-13T13:34:45.000Z | guildwars2/guild/__init__.py | n1tr0-5urf3r/GW2Bot | 788e94926766ac16661211cb528be47629b0ad07 | [
"MIT"
] | 56 | 2017-05-07T06:58:29.000Z | 2022-03-28T23:23:42.000Z | from .general import GeneralGuild
from .sync import GuildSync
class GuildMixin(GeneralGuild, GuildSync):
pass
| 16.571429 | 42 | 0.793103 | 51 | 0.439655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6434145fe0d9785478a829e3cad733f76a24c2f5 | 4,780 | py | Python | noisyopt/tests/test_noisyopt.py | 42Numeric/noisyopt | 0574f3e031f4f08a398f905ccf056e92a4982788 | [
"MIT"
] | null | null | null | noisyopt/tests/test_noisyopt.py | 42Numeric/noisyopt | 0574f3e031f4f08a398f905ccf056e92a4982788 | [
"MIT"
] | null | null | null | noisyopt/tests/test_noisyopt.py | 42Numeric/noisyopt | 0574f3e031f4f08a398f905ccf056e92a4982788 | [
"MIT"
] | null | null | null | import numpy as np
import numpy.testing as npt
import noisyopt
def test_minimize():
    """Exercise noisyopt.minimize on deterministic and noisy quadratics."""
    deltatol = 1e-3
    ## basic testing without stochasticity
    def quadratic(x):
        return (x**2).sum()

    res = noisyopt.minimize(quadratic, np.asarray([0.5, 1.0]), deltatol=deltatol)
    npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
    npt.assert_equal(res.free, [False, False])

    res = noisyopt.minimize(quadratic, np.asarray([2.5, -3.2]), deltatol=deltatol)
    npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
    npt.assert_equal(res.free, [False, False])

    res = noisyopt.minimize(quadratic, np.asarray([2.5, -3.2, 0.9, 10.0, -0.3]),
                            deltatol=deltatol)
    npt.assert_allclose(res.x, np.zeros(5), atol=deltatol)
    npt.assert_equal(res.free, [False, False, False, False, False])

    ## test bound handling
    res = noisyopt.minimize(quadratic, np.asarray([0.5, 0.5]),
                            bounds=np.asarray([[0, 1], [0, 1]]), deltatol=deltatol)
    npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
    npt.assert_equal(res.free, [False, False])

    # Minimum of the quadratic lies outside the box: expect the corner.
    res = noisyopt.minimize(quadratic, np.asarray([0.8, 0.8]),
                            bounds=np.asarray([[0.5, 1], [0.5, 1]]),
                            deltatol=deltatol)
    npt.assert_allclose(res.x, [0.5, 0.5], atol=deltatol)
    npt.assert_equal(res.free, [False, False])

    ## test determination of unconstrained variables
    def quadratic_except_last(x):
        return (x[:-1]**2).sum()

    res = noisyopt.minimize(quadratic_except_last, np.asarray([0.5, 1.0]))
    npt.assert_approx_equal(res.x[0], 0.0)
    # The last coordinate does not affect the objective, so it stays free.
    npt.assert_equal(res.free, [False, True])

    ## test errorcontrol for stochastic function
    def stochastic_quadratic(x, seed=None):
        prng = np.random if seed is None else np.random.RandomState(seed)
        return (x**2).sum() + prng.randn(1) + 0.5*np.random.randn(1)

    deltatol = 0.5
    # test unpaired
    res = noisyopt.minimize(stochastic_quadratic, np.array([4.55, 3.0]),
                            deltainit=2.0, deltatol=deltatol,
                            errorcontrol=True)
    npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
    npt.assert_equal(res.free, [False, False])
    # test paired
    res = noisyopt.minimize(stochastic_quadratic, np.array([4.55, 3.0]),
                            deltainit=2.0, deltatol=deltatol,
                            errorcontrol=True, paired=True)
    npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
    npt.assert_equal(res.free, [False, False])
def test_bisect():
    """Exercise noisyopt.bisect: exact roots, extrapolation, noisy input."""
    xtol = 1e-6

    ## simple tests
    root = noisyopt.bisect(lambda x: x, -2, 2, xtol=xtol)
    npt.assert_allclose(root, 0.0, atol=xtol)

    root = noisyopt.bisect(lambda x: x-1, -2, 2, xtol=xtol)
    npt.assert_allclose(root, 1.0, atol=xtol)

    ## extrapolate if 0 outside of interval
    root = noisyopt.bisect(lambda x: x, 1, 2, xtol=xtol)
    npt.assert_allclose(root, 0.0, atol=xtol)
    # With outside='raise' the same call must fail instead of extrapolating.
    npt.assert_raises(noisyopt.BisectException,
                      noisyopt.bisect, lambda x: x, 1, 2,
                      xtol=xtol, outside='raise')

    ## extrapolate with nonlinear function
    root = noisyopt.bisect(lambda x: x+x**2, 1.0, 2, xtol=xtol)
    assert root < 1.0

    ## test with stochastic function
    xtol = 1e-1
    func = lambda x: x - 0.25 + np.random.normal(scale=0.01)
    root = noisyopt.bisect(noisyopt.AveragedFunction(func), -2, 2, xtol=xtol,
                           errorcontrol=True)
    npt.assert_allclose(root, 0.25, atol=xtol)
def test_AveragedFunction():
    """Exercise noisyopt.AveragedFunction: averaging, caching, N handling."""
    ## averaging a simple function
    func = lambda x: np.asarray(x).sum()
    avfunc = noisyopt.AveragedFunction(func, N=30)
    av, avse = avfunc([1.0, 1.0])
    npt.assert_equal(av, 2.0)
    npt.assert_equal(avse, 0.0)
    # se of function value difference between two points is zero
    # (as function evaluation is not stochastic)
    diffse = avfunc.diffse([1.0, 1.0], [2.0, 1.0])
    npt.assert_equal(diffse, 0.0)

    ## changing the number of evaluations
    avfunc.N *= 2
    npt.assert_equal(avfunc.N, 60)

    ## averaging a stochastic function
    func = lambda x: np.asarray(x).sum() + np.random.randn()
    avfunc = noisyopt.AveragedFunction(func, N=30)
    # check that reevaluation gives the same thing due to caching
    av30_1, avse30_1 = avfunc([1.0, 1.0])
    av30_2, avse30_2 = avfunc([1.0, 1.0])
    npt.assert_equal(av30_1, av30_2)
    npt.assert_equal(avse30_1, avse30_2)
    # check that the standard error decreases when N is doubled
    avfunc.N *= 2
    av60, avse60 = avfunc([1.0, 1.0])
    assert av30_1 != av60
    assert avse30_1 > avse60
    # test with floating point N
    noisyopt.AveragedFunction(func, N=30.0, paired=True)
if __name__ == '__main__':
    # Allow running this test module directly through numpy's test runner.
    npt.run_module_suite()
| 37.34375 | 83 | 0.624477 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.134937 |
6435690f9d571d32e57eed857a4c0b70172bd30d | 318 | py | Python | spielberg/admin.py | carlmjohnson/spielberg | 438dcc869e8dddb4fb3efc1d3f3816cdcf8529df | [
"MIT"
] | null | null | null | spielberg/admin.py | carlmjohnson/spielberg | 438dcc869e8dddb4fb3efc1d3f3816cdcf8529df | [
"MIT"
] | null | null | null | spielberg/admin.py | carlmjohnson/spielberg | 438dcc869e8dddb4fb3efc1d3f3816cdcf8529df | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import SimpleRedirect
@admin.register(SimpleRedirect)
class SimpleRedirectAdmin(admin.ModelAdmin):
    """Django admin configuration for SimpleRedirect entries."""

    # Columns shown in the admin change-list view.
    list_display = [
        'from_url',
        'to_url',
        'date_created',
        'date_modified',
        'date_active_start',
        'date_active_end',
    ]
6436ac8ba5b8954f205ea6a742afb28c7e206a5a | 799 | py | Python | oo/pessoa.py | thaila52/pythonbirds | 55101aaf319361dae7f3d927c3a8d2777b0f8a8b | [
"MIT"
] | null | null | null | oo/pessoa.py | thaila52/pythonbirds | 55101aaf319361dae7f3d927c3a8d2777b0f8a8b | [
"MIT"
] | null | null | null | oo/pessoa.py | thaila52/pythonbirds | 55101aaf319361dae7f3d927c3a8d2777b0f8a8b | [
"MIT"
] | null | null | null | class Pessoa:
olhos = 2
    def __init__(self, *filhos, nome=None, idade=35):
        """Create a person; positional arguments become the children list."""
        self.nome = nome
        self.idade = idade
        self.filhos = list(filhos)
    def cumprimentar(self):
        # "Greet": returns a string embedding this instance's id().
        return f'Olá{id(self)}'
if __name__ == '__main__':
    # Demo of instance construction, class vs. instance attributes, and
    # dynamic attribute creation/deletion.
    thaila = Pessoa(nome='Thaila')
    junior = Pessoa(thaila, nome='Junior')
    print(Pessoa.cumprimentar(junior))
    print(id(junior))
    print(junior.cumprimentar())
    print(junior.nome)
    print(junior.idade)
    for filho in junior.filhos:
        print(filho.nome)
    # Attributes can be added and removed per instance at runtime.
    junior.sobrenome = 'Reis'
    del junior.filhos
    # Shadows the class attribute ``olhos`` on this one instance only.
    junior.olhos = 1
    print(junior.__dict__)
    print(thaila.__dict__)
    print(Pessoa.olhos)
    print(thaila.olhos)
    print(junior.olhos)
    print(id(Pessoa.olhos), id(thaila.olhos), id(junior.olhos))
6437ba4c34e60695d6c3884bb0f45f29bc524188 | 2,286 | py | Python | tools/pytest/extra/get_issues.py | servo-wpt-sync/web-platform-tests | 56e2df852354bc2b89e6d17a9dbafd280d24203c | [
"BSD-3-Clause"
] | 4 | 2020-09-09T15:28:01.000Z | 2021-12-01T00:59:56.000Z | tools/pytest/extra/get_issues.py | 063095/web-platform-tests | 255d54144a82ce76d8e50a4aa8de284151119f8b | [
"BSD-3-Clause"
] | 1 | 2018-09-05T02:45:25.000Z | 2018-09-05T02:45:38.000Z | tools/pytest/extra/get_issues.py | 063095/web-platform-tests | 255d54144a82ce76d8e50a4aa8de284151119f8b | [
"BSD-3-Clause"
] | 1 | 2021-03-21T21:43:20.000Z | 2021-03-21T21:43:20.000Z | import json
import py
import textwrap
issues_url = "http://bitbucket.org/api/1.0/repositories/pytest-dev/pytest/issues"
import requests
def get_issues():
chunksize = 50
start = 0
issues = []
while 1:
post_data = {"accountname": "pytest-dev",
"repo_slug": "pytest",
"start": start,
"limit": chunksize}
print ("getting from", start)
r = requests.get(issues_url, params=post_data)
data = r.json()
issues.extend(data["issues"])
if start + chunksize >= data["count"]:
return issues
start += chunksize
kind2num = "bug enhancement task proposal".split()
status2num = "new open resolved duplicate invalid wontfix".split()
def main(args):
cachefile = py.path.local(args.cache)
if not cachefile.exists() or args.refresh:
issues = get_issues()
cachefile.write(json.dumps(issues))
else:
issues = json.loads(cachefile.read())
open_issues = [x for x in issues
if x["status"] in ("new", "open")]
def kind_and_id(x):
kind = x["metadata"]["kind"]
return kind2num.index(kind), len(issues)-int(x["local_id"])
open_issues.sort(key=kind_and_id)
report(open_issues)
def report(issues):
for issue in issues:
metadata = issue["metadata"]
priority = issue["priority"]
title = issue["title"]
content = issue["content"]
kind = metadata["kind"]
status = issue["status"]
id = issue["local_id"]
link = "https://bitbucket.org/pytest-dev/pytest/issue/%s/" % id
print("----")
print(status, kind, link)
print(title)
#print()
#lines = content.split("\n")
#print ("\n".join(lines[:3]))
#if len(lines) > 3 or len(content) > 240:
# print ("...")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("process bitbucket issues")
parser.add_argument("--refresh", action="store_true",
help="invalidate cache, refresh issues")
parser.add_argument("--cache", action="store", default="issues.json",
help="cache file")
args = parser.parse_args()
main(args)
| 30.48 | 81 | 0.575678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 651 | 0.284777 |
6437dee025675578c882a4948fef3abdfcfa15bf | 1,214 | py | Python | tests/bdd/test_environment_loader.py | necromuralist/Machine-Learning-From-Scratch | 6a1fe6264948dbd361e426fea9c9ed4c567e7079 | [
"MIT"
] | null | null | null | tests/bdd/test_environment_loader.py | necromuralist/Machine-Learning-From-Scratch | 6a1fe6264948dbd361e426fea9c9ed4c567e7079 | [
"MIT"
] | null | null | null | tests/bdd/test_environment_loader.py | necromuralist/Machine-Learning-From-Scratch | 6a1fe6264948dbd361e426fea9c9ed4c567e7079 | [
"MIT"
] | null | null | null | # coding=utf-8
"""Environment Loader feature tests."""
# from pypi
from expects import (
be_true,
expect
)
from pytest_bdd import (
given,
scenarios,
then,
when,
)
# for testing
from .fixtures import katamari
# software under test
from cse_575.data.common import Environment
# Setup
scenarios("../features/environment_loader.feature")
# ********** Keys ********** #
# @scenario('features/environment_loader.feature', 'The environment loader is built')
# def test_the_environment_loader_is_built():
# """The environment loader is built."""
@given('a built environment loader')
def a_built_environment_loader(katamari):
    # Attach a fresh Environment instance to the shared test context.
    katamari.environment = Environment()
    return
@when('the keys are checked')
def the_keys_are_checked(katamari):
    # Every configured path should exist on disk with the expected kind.
    environment = katamari.environment
    expect(environment.raw_data_folder.is_dir()).to(be_true)
    expect(environment.zero_test_images.is_file()).to(be_true)
    expect(environment.one_test_images.is_file()).to(be_true)
    expect(environment.zero_train_images.is_file()).to(be_true)
    expect(environment.one_train_images.is_file()).to(be_true)
    return
@then('it has the expected keys')
def it_has_the_expected_keys():
    # All verification already happened in the "when" step.
    return
| 23.803922 | 85 | 0.728171 | 0 | 0 | 0 | 0 | 634 | 0.522241 | 0 | 0 | 424 | 0.349259 |
6438cc3d752def2341c27211310e7e6e164ba110 | 1,242 | py | Python | travel_blog/blog/models.py | kennethlove/travel_blog_livestream | a6bc74e99b4922b5ac0d3a8ed0f2d5dbebde6fd2 | [
"MIT"
] | 2 | 2016-12-17T02:56:22.000Z | 2017-07-19T05:13:59.000Z | travel_blog/blog/models.py | kennethlove/travel_blog_livestream | a6bc74e99b4922b5ac0d3a8ed0f2d5dbebde6fd2 | [
"MIT"
] | null | null | null | travel_blog/blog/models.py | kennethlove/travel_blog_livestream | a6bc74e99b4922b5ac0d3a8ed0f2d5dbebde6fd2 | [
"MIT"
] | 1 | 2018-12-19T06:34:37.000Z | 2018-12-19T06:34:37.000Z | from django.contrib.gis.db import models
from django.core.urlresolvers import reverse
from django.utils import timezone
import markdown
class Post(models.Model):
    """A geo-taggable blog post whose body is written in Markdown."""
    title = models.CharField(max_length=255)
    slug = models.SlugField()
    content = models.TextField()
    # Rendered HTML; regenerated from ``content`` on every save().
    content_html = models.TextField(editable=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    published_at = models.DateTimeField(default=timezone.now)
    # Optional GeoDjango point; presumably stored (lng, lat) — see latlng.
    coords = models.PointField(blank=True, null=True)
    class Meta:
        # Newest posts first.
        ordering = ['-published_at', '-id']
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        # Render the Markdown body to HTML before persisting.
        self.content_html = markdown.markdown(self.content)
        super().save(*args, **kwargs)
    def get_absolute_url(self):
        return reverse('blog:detail', kwargs={'slug': self.slug})
    @property
    def latlng(self):
        # Swaps coordinate order; raises TypeError when coords is None.
        return (self.coords[1], self.coords[0])
class Image(models.Model):
    """An uploaded image attached to a Post."""
    upload = models.ImageField(upload_to='posts/images/')
    description = models.TextField(blank=True, default='')
    post = models.ForeignKey(Post, related_name='images')
    def __str__(self):
        return self.description
| 28.883721 | 65 | 0.694042 | 1,099 | 0.884863 | 0 | 0 | 79 | 0.063607 | 0 | 0 | 64 | 0.05153 |
64397ff100127d4d36ea57304ccc643da9e22b51 | 5,723 | py | Python | pycaishen/user_programs/PycaishenBasicUsage/tickers_and_fields.py | spyamine/pycaishen3 | 33f756fd0ad2bd05eb0d21fd4e7e8573e317a6a9 | [
"Apache-2.0"
] | 1 | 2019-06-17T10:17:30.000Z | 2019-06-17T10:17:30.000Z | pycaishen/user_programs/PycaishenBasicUsage/tickers_and_fields.py | spyamine/pycaishen3 | 33f756fd0ad2bd05eb0d21fd4e7e8573e317a6a9 | [
"Apache-2.0"
] | null | null | null | pycaishen/user_programs/PycaishenBasicUsage/tickers_and_fields.py | spyamine/pycaishen3 | 33f756fd0ad2bd05eb0d21fd4e7e8573e317a6a9 | [
"Apache-2.0"
] | null | null | null | import pandas
from pycaishen.util.loggermanager import LoggerManager
class AbstractReference(object):
    """
    Base class for reference loaders backed by CSV/Excel files.

    Subclasses use the ``_*_to_dataframe`` helpers to read tabular reference
    data; both helpers return ``None`` instead of raising when the file
    cannot be read.
    """
    def __init__(self):
        # Per-instance logger named after this module.
        self.logger = LoggerManager().getLogger(__name__)

    def _csv_to_dataframe(self, csv_file, separator=None):
        """Read ``csv_file`` into a DataFrame of strings.

        :param csv_file: path of the CSV file to read
        :param separator: optional field separator; pandas' default (",")
            is used when omitted
        :return: a ``pandas.DataFrame`` with ``dtype=object``, or ``None``
            if the file cannot be read or parsed
        """
        try:
            if separator is None:
                return pandas.read_csv(csv_file, dtype=object)
            return pandas.read_csv(csv_file, sep=separator, dtype=object)
        # Bug fix: the original used a bare ``except:`` which also swallows
        # KeyboardInterrupt/SystemExit; Exception keeps the "return None on
        # failure" contract without hiding those.
        except Exception:
            return None

    def _excel_to_dataframe(self, excel_file, sheetname):
        """Read one sheet of ``excel_file`` into a DataFrame.

        :param excel_file: path of the Excel workbook
        :param sheetname: name of the sheet to load
        :return: a ``pandas.DataFrame``, or ``None`` if the workbook or
            sheet cannot be read
        """
        try:
            return pandas.read_excel(excel_file, sheetname)
        except Exception:
            return None
class Tickers(AbstractReference):
    """Holds data-source ticker symbols and their optional short aliases."""

    def __init__(self):
        super(Tickers, self).__init__()
        self.datasource_tickers = []
        self.tickers = []

    def setTickers(self, datasource_tickers, tickers=None):
        """Store the ticker lists, wrapping lone strings into lists.

        :param datasource_tickers: ticker symbol(s) as used by the data source
        :param tickers: optional matching short ticker name(s)
        """
        # Bug fix: the original compared ``type(x) == "str"`` (a type object
        # against a string literal), which is never true, so single strings
        # were never wrapped. ``isinstance`` performs the intended check.
        if isinstance(datasource_tickers, str):
            datasource_tickers = [datasource_tickers]
        self.datasource_tickers = datasource_tickers
        if isinstance(tickers, str):
            tickers = [tickers]
        # ``None`` (new default, avoiding the mutable-default-argument
        # pitfall) and an empty list both reset ``self.tickers``.
        self.tickers = tickers if tickers else []

    def fromCSV(self, csv_file, separator=None):
        """Load tickers from the "datasource_tickers"/"tickers" CSV columns.

        :param csv_file: path of the CSV file to read
        :param separator: optional field separator passed through to pandas
        """
        dataframe = self._csv_to_dataframe(csv_file, separator)
        datasource_tickers = []
        tickers = []
        try:
            columns = list(dataframe.columns)
            if "datasource_tickers" in columns:
                datasource_tickers = list(dataframe["datasource_tickers"])
            if "tickers" in columns:
                tickers = list(dataframe["tickers"])
            self.setTickers(datasource_tickers, tickers)
        # ``dataframe`` is None when reading failed, so ``.columns`` raises;
        # log instead of crashing, as before (without a bare except).
        except Exception:
            self.logger.warning("Problem with the csv file: " + str(csv_file))

    def fromExcel(self, excel_file, sheetname):
        """Load tickers from one sheet of an Excel workbook.

        :param excel_file: path of the Excel workbook
        :param sheetname: sheet to read
        """
        dataframe = self._excel_to_dataframe(excel_file, sheetname)
        datasource_tickers = []
        tickers = []
        try:
            columns = list(dataframe.columns)
            if "datasource_tickers" in columns:
                datasource_tickers = list(dataframe["datasource_tickers"])
            if "tickers" in columns:
                tickers = list(dataframe["tickers"])
            self.setTickers(datasource_tickers, tickers)
        except Exception:
            self.logger.warning("Problem with the Excel file: " + str(excel_file) + " " + str(sheetname))
class Fields(AbstractReference):
    """Holds data-source field names and their optional short aliases."""

    def __init__(self):
        super(Fields, self).__init__()
        self.datasource_fields = []
        self.fields = []

    def setFields(self, datasource_fields, fields=None):
        """Store the field lists, wrapping lone strings into lists.

        :param datasource_fields: field name(s) as used by the data source
        :param fields: optional matching short field name(s)
        """
        # Bug fix: ``type(x) == "str"`` compares a type object to a string
        # literal and is never true; isinstance performs the intended check.
        if isinstance(datasource_fields, str):
            datasource_fields = [datasource_fields]
        self.datasource_fields = datasource_fields
        if isinstance(fields, str):
            fields = [fields]
        # ``None`` (new default, avoiding the mutable-default pitfall) and
        # an empty list both reset ``self.fields``.
        self.fields = fields if fields else []

    def fromCSV(self, csv_file, separator=None):
        """Load fields from the "datasource_fields"/"fields" CSV columns.

        :param csv_file: path of the CSV file to read
        :param separator: optional field separator passed through to pandas
        """
        dataframe = self._csv_to_dataframe(csv_file, separator)
        datasource_fields = []
        fields = []
        try:
            columns = list(dataframe.columns)
            if "datasource_fields" in columns:
                datasource_fields = list(dataframe["datasource_fields"])
            if "fields" in columns:
                fields = list(dataframe["fields"])
            self.setFields(datasource_fields, fields)
        except Exception:
            self.logger.warning("Problem with the csv file: " + str(csv_file))

    def fromExcel(self, excel_file, sheetname):
        """Load fields from one sheet of an Excel workbook.

        :param excel_file: path of the Excel workbook
        :param sheetname: sheet to read
        """
        dataframe = self._excel_to_dataframe(excel_file, sheetname)
        datasource_fields = []
        fields = []
        try:
            columns = list(dataframe.columns)
            if "datasource_fields" in columns:
                datasource_fields = list(dataframe["datasource_fields"])
            # Bug fix: the original tested ``"fieldsthe" in columns`` (typo),
            # so the "fields" column was never picked up from Excel files.
            if "fields" in columns:
                fields = list(dataframe["fields"])
            self.setFields(datasource_fields, fields)
        except Exception:
            self.logger.warning("Problem with the Excel file: " + str(excel_file) + " " + str(sheetname))
if __name__ == '__main__':
    # Manual smoke test: load tickers/fields from reference CSV and Excel
    # files under <ROOT_FOLDER>/reference/ and print what was parsed.
    from pycaishen.util.settings import configurer as setting
    root = setting.ROOT_FOLDER
    current_folder = root + "reference/"
    print(current_folder)
    ticker = Tickers()
    # datasource_ticker=["ADH MC Equity","BCP MC Equity"]
    # tickers_simple = ["ADH","BCP"]
    # ticker.setTickers(datasource_ticker,tickers_simple)
    # print ticker.datasource_tickers
    # print ticker.tickers
    # ticker.setTickers(datasource_ticker)
    # print ticker.datasource_tickers
    # print ticker.tickers
    ticker_csv = current_folder +"tickers.csv"
    tickers_excel = current_folder +"tickers.xlsx"
    ticker.fromCSV(ticker_csv,";")
    print((ticker.datasource_tickers))
    print((ticker.tickers))
    ticker.setTickers([])
    sheetname = "Feuil1"
    ticker.fromExcel(tickers_excel, sheetname)
    print((ticker.datasource_tickers))
    print((ticker.tickers))
    print(("fields testing "+ 10* "**"))
    fields = Fields()
    ticker_csv = current_folder + "fields.csv"
    tickers_excel = current_folder + "fields.xlsx"
    fields.fromCSV(ticker_csv, ";")
    print((fields.datasource_fields))
    print((fields.fields))
    fields.setFields([])
    sheetname = "Feuil1"
    fields.fromExcel(tickers_excel, sheetname)
    print((fields.datasource_fields))
    print((fields.fields))
| 30.935135 | 105 | 0.616984 | 4,311 | 0.753276 | 0 | 0 | 0 | 0 | 0 | 0 | 959 | 0.167569 |
643a1eeb03835780f3eb1f23a8745fefb60702f4 | 477 | py | Python | Lectures/PythonClass/P15_python_wordcloud.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | 2 | 2020-12-05T07:42:55.000Z | 2021-01-06T23:23:18.000Z | Lectures/PythonClass/P15_python_wordcloud.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | Lectures/PythonClass/P15_python_wordcloud.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | from collections import Counter
from konlpy.tag import Hannanum
import pytagcloud
# Build a Korean word cloud from a crawled text file: extract nouns with
# KoNLPy's Hannanum tagger, count them, and render the top 200 as an image.
f = open('D:\\KYH\\02.PYTHON\\crawled_data\\cbs2.txt', 'r', encoding='UTF-8')
data = f.read()
nlp = Hannanum()
nouns = nlp.nouns(data)
count = Counter(nouns)
tags2 = count.most_common(200)
taglist = pytagcloud.make_tags(tags2, maxsize=80)
pytagcloud.create_tag_image(taglist, 'D:\\KYH\\02.PYTHON\\crawled_data\\wordcloud.jpg', size=(400, 300), fontname='korean', rectangular=False)
f.close()
643efcc62ffee5b784b925d11c6e4e3f66d82564 | 2,613 | py | Python | scripts/pi_scripts/runner.py | No-SF-Work/ayame | 6fb79b9897e75fdcf2a81083d462224226a0e4fa | [
"WTFPL"
] | 46 | 2021-08-19T11:24:32.000Z | 2022-02-04T13:17:29.000Z | scripts/pi_scripts/runner.py | GeneraluseAI/ayame | 6fb79b9897e75fdcf2a81083d462224226a0e4fa | [
"WTFPL"
] | null | null | null | scripts/pi_scripts/runner.py | GeneraluseAI/ayame | 6fb79b9897e75fdcf2a81083d462224226a0e4fa | [
"WTFPL"
] | 3 | 2021-09-20T01:44:10.000Z | 2022-01-14T04:19:00.000Z | import os
import subprocess
from pretty_print import Print_C
class Runner:
    """Runs compiled test binaries for one scheme over a set of testcases.

    Each testcase is executed ``run_kases`` times; only the first run
    (kase 0) records program output, later runs only keep stderr logs.
    """

    # Number of repetitions per testcase.
    run_kases = 3

    def __init__(self, scheme, testcases):
        """
        :param scheme: name of the compiled scheme/binary under test
        :param testcases: iterable of testcase names
        """
        self.scheme = scheme
        self.testcases = testcases
        # ``{{testcase}}``/``{{kase}}`` survive the f-string as literal
        # ``{testcase}``/``{kase}`` placeholders for later ``.format`` calls.
        self.bin_file_template = f"build/test_results/{{testcase}}/bin/{scheme}"
        self.myout_template = f"build/output/{{testcase}}/{scheme}.out"
        self.runner_log = f"build/log/run_log/{{testcase}}/{scheme}_{{kase}}.out"
        for testcase in testcases:
            self.__generate_path(testcase)

    def __generate_path(self, testcase):
        """Ensure the output and log directories for ``testcase`` exist."""
        # exist_ok avoids the race between os.path.exists and os.makedirs.
        os.makedirs(f"build/output/{testcase}/", exist_ok=True)
        os.makedirs(f"build/log/run_log/{testcase}/", exist_ok=True)

    def run_single_test(self, testcase, kase):
        """Run the scheme binary once against ``testcase``.

        On kase 0, the program stdout, a blank separator line, and the exit
        code are appended to the testcase output file; later kases discard
        stdout. stderr always goes to the per-kase log file.
        """
        binary = self.bin_file_template.format(testcase=testcase)
        stdin_path = f"testcases/{testcase}.in"
        myout = self.myout_template.format(testcase=testcase)
        log = self.runner_log.format(testcase=testcase, kase=kase)
        Print_C.print_procedure(f"Running {self.scheme}_{testcase} [kase: {kase}]")
        # Context managers fix the original resource leak: ``null_file`` was
        # opened on every call and never closed, and no handle was closed on
        # an exception.
        with open(myout, "a+") as myout_file, \
                open(log, "a+") as log_file, \
                open(os.devnull, "w") as null_file:
            stdin_file = open(stdin_path, "r") if os.path.exists(stdin_path) else None
            try:
                out = myout_file if kase == 0 else null_file
                p = subprocess.run([binary], stdin=stdin_file, stdout=out, stderr=log_file, bufsize=1)
                if kase == 0:
                    # Blank separator line followed by the exit code.
                    subprocess.run(["echo"], stdout=myout_file, bufsize=1)
                    subprocess.run(["echo", str(p.returncode)], stdout=myout_file, bufsize=1)
            finally:
                if stdin_file is not None:
                    stdin_file.close()

    def run_all_tests(self):
        """Run every testcase ``run_kases`` times, kase by kase."""
        for kase in range(Runner.run_kases):
            Print_C.print_subheader(f"[Running KASE {kase}]")
            for testcase in self.testcases:
                self.run_single_test(testcase=testcase, kase=kase)
| 37.328571 | 117 | 0.615385 | 2,549 | 0.975507 | 0 | 0 | 0 | 0 | 0 | 0 | 406 | 0.155377 |
644138068d0aa242e34e9494e5392a407d2e6756 | 4,694 | py | Python | src/fognode/app.py | hehaichi/dist-fog-c | 1d09134c4059718d3f67021f574a85e51754ed8b | [
"MIT"
] | null | null | null | src/fognode/app.py | hehaichi/dist-fog-c | 1d09134c4059718d3f67021f574a85e51754ed8b | [
"MIT"
] | null | null | null | src/fognode/app.py | hehaichi/dist-fog-c | 1d09134c4059718d3f67021f574a85e51754ed8b | [
"MIT"
] | null | null | null | from flask import Flask
from flask import request
app = Flask(__name__)
# Imports
import psutil
from flask import jsonify
import redis
import json
from os import urandom
import hashlib
import docker
from docker import APIClient
import requests
import zipfile
from celery import Celery
from celery.utils.log import get_task_logger
import os
import StringIO
import time
client = docker.from_env()
redis_cli = redis.StrictRedis(host='localhost', port=6380, db=0)
redis_shared = redis.StrictRedis(host='192.168.1.100', port=6381, db=0)
raw_cli = APIClient(base_url='unix://var/run/docker.sock')
# Celery config
app.config['CELERY_BROKER_URL'] = 'redis://localhost:6380/0'
app.config['CELERY_TIMEZONE'] = 'UTC'
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
logger = get_task_logger(__name__)
# app.config['CELERYBEAT_SCHEDULE'] = {
# 'get-heartbeat': {
# 'task': 'get_heartbeat_task',
# 'schedule': 5.0,
# },
# }
fognodes = []
@app.route('/')
def hello_world():
    # Trivial liveness endpoint.
    return 'Hello, World!'
@app.route('/utilization')
def get_util():
    """Report host resource usage plus the running-container count as JSON.

    Response keys: cpu_count, cpu_freq, cpu_percent, disk_util,
    temperatures, swap_memory, memory, containers.
    """
    # Snapshot each psutil source once so the reported numbers are
    # internally consistent (the original queried e.g. cpu_freq() three
    # times, and each metric family five times).
    freq = psutil.cpu_freq()
    disk = psutil.disk_usage('/')
    swap = psutil.swap_memory()
    virt = psutil.virtual_memory()
    utilization = {
        'cpu_count': psutil.cpu_count(),
        'cpu_freq': {'current': freq.current, 'max': freq.max, 'min': freq.min},
        'cpu_percent': psutil.cpu_percent(),
        'disk_util': {'total': disk.total, 'used': disk.used,
                      'free': disk.free, 'percent': disk.percent},
        'temperatures': psutil.sensors_temperatures(),
        'swap_memory': {'total': swap.total, 'used': swap.used,
                        'free': swap.free, 'percent': swap.percent},
        'memory': {'total': virt.total, 'available': virt.available,
                   'percent': virt.percent, 'used': virt.used,
                   'free': virt.free},
        'containers': len(client.containers.list()),
    }
    return jsonify(utilization)
@app.route('/servicedata', methods=['POST'])
def propagate_data():
    # Store the posted service data locally, then forward the same payload
    # to this node's parent so it propagates up the fog hierarchy.
    # NOTE: this file uses Python 2 print statements.
    print request.data
    form = json.loads(request.data)
    redis_cli.set(str(form['service_id']+"-service_data"), form['service_data'])
    parent_node = getParentNode(request)
    request_uri = "http://{}:8080/servicedata/".format(parent_node)
    # print request_uri
    requests.post(request_uri, data=request.data)
    return "OK"
def getParentNode(request):
    # Look up this node's parent in the shared (cluster-wide) redis; the
    # key appears to be this host's own IP. NOTE(review): hard-coded IP —
    # confirm it should not be derived from ``request``/host config.
    parent = redis_shared.get('192.168.1.102')
    print parent
    return parent
def getChildren():
    # Fetch the registered child fog nodes from the local redis instance.
    children = redis_cli.get('fognodes')
    return children
@app.route('/heartbeat/')
def heartbeat():
    # Health-check endpoint; simply responds OK.
    return "OK"
def register_fog_master():
    # Announce this node to the fog master's registration endpoint.
    requests.get("http://192.168.1.100:8080/register/fognode/")
@app.route('/build_trigger/<service_id>')
def trigger_build(service_id):
    # Kick off the asynchronous fetch/build pipeline via Celery and return
    # immediately.
    get_service_data.delay(service_id)
    return "OK"
@celery.task(name="get_service_data")
def get_service_data(service_id):
    # Background task: download the service bundle from the fog master,
    # unzip it under service-data/<service_id>/, then build and run it.
    url = "http://192.168.1.100:8080/services/{}".format(service_id)
    req = requests.get(url, stream=True)
    try:
        path = 'service-data/{}'.format(service_id)
        os.makedirs(path)
    except OSError:
        # An already-existing directory is fine; re-raise anything else.
        if not os.path.isdir(path):
            raise
    z = zipfile.ZipFile(StringIO.StringIO(req.content))
    z.extractall('service-data/{}'.format(service_id))
    build_and_deploy(service_id)
def build_and_deploy(service_id):
    # Build the docker image for the service unless its tag is already
    # cached in redis, then run a container and report the wall-clock time.
    service_data = redis_cli.get(service_id)
    if not service_data:
        command = "docker build -t rakshith/{} service-data/{}/".format(service_id, service_id)
        print "Building"
        os.system(command)
        # a = client.images.build(fileobj=f)
        # for line in raw_cli.build(fileobj=f, tag='rakshith/{}'.format(service_id), custom_context=True):
        #     print line
        # # service_data = {}
        service_data = 'rakshith/{}'.format(service_id)
        redis_cli.set(service_id, service_data)
    print "Time start"
    start = time.time()
    print client.containers.run(service_data)
    end = time.time()
    print "Time End - Total Time = {}".format(end-start)
if __name__ == '__main__':
    # Register with the fog master, then serve on all interfaces.
    register_fog_master()
    app.run(debug=True, host='0.0.0.0', port=8080)
| 28.621951 | 110 | 0.659139 | 0 | 0 | 0 | 0 | 2,464 | 0.524925 | 0 | 0 | 1,234 | 0.262889 |
6441c2d5103ee3c27b662443bb780f5d7c5a8064 | 132 | py | Python | ppgr/__init__.py | PolarPayne/ppgr | a3ce482999bbd0c71042f18880553ebba60074c5 | [
"MIT"
] | null | null | null | ppgr/__init__.py | PolarPayne/ppgr | a3ce482999bbd0c71042f18880553ebba60074c5 | [
"MIT"
] | null | null | null | ppgr/__init__.py | PolarPayne/ppgr | a3ce482999bbd0c71042f18880553ebba60074c5 | [
"MIT"
] | null | null | null | from .terminal import write, no_cursor
from .screen import Screen
__all__ = ["write", "no_cursor", "Screen"]
__version__ = "0.5.0"
| 22 | 42 | 0.719697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.25 |
64428d8d713a83be8c76fc25d74c248dd372769d | 13,566 | py | Python | tarot_deck.py | Soren98/tarot | 648f543034ef0e660b2f7f1f876052398286d227 | [
"Unlicense"
] | null | null | null | tarot_deck.py | Soren98/tarot | 648f543034ef0e660b2f7f1f876052398286d227 | [
"Unlicense"
] | null | null | null | tarot_deck.py | Soren98/tarot | 648f543034ef0e660b2f7f1f876052398286d227 | [
"Unlicense"
] | null | null | null | import json
from copy import deepcopy
from random import shuffle
cards = ['magician', 'high priestess', 'empress', 'emperor', 'hierophant', 'lovers', 'chariot', 'justice', 'hermit',
'wheel of fortune', 'strength', 'hanged man', 'death', 'temperance', 'devil', 'tower', 'star', 'moon', 'sun',
'judgement', 'world', 'fool', 'king of wands', 'queen of wands', 'knight of wands', 'page of wands',
'ten of wands', 'nine of wands', 'eight of wands', 'seven of wands', 'six of wands', 'five of wands',
'four of wands', 'three of wands', 'two of wands', 'ace of wands', 'king of cups', 'queen of cups',
'knight of cups', 'page of cups', 'ten of cups', 'nine of cups', 'eight of cups', 'seven of cups',
'six of cups', 'five of cups', 'four of cups', 'three of cups', 'two of cups', 'ace of cups', 'king of swords',
'queen of swords', 'knight of swords', 'page of swords', 'ten of swords', 'nine of swords', 'eight of swords',
'seven of swords', 'six of swords', 'five of swords', 'four of swords', 'three of swords', 'two of swords',
'ace of swords', 'king of coins', 'queen of coins', 'knight of coins', 'page of coins', 'ten of coins',
'nine of coins', 'eight of coins', 'seven of coins', 'six of coins', 'five of coins', 'four of coins',
'three of coins', 'two of coins', 'ace of coins']
upright = {'magician': 'creativity, self-confidence, dexterity, sleight of hand,will-power, skill',
'high priestess': 'knowledge, wisdom, learning, intuition, impatience, virtue, purity',
'empress': 'development, accomplishment action, evolution',
'emperor': 'authority, father-figure, structure, solid foundation',
'hierophant': 'mercy, conformity, forgiveness, social approval, bonded, inspiration',
'lovers': 'harmony, trust,romance, optimism, honor, love, harmony',
'chariot': 'perseverance, rushed decision, turmoil, vengeance, adversity',
'justice': 'equality, righteousness, virtue, honor, harmony, balance',
'hermit': 'inner strength, prudence, withdrawal, caution, vigilance',
'wheel of fortune': 'unexpected events, advancement, destiny, fortune, progress',
'strength': 'courage, conviction, strength, determination, action, heroism, virility',
'hanged man': 'change, reversal, boredom, improvement, rebirth, suspension, change',
'death': 'unexpected change, loss, failure, transformation, death, bad luck',
'temperance': 'temperance, patience, good influence, confidence, moderation',
'devil': 'downfall, unexpected failure, controversy, ravage, disaster, ill tempered',
'tower': 'downfall, unexpected failure, controversy, ravage, disaster, ill tempered',
'star': 'balance, pleasure, optimism, insight, spiritual love, hope, faith',
'moon': 'double-dealing Deception, disillusionment, trickery, error, danger, disgrace',
'sun': 'accomplishment, success, love, joy, happy marriage, satisfaction',
'judgement': 'awakening, renewal, rejuvenation, rebirth, improvement, promotion, atonement, judgment',
'world': 'perfection, recognition, success, fulfillment, eternal life',
'fool': 'beginnings possibilities, pleasure, thoughtlessness, adventure, opportunity',
'king of wands': 'passionate, good leader, noble',
'queen of wands': 'fondness, attraction, command ',
'knight of wands': 'generous, journey, impetuous',
'page of wands': 'enthusiasm, exploration, discovery, free spirit',
'ten of wands': 'pain, ruined, failure',
'nine of wands': 'victory, good health, obstinacy',
'eight of wands': 'new ideas, love, journey',
'seven of wands': 'stiff competition, victory, courage, energy',
'six of wands': 'leadership, good news, success',
'five of wands': 'lawsuit or quarrel, courage, competition',
'four of wands': 'dissatisfaction, kindness, reevaluation ',
'three of wands': 'cooperation, good partnership, success',
'two of wands': 'generous person, courage, patience, courage ',
'ace of wands': 'profitable journey, new business, beginning, new career, birth, inheritance',
'king of cups': 'kindness, willingness, enjoyment',
'queen of cups': 'loving mother, gentle, happiness',
'knight of cups': 'emotional, romantic dreamer, intelligence',
'page of cups': 'sweetness, interest in literature, gentleness',
'ten of cups': 'friendship, happiness, life',
'nine of cups': 'physical well-being, hopes, security',
'eight of cups': 'disappointment, abandonment, misery',
'seven of cups': 'imagination, illusion, directionless',
'six of cups': 'acquaintance, good memories, acquaintance, happiness',
'five of cups': 'broken marriage,vain regret, sorrow, loss',
'four of cups': 'dissatisfaction, kindness, reevaluation, redemption',
'three of cups': 'fortune, hospitality, discovery',
'two of cups': 'romance, friendship, cooperation',
'ace of cups': 'good health, love, joy, beauty',
'king of swords': 'powerful, friendship, counselor',
'queen of swords': 'skillful, brave, clever, rush',
'knight of swords': 'strong man, braver, clever person',
'page of swords': 'grace, diplomacy, dexterity, grace',
'ten of swords': 'defeat, failure, pain',
'nine of swords': 'desolation, illness, suspicion, cruelty',
'eight of swords': 'weakness, indecision, censure',
'seven of swords': 'betrayal, insolence, unwise attempt',
'six of swords': 'harmony, sorrow, journey',
'five of swords': 'defeat, cowardliness, empty victory',
'four of swords': 'temporary exile, strife, retreat',
'three of swords': 'broken relationship, civil war',
'two of swords': 'indecision, trouble, balanced',
'ace of swords': 'love, valiant, victory',
'king of coins': 'reliable person, steadiness ',
'queen of coins': 'thoughtfulness, intelligence, talents, melancholy ',
'knight of coins': 'dull outlook, patience, animal lover, trustworthy ',
'page of coins': 'kindness,new ideas/opinions, scholar ',
'ten of coins': 'wealth, property, stability ',
'nine of coins': 'solitude, well-being, green thumb ',
'eight of coins': 'employment, money, learning, trade',
'seven of coins': 'development, re-evaluation, effort, hard work ',
'six of coins': 'prosperity, philanthropy, charity, gifts ',
'five of coins': 'destitution, poor health, despair, loneliness ',
'four of coins': 'ungenerous, greed, miserly ',
'three of coins': 'abilities, approval,effort, abilities ',
'two of coins': 'harmony, new projects, helpful ',
'ace of coins': 'prosperity, happiness, pleasure'}
reverse = {'magician': 'delay, unimaginative, insecurity, lack of self-confidence',
'high priestess': 'selfishness, shallowness, misunderstanding, ignorance',
'empress': 'inaction, lack on concentration, vacillation, anxiety, infidelity',
'emperor': 'domination, excessive control, rigidity, inflexibility',
'hierophant': 'vulnerability, unconventionality, foolish generosity, impotence, frailty, unorthodoxy',
'lovers': 'separation, frustration, unreliability,fickleness, untrustworthy',
'chariot': 'vanquishment, defeat, failure, unsuccessful',
'justice': 'alse accusation, unfairness, abuse, biased',
'hermit': 'hastiness, rashness,immaturity, imprudence, foolishness',
'wheel of fortune': 'interruption, outside influences, failure, bad luck',
'strength': 'pettiness, sickness, unfaithfulness, weakness',
'hanged man': 'alse prophecy, useless sacrifice, unwillingness',
'death': 'immobility, slow changes, cheating, death, stagnation',
'temperance': 'conflict, disunion, frustration, impatience, discord',
'devil': 'release, enlightenment, divorce, recovery',
'tower': 'entrapment, imprisonment, old ways, rustic',
'star': 'disappointment, bad luck, imbalance, broken dreams',
'moon': 'trifling mistakes, deception discovered, negative advantage',
'sun': 'loneliness, canceled plans, unhappiness, break ups',
'judgement': 'disappointment, indecision, death, failure, ill-health, theft, worry',
'world': 'ack of vision, disappointment, imperfection',
'fool': 'indecision, hesitation, injustice, apathy, bad choice',
'king of wands': 'unyielding, prejudice, quarrels',
'queen of wands': 'jealous, revengeful, infidelity',
'knight of wands': 'suspicion, jealousy, narrow-mindedness',
'page of wands': 'setbacks to new ideas, pessimism, lack of direction',
'ten of wands': 'cleverness, energy, strength',
'nine of wands': 'weakness, ill-health, adversity',
'eight of wands': 'violence, quarrels, courage',
'seven of wands': 'advantage, patience, indecision',
'six of wands': 'postponement, bad news, pride in riches',
'five of wands': 'new opportunities, harmony, generosity',
'four of wands': 'new relationship, new ambitions, action',
'three of wands': 'carelessness, arrogance, pride, mistakes',
'two of wands': 'impatience, domination',
'ace of wands': 'selfishness, lack of determination, setback',
'king of cups': 'double-dealer, scandal, crafty, violent',
'queen of cups': 'perverse, unhappy, gloom, over-active imagination',
'knight of cups': 'idleness, untruthful, fraud, sensuality',
'page of cups': 'poor imagination, selfishness, no desires',
'ten of cups': 'waste, broken relationships, quarrel',
'nine of cups': 'illness, failure, overindulgence',
'eight of cups': 'pleasure, success, joy',
'seven of cups': 'will-power, determination',
'six of cups': 'friendship, disappointment, past',
'five of cups': 'return, summon, hope',
'four of cups': 'new goals, ambitions, beginning',
'three of cups': 'hidden, overindulgence, pain, gossip',
'two of cups': 'violent passion, misunderstanding',
'ace of cups': 'egotism, selfishness, hesitancy',
'king of swords': 'obstinate, evil intentions, judgments',
'queen of swords': 'sly, keen, deceitful',
'knight of swords': 'troublemaker, a crafty, tyranny',
'page of swords': 'imposture, ill-health, cunningness',
'ten of swords': 'courage, positive energy, good health',
'nine of swords': 'unselfishness, good news, healing',
'eight of swords': 'freedom, new beginnings, relaxation',
'seven of swords': 'counsel, helpful, advice',
'six of swords': 'obstacles, difficulties, defeat',
'five of swords': 'unfairness, defeat, loss',
'four of swords': 'social unrest, labor strikes, renewed activity',
'three of swords': 'sorrow, loss, confusion',
'two of swords': 'unscrupulous, release',
'ace of swords': 'obstacles, tyranny, power',
'king of coins': 'bribes, materialistic, calm',
'queen of coins': 'mistrust, suspicion, neglect',
'knight of coins': 'carelessness, standstill, irresponsible',
'page of coins': 'luxury, rebellious, bad news',
'ten of coins': 'dull, slothfulness, misfortune',
'nine of coins': 'caution, possible loss',
'eight of coins': 'void, no ambition, dislike',
'seven of coins': 'impatience, slow progress, investments',
'six of coins': 'jealousy, miserliness, unfairness',
'five of coins': 'employment, courage, revival',
'four of coins': 'spendthrift, obstacles, earthy possessions',
'three of coins': 'preoccupation, ambitions',
'two of coins': 'difficulty, discouragement',
'ace of coins': 'misery, greedy, money'}
class TarotDeck:
    """A shuffled 78-card tarot deck with draw, reset and JSON persistence.

    Card names come from the module-level ``cards`` list; meanings are
    looked up in the module-level ``upright`` and ``reverse`` dicts.
    """

    def __init__(self):
        self.deck = None  # populated by reset()
        self.reset()

    def draw(self):
        """Pop the top card and return (description, image filename).

        Returns a ``(message, None)`` tuple when the deck is exhausted.
        """
        if not self.deck:
            return 'the deck is empty. use !tarot reset to reset the deck', None
        card = self.deck.pop()
        ret = []
        # Fix: the original appended BOTH phrasings for the article-less
        # major arcana; the two phrasings are mutually exclusive.
        if card in {'justice', 'strength', 'death', 'temperance', 'judgement'}:
            ret.append('you drew {}'.format(card))
        else:
            ret.append('you drew the {}'.format(card))
        ret.append('upright meaning: {}'.format(upright[card]))
        ret.append('reverse meaning: {}'.format(reverse[card]))
        return '\n'.join(ret), '{}.jpg'.format(card.replace(' ', '_'))

    def reset(self):
        """Restore the full deck and shuffle it in place."""
        self.deck = deepcopy(cards)
        shuffle(self.deck)
        return 'tarot deck reset'

    def save(self, save_path):
        """Persist the remaining deck order to *save_path* as JSON."""
        with open(save_path, 'w') as file:
            json.dump(self.deck, file)
        print('saved deck')

    def load(self, load_path):
        """Restore a previously saved deck order from *load_path*."""
        with open(load_path, 'r') as file:
            self.deck = json.load(file)
        print('loaded deck')
644292c4d61839088d132ddd059480c729616e9c | 2,604 | py | Python | RL.py | apurva-rai/Reinforcement_Learning | 9b979b9dd1f63cb966be3230f1aab494d0d477cb | [
"MIT"
] | null | null | null | RL.py | apurva-rai/Reinforcement_Learning | 9b979b9dd1f63cb966be3230f1aab494d0d477cb | [
"MIT"
] | null | null | null | RL.py | apurva-rai/Reinforcement_Learning | 9b979b9dd1f63cb966be3230f1aab494d0d477cb | [
"MIT"
] | null | null | null | import numpy as np
#SARSA class that has function to train and test simple treasure finding path
class SARSA:
def __init__(self,a,r,action,reward,Q):
if a is None:
self.a = 0.5
if r is None:
self.r = 0.75
self.a = a
self.r = r
self.action = action
self.reward = reward
self.Q = Q
def trainer(self,i):
for j in range(i):
state = np.random.randint(0,int(len(self.Q)))
currentActions = []
for k in range(int(len(self.Q))):
if 0 <= self.reward[state,k]:
currentActions.append(k)
next = int(np.random.choice(currentActions,1))
nextActions = []
for act,val in enumerate(self.Q[next,]):
if 0 < val:
nextActions.append(act)
nextAction = int(np.random.choice(nextActions,1))
timeDiff = self.reward[state,next] + self.r * self.Q[next,nextAction] - self.Q[state,next]
self.Q[state,next] += self.a * timeDiff
def tester(self,start,end):
state = start
path = [state]
while state != end:
nextActions = []
for act,val in enumerate(self.Q[state,]):
if 0 < val:
nextActions.append(act)
next = int(np.random.choice(nextActions,1))
path.append(next)
state = next
return path
#Q-Learning class that has functions to train and test simple treasure finding path
class QL:
    """Tabular Q-learning agent over a reward matrix for simple path finding.

    a      -- learning rate (defaults to 0.5 when None)
    r      -- discount factor (defaults to 0.75 when None)
    action -- action set (stored, not used internally)
    reward -- (n x n) reward matrix; entries < 0 mark invalid transitions
    Q      -- (n x n) action-value table, updated in place by trainer()
    """

    def __init__(self, a, r, action, reward, Q):
        # Fix: the original assigned the defaults and then unconditionally
        # overwrote them with the (possibly None) arguments.
        self.a = 0.5 if a is None else a
        self.r = 0.75 if r is None else r
        self.action = action
        self.reward = reward
        self.Q = Q

    def trainer(self, i):
        """Run *i* Q-learning updates starting from uniformly random states."""
        for _ in range(i):
            state = np.random.randint(0, int(len(self.Q)))
            # Candidate next states: any move with non-negative reward.
            valid_moves = [k for k in range(int(len(self.Q)))
                           if 0 <= self.reward[state, k]]
            nxt = int(np.random.choice(valid_moves, 1))
            # Off-policy (greedy) temporal-difference target: max Q of nxt.
            time_diff = (self.reward[state, nxt]
                         + self.r * self.Q[nxt, np.argmax(self.Q[nxt, ])]
                         - self.Q[state, nxt])
            self.Q[state, nxt] += self.a * time_diff

    def tester(self, start, end):
        """Follow the greedy policy from *start* until *end*; return the
        visited state sequence (including both endpoints)."""
        state = start
        path = [state]
        while state != end:
            nxt = np.argmax(self.Q[state, ])
            path.append(nxt)
            state = nxt
        return path
6442d0f58b9da08f14b0b72fb7a653476fb790c0 | 15,068 | py | Python | pyfda/libs/pyfda_fft_windows_lib.py | cedricboudinet/pyFDA | fd75970486d931c2b58e03683f2617eefb75d1d3 | [
"MIT"
] | null | null | null | pyfda/libs/pyfda_fft_windows_lib.py | cedricboudinet/pyFDA | fd75970486d931c2b58e03683f2617eefb75d1d3 | [
"MIT"
] | null | null | null | pyfda/libs/pyfda_fft_windows_lib.py | cedricboudinet/pyFDA | fd75970486d931c2b58e03683f2617eefb75d1d3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of the pyFDA project hosted at https://github.com/chipmuenk/pyfda
#
# Copyright © pyFDA Project Contributors
# Licensed under the terms of the MIT License
# (see file LICENSE in root directory for details)
"""
Store the available fft windows and their properties
"""
import logging
logger = logging.getLogger(__name__)
import importlib
import numpy as np
import scipy.signal as sig
import scipy
windows =\
{'Boxcar':
{'fn_name':'boxcar',
'info':
('<span>Rectangular (a.k.a. "Boxcar") window, best suited for coherent signals, i.e. '
'where the window length is an integer number of the signal period. '
'It also works great when the signal length is shorter than the window '
'length (e.g. for the impulse response of a FIR filter). For other signals, '
'it has the worst sidelobe performance of all windows.<br /> <br />'
'This window also has the best SNR of all windows.</span>'),
'props':{
'enbw':1,
'cgain':1,
'bw':1
}
},
'Rectangular':
{'fn_name':'boxcar',
'info':
('<span>Boxcar (a.k.a. "Rectangular") window, best suited for coherent signals, i.e. '
'where the window length is an integer number of the signal period. '
'It also works great when the signal length is shorter than the window '
'length (e.g. for the impulse response of a FIR filter). For other signals, '
'it has the worst sidelobe performance of all windows.<br /> <br />'
'This window also has the best SNR of all windows.</span>')
},
'Barthann':
{'fn_name':'barthann',
'info':
('<span>A modified Bartlett-Hann Window.'
'</span>')},
'Bartlett':
{'fn_name':'bartlett',
'info':'<span>The Bartlett window is very similar to a triangular window, '
'except that the end points are at zero. Its side lobes fall off with '
'12 dB/oct., the side lobe suppression is xx dB.'
'<br /> <br />'
'Its Fourier transform is the product of two (periodic) sinc functions.<span>'},
'Blackman':
{'fn_name':'blackman'},
'Blackmanharris':
{'fn_name':'blackmanharris',
'info':
('<span>The minimum 4-term Blackman-Harris window gives an excellent '
'constant side-lobe suppression of more than 90 dB while keeping a '
'reasonably narrow main lobe.</span>')
},
'Blackmanharris_5':
{'fn_name':'pyfda.libs.pyfda_fft_windows_lib.blackmanharris5',
'info':
('<span>The 5-term Blackman-Harris window with a side-'
'lobe suppression of up to 125 dB.</span>')
},
'Blackmanharris_7':
{'fn_name':'pyfda.libs.pyfda_fft_windows_lib.blackmanharris7',
'info':
('<span>The 7-term Blackman-Harris window with a side-'
'lobe suppression of up to 180 dB.</span>')
},
'Blackmanharris_9':
{'fn_name':'pyfda.libs.pyfda_fft_windows_lib.blackmanharris9',
'info':
('<span>The 9-term Blackman-Harris window with a side-'
'lobe suppression of up to 230 dB.</span>')
},
'Bohman':
{'fn_name':'bohman'},
'Dolph-Chebychev':
{'fn_name':'chebwin',
'par':[{
'name':'a', 'name_tex':r'$a$',
'val':80, 'min':45, 'max':300,
'tooltip':'<span>Side lobe attenuation in dB.</span>'}],
'info':
('<span>This window optimizes for the narrowest main lobe width for '
'a given order <i>M</i> and sidelobe equiripple attenuation <i>a</i>, '
'using Chebychev polynomials.</span>'),
},
'Cosine':
{'info':
('<span>The window is half a cosine period, shifted by pi/2. '
'For that reason it is also known as "half-cosine" or "sine" window.</span>'),
},
'Flattop':
{'win_fn_name':'flattop'},
'General Gaussian':
{'fn_name':'general_gaussian',
'par':[{
'name':'p','name_tex':r'$p$',
'val':1.5, 'min':0, 'max':20,
'tooltip':'<span>Shape parameter p</span>'
},
{
'name':'σ','name_tex':r'$\sigma$',
'val':5, 'min':0, 'max':100,
'tooltip':'<span>Standard deviation σ</span>'}
],
'info':
('<span>General Gaussian window, <i>p</i> = 1 yields a Gaussian window, '
'<i>p</i> = 0.5 yields the shape of a Laplace distribution.'
'</span>'),
},
'Gauss':
{'fn_name':'gaussian',
'par':[{
'name':'σ', 'name_tex':r'$\sigma$',
'val':5,'min':0, 'max':100,
'tooltip':'<span>Standard deviation σ</span>'}],
'info':
('<span>Gaussian window '
'</span>')
},
'Hamming':
{'fn_name':'hamming',
'info':
('<span>The Hamming Window has been optimized for suppression of '
'the first side lobe. Compared to the Hann window, this comes at '
'the cost of a worse (constant) level of higher side lobes.'
'<br /> <br />Mathematically, it is a two-term raised cosine window with '
'non-zero endpoints (DC-offset).</span>')
},
'Hann':
{'fn_name':'hann',
'info':
'<span>The Hann (or, falsely, "Hanning") window is smooth at the '
'edges. In the frequency domain this corresponds to side-lobes falling '
'off with a rate of 18 dB/oct or 30 dB/dec. The first sidelobe is quite '
'high (-32 dB). It is a good compromise for many applications, especially '
'when higher frequency components need to be suppressed.'
'<br /> <br />'
'Mathematically, it is the most simple two-term raised cosine '
'or squared sine window.</span>'},
'Kaiser':
{'fn_name':'kaiser',
'par':[{
'name':'β', 'name_tex':r'$\beta$',
'val':10, 'min':0, 'max':30,
'tooltip':
('<span>Shape parameter; lower values reduce main lobe width, '
'higher values reduce side lobe level, typ. in the range '
'5 ... 20.</span>')}],
'info':
'<span>The Kaiser window is a very good approximation to the '
'Digital Prolate Spheroidal Sequence (DPSS), or Slepian window, '
'which maximizes the energy in the main lobe of the window relative '
'to the total energy.</span>'
},
'Nuttall':{},
'Parzen':{
'info':
'<span>The Parzen window is a 4th order B-spline window whose side-'
'lobes fall off with -24 dB/oct.'
'<br/ > <br />'
'It can be constructed by convolving '
'a rectangular window four times (or multiplying its frequency response '
'four times).'
'<br /> <br />'
'See also: Boxcar and Triangular / Bartlett windows.</span>'},
'Slepian':
{'fn_name':'slepian',
'par':[{
'name':'BW', 'name_tex':r'$BW$',
'val':0.3, 'min':0, 'max':100,
'tooltip':'<span>Bandwidth</span>'}],
'info':
'<span>Used to maximize the energy concentration in the main lobe. '
' Also called the digital prolate spheroidal sequence (DPSS).'
'<br /> <br />'
'See also: Kaiser window.'
'</span>'
},
'Triangular':{'fn_name':'triang'},
# 'Ultraspherical':
# {'fn_name':'pyfda.pyfda_fft_windows.ultraspherical',
# 'par':[{
# 'name':'α','name_tex':r'$\alpha$',
# 'val':0.5, 'min':-0.5, 'max':10,
# 'tooltip':'<span>Shape parameter α or μ</span>'
# },
# {
# 'name':'x0','name_tex':r'$x_0$',
# 'val':1, 'min':-10, 'max':10,
# 'tooltip':'<span>Amplitude</span>'}
# ],
# 'info':
# ('<span>Ultraspherical or Gegenbauer window, <i>p</i> = 1 yields a Gaussian window, '
# '<i>p</i> = 0.5 yields the shape of a Laplace distribution.'
# '</span>'),
# }
}
def get_window_names():
    """
    Return an alphabetically sorted list of all available window names
    (the keys of the module-level ``windows`` dict).
    """
    # sorted() iterates the dict keys directly; no manual copy loop needed.
    return sorted(windows)
def calc_window_function(win_dict, win_name, N=32, sym=True):
    """
    Generate a window function.

    Parameters
    ----------
    win_dict : dict
        The dict where the resolved window metadata is stored (updated in place).
    win_name : str
        Name of the window, this will be looked for in scipy.signal.windows.
    N : int, optional
        Number of data points. The default is 32.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    win_fnct : ndarray
        The window function (None when the parameter count is unsupported).
    """
    # Unknown names fall back to a rectangular window.
    if win_name not in windows:
        logger.warning("Unknown window name {}, using rectangular window instead.".format(win_name))
        win_name = "Boxcar"
    d = windows[win_name]

    # A window entry may override the scipy function name; the default is
    # the lower-cased window name. 'par' lists the shape parameters.
    fn_name = d.get('fn_name', win_name.lower())
    par = d.get('par', [])
    n_par = len(par)
    info = d.get('info', "")

    #--------------------------------------
    # Resolve the window function: a plain name is looked up in
    # scipy.signal.windows, a fully qualified name is imported dynamically.
    mod_fnct = fn_name.split('.')  # try to split fully qualified name
    fnct = mod_fnct[-1]
    if len(mod_fnct) == 1:
        # only one element, no modules given -> use scipy.signal.windows
        win_fnct = getattr(sig.windows, fnct, None)
    else:
        # remove the leftmost part starting with the last '.'
        mod_name = fn_name[:fn_name.rfind(".")]
        mod = importlib.import_module(mod_name)
        win_fnct = getattr(mod, fnct, None)

    if not win_fnct:
        logger.error("No window function {0} in scipy.signal.windows, using rectangular window instead!"\
                     .format(fn_name))
        fn_name = "boxcar"
        # Fix: the previous fallback looked up getattr(scipy, "boxcar"),
        # which does not exist at scipy top level; boxcar lives in
        # scipy.signal.windows.
        win_fnct = getattr(sig.windows, fn_name, None)

    # Publish the resolved window's metadata for the caller.
    win_dict.update({'name':win_name, 'fnct':fn_name, 'info':info,
                     'par':par, 'n_par':n_par, 'win_len':N})

    # Dispatch on the number of shape parameters (0..2 supported).
    if n_par == 0:
        return win_fnct(N, sym=sym)
    elif n_par == 1:
        return win_fnct(N, par[0]['val'], sym=sym)
    elif n_par == 2:
        return win_fnct(N, par[0]['val'], par[1]['val'], sym=sym)
    else:
        logger.error("{0:d} parameters is not supported for windows at the moment!".format(n_par))
def blackmanharris5(N, sym):
    """5-term cosine-sum Blackman-Harris window.

    125.427 dB sidelobe suppression, NBW 2.21535 bins, 9.81016 dB gain.
    """
    coefficients = (3.232153788877343e-001,
                    -4.714921439576260e-001,
                    1.755341299601972e-001,
                    -2.849699010614994e-002,
                    1.261357088292677e-003)
    return calc_cosine_window(N, sym, coefficients)
def blackmanharris7(N, sym):
    """7-term cosine-sum Blackman-Harris window.

    180.468 dB sidelobe suppression, NBW 2.63025 bins, 11.33355 dB gain.
    """
    coefficients = (2.712203605850388e-001,
                    -4.334446123274422e-001,
                    2.180041228929303e-001,
                    -6.578534329560609e-002,
                    1.076186730534183e-002,
                    -7.700127105808265e-004,
                    1.368088305992921e-005)
    return calc_cosine_window(N, sym, coefficients)
def blackmanharris9(N, sym):
    """9-term cosine-sum Blackman-Harris window.

    234.734 dB sidelobe suppression, NBW 2.98588 bins, 12.45267 dB gain.
    """
    coefficients = (2.384331152777942e-001,
                    -4.005545348643820e-001,
                    2.358242530472107e-001,
                    -9.527918858383112e-002,
                    2.537395516617152e-002,
                    -4.152432907505835e-003,
                    3.685604163298180e-004,
                    -1.384355593917030e-005,
                    1.161808358932861e-007)
    return calc_cosine_window(N, sym, coefficients)
def calc_cosine_window(N, sym, a):
if sym:
L = N-1
else:
L = N
x = np.arange(N) * 2 * np.pi / L
win = a[0]
for k in range(1,len(a)):
win += a[k] * np.cos(k*x)
return win
def ultraspherical(N, alpha = 0.5, x_0 = 1, sym=True):
    """Experimental ultraspherical (Gegenbauer) window of length N.

    alpha is the Gegenbauer shape parameter, x_0 an amplitude/scale factor.
    NOTE(review): 'L' below is computed but never used, and the commented-out
    code suggests this implementation is still work in progress -- confirm
    before relying on the returned values.
    """
    if sym:
        L = N-1
    else:
        L = N
    #x = np.arange(N) * np.pi / (N)
    # Gegenbauer polynomial evaluator; requires scipy.special to be importable.
    geg_ev = scipy.special.eval_gegenbauer
    w0= geg_ev(N, alpha, x_0)
    w=np.zeros(N)
    # a = 2
    # for n in range(5 + 1):
    #     x = np.linspace(-1.1, 1.1, 5001)
    #     y = eval_gegenbauer(n, a, x)
    #     plt.plot(x, y, label=r'$C_{%i}^{(2)}$' % n, zorder=-n)
    #     plt.ylim((-10,10))
    # Inverse-DFT-style synthesis: each sample sums cosine-weighted
    # Gegenbauer evaluations over half the spectrum.
    for n in range(0,N):
        w[n] = w0
        for k in range(1,N//2+1):
            w[n] += geg_ev(N, alpha, x_0 * np.cos(k*np.pi/(N+1))) * np.cos(2*n*np.pi*k/(N+1))
    # rtn += np.cos(x*k)
    #w = geg_ev(N-1, alpha, x_0 * np.cos(x))
    #logger.error(W[0].dtype, len(W))
    #W = np.abs(fft.ifft(w))
    #logger.error(type(w[0].dtype), len(w))
    return w
class UserWindows(object):
    """Placeholder container for user-defined window functions.

    Fix: the previous implementation forwarded *parent* to
    ``object.__init__``, which raises TypeError because ``object`` takes
    no extra arguments; the parent is now kept on the instance instead.
    """
    def __init__(self, parent):
        super(UserWindows, self).__init__()
        self.parent = parent
# =======
# see also:
# https://www.electronicdesign.com/technologies/analog/article/21798689/choose-the-right-fft-window-function-when-evaluating-precision-adcs
# https://github.com/capitanov/blackman_harris_win
# https://en.m.wikipedia.org/wiki/Window_function
# https://www.dsprelated.com/freebooks/sasp/Blackman_Harris_Window_Family.html
# https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/submissions/46092/versions/3/previews/coswin.m/index.html
# Refs:
# "A Family of Cosine-Sum Windows for High-Resolution Measurements"
# Hans-Helge Albrecht
# Physikalisch-Technische Bundesanstalt
# Acoustics, Speech, and Signal Processing, 2001. Proceedings. (ICASSP '01).
# 2001 IEEE International Conference on (Volume:5 )
# pgs. 3081-3084
# "Tailoring of Minimum Sidelobe Cosine-Sum Windows for High-Resolution Measurements"
# Hans-Helge Albrecht
# Physikalisch-Technische Bundesanstalt (PTB), Berlin, Germany
# The Open Signal Processing Journal, 2010, 3, pp. 20-29
# Heinzel G. et al.,
# "Spectrum and spectral density estimation by the Discrete Fourier transform (DFT),
# including a comprehensive list of window functions and some new flat-top windows",
# February 15, 2002
# https://holometer.fnal.gov/GH_FFT.pdf | 37.29703 | 140 | 0.554685 | 108 | 0.007167 | 0 | 0 | 0 | 0 | 0 | 0 | 9,270 | 0.61517 |
6442f3e788a9a60ebaf60333e7bbf4b6be2a9475 | 30,541 | py | Python | src/data/argoverse/dataset.py | ybarancan/STSU | 0b9efa88739c517a7ca00e61faefa4b45714d312 | [
"Apache-2.0"
] | 67 | 2021-10-06T13:48:44.000Z | 2022-03-31T04:00:20.000Z | src/data/argoverse/dataset.py | w23215/STSU | 0b9efa88739c517a7ca00e61faefa4b45714d312 | [
"Apache-2.0"
] | 6 | 2021-11-03T08:37:31.000Z | 2022-03-19T10:13:10.000Z | src/data/argoverse/dataset.py | w23215/STSU | 0b9efa88739c517a7ca00e61faefa4b45714d312 | [
"Apache-2.0"
] | 10 | 2021-12-08T10:54:45.000Z | 2022-03-22T06:44:14.000Z | import os
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision.transforms.functional import to_tensor
from src.utils import bezier
from .utils import IMAGE_WIDTH, IMAGE_HEIGHT, ARGOVERSE_CLASS_NAMES
from ..utils import decode_binary_labels
import numpy as np
import logging
import sys
import cv2
from scipy.ndimage import gaussian_filter
class ArgoverseMapDataset(Dataset):
    def __init__(self, config, loader, am,
                 log_names=None, train=True, pinet=False, work_objects=True):
        """Index every lidar-synchronized front-camera frame of the given logs.

        config       -- experiment config (image/patch sizes, map extents, paths)
        loader       -- Argoverse tracking loader used for images and calibration
        am           -- Argoverse map API object (stored for later use)
        log_names    -- list of log ids to index
        train        -- unused here; kept for interface compatibility
        pinet        -- if True, images are resized to PINet's 512x256 BGR input
        work_objects -- if True, object (vehicle) targets are produced as well
        """
        self.image_size = config.image_size
        self.config = config
        self.examples = []
        self.pinet = pinet
        self.am = am
        self.calibs = dict()
        # Preload training examples from Argoverse train and test sets
        self.loader = loader
        # Control points per curve used to reshape the label coefficients.
        self.n_control = 3
        self.camera = "ring_front_center"
        # Precomputed per-frame object annotations, keyed by "<log>_<timestamp>".
        self.obj_dict = np.load(config.argo_obj_dict_path,allow_pickle=True)
        self.resolution = config.map_resolution
        self.preload(loader, log_names)
        self.work_objects = work_objects
        logging.error('ARGO LOADED')
    def preload(self, loader, log_names=None):
        """Fill self.examples with (timestamp, log_id, frame_index) triples
        covering every lidar frame of every requested log."""
        for my_scene_id in range(len(log_names)):
            log = loader.get(log_names[my_scene_id])
            # One example per lidar frame of the scene.
            n_frames_in_scene = log.num_lidar_frame
            for k in range(n_frames_in_scene):
                # Front-camera image timestamp synchronized to lidar frame k.
                timestamp = str(np.copy(loader._image_timestamp_list_sync[log_names[my_scene_id]][self.camera][k]))
                self.examples.append((timestamp,log_names[my_scene_id], k))
    def __len__(self):
        """Number of (timestamp, log, frame) examples built by preload()."""
        return len(self.examples)
    def __getitem__(self, idx):
        """Return (image, target_dict, problem_flag) for example *idx*.

        On any failure the sample is flagged with ``(None, dict(), True)``
        so the collate function can drop it instead of crashing the loader.
        """
        # Get the split, log and camera ids corresponding to the given timestamp
        try:
            timestamp, logid, ind = self.examples[idx]
            image = self.load_image(logid, timestamp)
            calib = self.load_calib(logid)
            # Lane-graph labels: centerline control points, connectivity,
            # endpoints, masks and per-curve metadata for this frame.
            obj_to_return, center_width_orient,con_matrix,endpoints, orig_img_centers, origs, mask, bev_mask,\
                to_return_centers, labels,roads,coeffs,\
                outgoings, incomings, problem, obj_exists = self.load_line_labels(timestamp, logid, ind)
            if problem:
                logging.error('THERE WAS PROBLEM')
                return (None, dict(), True)
            if self.work_objects:
                #1.5 is camera height
                if len(center_width_orient) > 0:
                    my_calib = calib.cpu().numpy()
                    obj_center = center_width_orient[:,:2]
                    # Undo the [0,1] normalization back to metric BEV coordinates.
                    obj_x = obj_center[:,0]*(self.config.map_extents[2]-self.config.map_extents[0]) + self.config.map_extents[0]
                    obj_y = obj_center[:,1]*(self.config.map_extents[3]-self.config.map_extents[1]) + self.config.map_extents[1]
                    # Pinhole projection into the image plane; the small epsilon
                    # avoids division by zero for objects at zero depth.
                    img_x = (obj_x*my_calib[0,0] + obj_y*my_calib[0,-1])/(obj_y + 0.0001)
                    img_y = (1.5*my_calib[1,1] + obj_y*my_calib[1,-1])/(obj_y + 0.0001)
                    img_x = img_x / self.image_size[0]
                    img_y = img_y / self.image_size[1]
                    # Drop objects whose projected center falls outside the image.
                    to_keep = np.logical_not((img_x > 1) | (img_x < 0) | (img_y > 1) | (img_y < 0))
                    img_centers = np.stack([img_x,img_y],axis=-1)
                    if np.sum(to_keep) == 0:
                        img_centers = []
                        center_width_orient = []
                        obj_to_return = []
                        obj_exists = False
                    else:
                        img_centers = img_centers[to_keep]
                        center_width_orient = center_width_orient[to_keep]
                        obj_to_return = obj_to_return[to_keep]
                else:
                    img_centers = []
            # First control point of each curve serves as its init point.
            init_points = np.reshape(endpoints,(-1,2,2))[:,0]
            sorted_init_points, sort_index = self.get_sorted_init_points(init_points)
            # One smoothed heatmap per curve, peaked at its init point, on a
            # 2x-resolution feature grid.
            temp_ar = np.zeros((len(sorted_init_points),2*self.config.polyrnn_feat_side,2*self.config.polyrnn_feat_side))
            for k in range(len(sorted_init_points)):
                temp_ar[k,int(np.clip(sorted_init_points[k,1]*2*self.config.polyrnn_feat_side,0,2*self.config.polyrnn_feat_side-1)),int(np.clip(sorted_init_points[k,0]*2*self.config.polyrnn_feat_side,0,2*self.config.polyrnn_feat_side-1))]=1
                temp_ar[k] = gaussian_filter(temp_ar[k], sigma=0.1)
                temp_ar[k] = temp_ar[k]/np.max(temp_ar[k])
            # sorted_points = np.copy(np.ascontiguousarray(coeffs[sort_index,:]))
            sorted_points = np.copy(coeffs)
            # Quantize the normalized control points onto the feature grid.
            grid_sorted_points = np.reshape(sorted_points,(-1,self.n_control ,2))
            grid_sorted_points[...,0]= np.int32(grid_sorted_points[...,0]*(self.config.polyrnn_feat_side - 1))
            grid_sorted_points[...,1]= np.int32(grid_sorted_points[...,1]*(self.config.polyrnn_feat_side - 1))
            my_grid_points = np.copy(np.ascontiguousarray(grid_sorted_points))
            # Assemble the training target dict consumed by the model/loss.
            target = dict()
            target['mask'] = torch.tensor(mask).float()
            target['bev_mask'] = bev_mask
            target['static_mask'] = torch.zeros(self.config.num_bev_classes,mask.shape[1],mask.shape[2])
            target['calib'] = calib.float()
            target['center_img'] = to_return_centers
            target['orig_center_img'] = orig_img_centers
            target['labels'] = labels.long()
            target['roads'] = torch.tensor(np.int64(roads)).long()
            target['control_points'] = torch.tensor(coeffs)
            target['con_matrix'] = torch.tensor(con_matrix)
            target['obj_exists'] = obj_exists
            if self.work_objects:
                target['obj_corners'] = torch.tensor(obj_to_return).float()
                target['obj_converted'] = torch.tensor(center_width_orient).float()
                target['obj_exists'] = torch.tensor(obj_exists)
            target['init_point_matrix'] = torch.tensor(np.copy(np.ascontiguousarray(temp_ar))).float()
            target['sorted_control_points'] = torch.tensor(sorted_points).float()
            target['grid_sorted_control_points'] = torch.tensor(my_grid_points).float()
            target['sort_index'] = torch.tensor(np.copy(np.ascontiguousarray(sort_index)))
            target['endpoints'] = torch.tensor(endpoints).float()
            target['origs'] = torch.tensor(origs)
            target['scene_token'] = logid
            target['sample_token'] = timestamp
            target['data_token'] = logid
            target['scene_name'] = logid
            target['outgoings'] = outgoings
            target['incomings'] = incomings
            # Argoverse scenes are all right-hand traffic.
            target['left_traffic'] = torch.tensor(False)
            return (image, target, False)
        except Exception as e:
            # Log the failure with its location and flag the sample as bad
            # rather than crashing the whole data loading pipeline.
            logging.error('ARGO DATALOADER ' + str(e))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logging.error(str((exc_type, fname, exc_tb.tb_lineno)))
            return (None, dict(), True)
def load_image(self, log_id, timestamp):
# Load image
# loader = self.loader
# image_file = loader._timestamp_image_dict[log_id][self.camera][timestamp]
image_file = os.path.join(self.config.argo_log_root,log_id,'ring_front_center','ring_front_center_'+str(timestamp)+'.jpg')
image = Image.open(image_file)
image = np.array(image,np.float32)
if self.pinet:
image = cv2.resize(image, (512,256), cv2.INTER_LINEAR)[...,[2,1,0]]
else:
image = cv2.resize(image, (self.config.patch_size[0], self.config.patch_size[1]), cv2.INTER_LINEAR)
image = np.float32(image)
image = self.minmax_normalize(image, norm_range=(-1, 1))
return to_tensor(image).float()
def minmax_normalize(self,img, norm_range=(0, 1), orig_range=(0, 255)):
# range(0, 1)
norm_img = (img - orig_range[0]) / (orig_range[1] - orig_range[0])
# range(min_value, max_value)
norm_img = norm_img * (norm_range[1] - norm_range[0]) + norm_range[0]
return norm_img
def load_calib(self, log):
# Get the loader for the current split
loader = self.loader
# Get intrinsics matrix and rescale to account for downsampling
calib = np.copy(loader.get_calibration(self.camera, log).K[:,:3])
calib[0] *= self.image_size[0] / IMAGE_WIDTH
calib[1] *= self.image_size[1] / IMAGE_HEIGHT
# Convert to a torch tensor
return torch.from_numpy(calib)
def load_labels(self, split, log, camera, timestamp):
# Construct label path from example data
label_path = os.path.join(self.label_root, split, log, camera,
timestamp, f'{camera}_{timestamp}.png')
# Load encoded label image as a torch tensor
encoded_labels = to_tensor(Image.open(label_path)).long()
# Decode to binary labels
num_class = len(ARGOVERSE_CLASS_NAMES)
labels = decode_binary_labels(encoded_labels, num_class+ 1)
labels, mask = labels[:-1], ~labels[-1]
return labels, mask
    def get_object_params(self, log_id, timestamp, vis_mask):
        """Collect ground-truth objects for one frame that fall inside the
        camera-visible BEV region.

        Returns (corners, center_width_orient, obj_exists):
        - corners: per object, 4 normalised BEV corners flattened plus the
          class id (9 values).
        - center_width_orient: per object (cx, cy, long_len, short_len, angle).
        - obj_exists: True if at least one object passed the visibility test.

        NOTE(review): ``objs`` may be None when the token is missing from
        ``obj_dict``; the loop would then raise and propagate — confirm the
        caller relies on the surrounding try/except for that case.
        """
        resolution=self.resolution
        # Objects are stored in a pre-built dict keyed by '<log>_<timestamp>'.
        token = log_id +'_' + str(timestamp)
        objs = self.obj_dict.item().get(token)
        to_return=[]
        center_width_orient=[]
        obj_exists = False
        for obj in objs:
            # NOTE(review): class ids above 7 are skipped — confirm against
            # the object-class label map.
            if obj[-1] > 7:
                continue
            # First 8 values are the 4 BEV corners (x, y); normalise into
            # [0, 1] using the configured map extents, flipping y.
            reshaped = np.reshape(np.copy(obj)[:8],(4,2))
            reshaped[:,0] = (reshaped[:,0] - self.config.map_extents[0])/(self.config.map_extents[2]-self.config.map_extents[0])
            reshaped[:,1] = (reshaped[:,1] - self.config.map_extents[1])/(self.config.map_extents[3]-self.config.map_extents[1])
            reshaped[:,1] = 1 - reshaped[:,1]
            # Integer (row, col) coordinates into the 196x200 visibility mask.
            coords = (np.clip(np.int64(reshaped[:,1]*(self.config.map_extents[3]-self.config.map_extents[1])/resolution),0,195),
                      np.clip(np.int64(reshaped[:,0]*(self.config.map_extents[2]-self.config.map_extents[0])/resolution),0,199))
            # Keep the object if ANY corner is both inside the map and in the
            # camera-visible region.
            inside = False
            for k in range(4):
                inside = inside | ((vis_mask[coords[0][k], coords[1][k]] > 0.5) &
                                   ((reshaped[k,1] >= 0) & (reshaped[k,1] <= 1)) &
                                   ((reshaped[k,0] >= 0) & (reshaped[k,0] <= 1)))
            if inside:
                # logging.error('INSIDE')
                res_ar = np.zeros(5)
                # Flattened corners + class id.
                temp=np.squeeze(np.zeros((9,1),np.float32))
                temp[:8] = reshaped.flatten()
                temp[-1] = obj[-1]
                to_return.append(np.copy(temp))
                # Undo the vertical flip before computing centre/orientation.
                reshaped[:,1] = 1 - reshaped[:,1]
                # Edge vectors of the quadrilateral, in corner order.
                all_edges = np.zeros((4,2))
                for k in range(4):
                    first_corner = reshaped[k%4]
                    second_corner = reshaped[(k+1)%4]
                    all_edges[k,:]=np.copy(second_corner - first_corner)
                all_lengths = np.sqrt(np.square(all_edges[:,0]) + np.square(all_edges[:,1]))
                long_side = np.argmax(all_lengths)
                # egim = np.sign(all_edges[long_side][1]/(all_edges[long_side][0] + 0.00001))*\
                #          np.abs(all_edges[long_side][1])/(all_lengths[long_side] + 0.00001)
                # Orientation of the longest edge; epsilons guard division by
                # zero for degenerate boxes.
                my_abs_cos = np.abs(all_edges[long_side][0])/(all_lengths[long_side] + 0.00001)
                my_sign = np.sign(all_edges[long_side][1]/(all_edges[long_side][0] + 0.00001))
                angle = np.arccos(my_abs_cos*my_sign)
                center = np.mean(reshaped,axis=0)
                long_len = np.max(all_lengths)
                short_len = np.min(all_lengths)
                # Pack (cx, cy, long_len, short_len, angle).
                res_ar[:2] = center
                # res_ar[4] = my_abs_cos
                # res_ar[5] = my_sign
                res_ar[4] = angle
                res_ar[2] = long_len
                res_ar[3] = short_len
                center_width_orient.append(np.copy(res_ar))
                obj_exists = True
        return np.array(to_return), np.array(center_width_orient), obj_exists
def load_seg_labels(self, log, timestamp):
camera = self.camera
label_path = os.path.join(self.config.argo_seg_label_root, log, camera,
f'{camera}_{timestamp}.png')
encoded_labels = np.array(Image.open(label_path))
bev_label = torch.tensor(np.flipud(encoded_labels).copy()).long()
bev_label = decode_binary_labels(bev_label, len(ARGOVERSE_CLASS_NAMES)+1)
return bev_label
    def line_endpoints(self, coeffs, inc, out, roads):
        """Re-orient Bezier control points so connected segments line up.

        For each lane ``k`` with an incoming (``inc``) and/or outgoing
        (``out``) neighbour, the four possible start/end pairings are
        compared by L1 distance and the control-point order of this lane
        and/or the neighbour is reversed so the closest endpoints meet.
        Returns the adjusted copy of ``coeffs``; on any failure the original
        ``coeffs`` is returned unchanged.
        """
        try:
            roads = list(roads)
            new_coeffs = np.copy(np.array(coeffs))
            for k in range(len(coeffs)):
                if len(inc[k]) > 0:
                    other = roads.index(inc[k][0])
                    other_coef = coeffs[other]
                    # Pairings: (k-start, other-start), (k-end, other-start),
                    # (k-start, other-end), (k-end, other-end).
                    dist1 = np.sum(np.abs(new_coeffs[k,0] - other_coef[0]))
                    dist2 = np.sum(np.abs(new_coeffs[k,-1] - other_coef[0]))
                    dist3 = np.sum(np.abs(new_coeffs[k,0] - other_coef[-1]))
                    dist4 = np.sum(np.abs(new_coeffs[k,-1] - other_coef[-1]))
                    min_one = np.squeeze(np.argmin(np.stack([dist1,dist2,dist3,dist4])))
                    # Incoming neighbour should END where k STARTS, so case 2
                    # (dist3 smallest) is already aligned and needs no flip.
                    if min_one == 0:
                        # k-start meets other-start: reverse the neighbour.
                        temp = np.copy(new_coeffs[other,0])
                        new_coeffs[other,0] = new_coeffs[other,-1]
                        new_coeffs[other,-1] = temp
                    elif min_one == 1:
                        # k-end meets other-start: reverse both.
                        temp = np.copy(new_coeffs[other,0])
                        new_coeffs[other,0] = new_coeffs[other,-1]
                        new_coeffs[other,-1] = temp
                        temp = np.copy(new_coeffs[k,0])
                        new_coeffs[k,0] = new_coeffs[k,-1]
                        new_coeffs[k,-1] = temp
                    elif min_one == 3:
                        # k-end meets other-end: reverse k.
                        temp = np.copy(new_coeffs[k,0])
                        new_coeffs[k,0] = new_coeffs[k,-1]
                        new_coeffs[k,-1] = temp
                if len(out[k]) > 0:
                    other = roads.index(out[k][0])
                    other_coef = coeffs[other]
                    dist1 = np.sum(np.abs(new_coeffs[k,0] - other_coef[0]))
                    dist2 = np.sum(np.abs(new_coeffs[k,-1] - other_coef[0]))
                    dist3 = np.sum(np.abs(new_coeffs[k,0] - other_coef[-1]))
                    dist4 = np.sum(np.abs(new_coeffs[k,-1] - other_coef[-1]))
                    min_one = np.squeeze(np.argmin(np.stack([dist1,dist2,dist3,dist4])))
                    # Outgoing neighbour should START where k ENDS, so case 1
                    # (dist2 smallest) is already aligned and needs no flip.
                    if min_one == 0:
                        # k-start meets other-start: reverse k.
                        temp = np.copy(new_coeffs[k,0])
                        new_coeffs[k,0] = new_coeffs[k,-1]
                        new_coeffs[k,-1] = temp
                    elif min_one == 2:
                        # k-start meets other-end: reverse both.
                        temp = np.copy(new_coeffs[other,0])
                        new_coeffs[other,0] = new_coeffs[other,-1]
                        new_coeffs[other,-1] = temp
                        temp = np.copy(new_coeffs[k,0])
                        new_coeffs[k,0] = new_coeffs[k,-1]
                        new_coeffs[k,-1] = temp
                    elif min_one == 3:
                        # k-end meets other-end: reverse the neighbour.
                        temp = np.copy(new_coeffs[other,0])
                        new_coeffs[other,0] = new_coeffs[other,-1]
                        new_coeffs[other,-1] = temp
            return new_coeffs
        except Exception as e:
            logging.error('ENDPOINTS ' + str(e))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logging.error(str((exc_type, fname, exc_tb.tb_lineno)))
            return coeffs
# inc, out = self.get_line_orientation(k, roads, all_selected,selected_pred[k],selected_suc[k],selected_id)
#
def get_line_orientation(self, road, all_roads, all_selected,selected_pred,selected_suc,selected_id):
try:
# my_gt_id = selected_id[road]
# road_id=all_roads[road]
outgoing_id = []
for tok in selected_suc:
# logging.error('OUTGOING ' + tok)
if tok in selected_id:
outgoing_id.append(all_selected[selected_id.index(tok)])
incoming_id = []
for tok in selected_pred:
# logging.error('INCOMING ' + tok)
if tok in selected_id:
incoming_id.append(all_selected[selected_id.index(tok)])
return incoming_id, outgoing_id
except Exception as e:
logging.error('ORIENT ' + str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.error(str((exc_type, fname, exc_tb.tb_lineno)))
return [],[]
def get_connectivity(self,roads,outgoings, incomings):
try:
con_matrix = np.zeros((len(roads),len(roads)))
# logging.error('CON ROAD ' + str(roads))
for k in range(len(roads)):
con_matrix[k,k] = 0
outs = outgoings[k]
# logging.error('CON OUTS ' + str(outs))
for ou in outs:
sel = ou
if sel in roads:
ind = roads.index(sel)
# logging.error('INCOM ' + str(incomings[ind]))
# if not (ou in incomings[ind]):
# logging.error('OUT HAS NO IN')
con_matrix[k,ind] = 1
return con_matrix
except Exception as e:
logging.error('CONNECT ' + str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.error(str((exc_type, fname, exc_tb.tb_lineno)))
return None
    def get_sorted_init_points(self, points):
        '''
        FROM BOTTOM UP AND RIGHT TO LEFT

        Sort normalised init points into a fixed traversal order over the
        RNN grid and return (sorted_points, sort_index).
        '''
        # Scale normalised coordinates onto the RNN grid.
        x = points[:,0]*self.config.rnn_size[1]
        y = points[:,1]*self.config.rnn_size[0]
        # Ordering key; descending (via the flip below) yields the
        # bottom-up / right-to-left traversal named in the docstring.
        # NOTE(review): x was already scaled by rnn_size[1] above and is
        # multiplied by it again here — confirm this is the intended key
        # (a row-major key would normally be size * y + x).
        place = self.config.rnn_size[1]*x + y
        sort_ind = np.argsort(place)
        sort_ind = np.flip(sort_ind)
        return points[sort_ind,:], sort_ind
def get_visible_mask(self, instrinsics, image_width, extents, resolution):
# Get calibration parameters
fu, cu = instrinsics[0, 0], instrinsics[0, 2]
# Construct a grid of image coordinates
x1, z1, x2, z2 = extents
x, z = np.arange(x1, x2, resolution), np.arange(z1, z2, resolution)
ucoords = x / z[:, None] * fu + cu
# Return all points which lie within the camera bounds
return (ucoords >= 0) & (ucoords < image_width)
    def load_line_labels(self,timestamp, log_id, ind):
        """Build all lane-centerline training targets for one Argoverse frame.

        Returns a 16-tuple:
        (obj_corners, obj_center_width_orient, con_matrix, endpoints,
         orig_img_centers, origs, vis_labels, bev_labels, img_centers,
         labels, roads, coeffs, outgoings, incomings, problem, obj_exists)
        where ``problem`` is True (and the rest None) when the frame could
        not be processed.
        """
        try:
            am = self.am
            camera = "ring_front_center"
            loader = self.loader
            log = loader.get(log_id)
            # Ego pose in the city frame at sample index ``ind``.
            ego_loc = np.copy(log.get_pose(ind).translation)
            query_x = ego_loc[0]
            query_y = ego_loc[1]
            city_SE3_egovehicle_mat = np.copy(log.get_pose(ind).rotation)
            transform_matrix = np.eye(3)
            transform_matrix[:2, :2] = city_SE3_egovehicle_mat[:2,:2]
            calib = log.get_calibration("ring_front_center")
            city_name = log.city_name
            # All lane segments within an 80 m box around the ego position.
            lane_ids = am.get_lane_ids_in_xy_bbox(query_x, query_y, city_name, 80.0)
            local_centerlines = []
            successors = []
            predecessors = []
            for lane_id in lane_ids:
                my_lane = am.city_lane_centerlines_dict[city_name][lane_id]
                predecessors.append(my_lane.predecessors)
                successors.append(my_lane.successors)
                centerl = my_lane.centerline
                # Some centerlines are stored 2D; lift them with map heights.
                if len(centerl[0]) == 2:
                    centerl = am.append_height_to_2d_city_pt_cloud(centerl, city_name)
                local_centerlines.append(centerl)
            # 196x200 BEV raster holding per-lane integer ids (0 = background).
            my_area = np.zeros((196,200))
            inter = 5
            # Camera-frustum visibility over the BEV grid (flipped to image
            # row order).
            vis_mask = np.copy(np.uint8(np.flipud(self.get_visible_mask(np.copy(calib.K), np.copy(calib.camera_config.img_width),
                                   self.config.map_extents, self.config.resolution))))
            temp_vis_mask = np.flipud(vis_mask)
            converted_lines = []
            all_selected = []
            selected_pred = []
            selected_suc = []
            selected_id = []
            for centerline_city_fr_id in range(len(lane_ids)):
                centerline_city_fr = local_centerlines[centerline_city_fr_id]
                # Drop points with unknown ground height.
                ground_heights = am.get_ground_height_at_xy(centerline_city_fr, city_name)
                valid_idx = np.isnan(ground_heights)
                centerline_city_fr = centerline_city_fr[~valid_idx]
                # City frame -> ego frame, then into BEV grid coordinates
                # (0.25 m cells: the *4 below).
                centerline_egovehicle_fr = (centerline_city_fr - ego_loc) @ city_SE3_egovehicle_mat
                centerline_egovehicle_fr = centerline_egovehicle_fr[:,:2]
                centerline_egovehicle_fr[:,1] = -centerline_egovehicle_fr[:,1]+25
                centerline_egovehicle_fr[:,0] = centerline_egovehicle_fr[:,0]-1
                centerline_egovehicle_fr = centerline_egovehicle_fr*4
                counter = 0
                to_draw = []
                to_sel_id = []
                # Keep points that are inside the grid AND camera-visible.
                for k in range(len(centerline_egovehicle_fr)):
                    if ((centerline_egovehicle_fr[k][0] >= 0) & (centerline_egovehicle_fr[k][0] <= 195) & (centerline_egovehicle_fr[k][1] >= 0) & (centerline_egovehicle_fr[k][1] <= 199)):
                        if temp_vis_mask[int(np.clip(centerline_egovehicle_fr[k][0], 0, 195)), int(np.clip(centerline_egovehicle_fr[k][1], 0, 199))]>0:
                            to_draw.append((np.clip(centerline_egovehicle_fr[k][0], 0, 195), np.clip(centerline_egovehicle_fr[k][1], 0, 199)))
                            to_sel_id.append(k)
                            counter = counter + 1
                # A lane needs at least 3 visible points to be rasterised.
                if counter >= 3:
                    # Draw ``inter`` interpolated samples between consecutive
                    # points, labelling cells with this lane's id (1-based).
                    for k in range(len(to_draw)-1):
                        for m in range(inter):
                            my_area[int((m/inter)*to_draw[k][0] + (1 - m/inter)*to_draw[k+1][0]), int((m/inter)*to_draw[k][1] + (1 - m/inter)*to_draw[k+1][1])] = centerline_city_fr_id + 1
                    my_area[int(to_draw[0][0]), int(to_draw[0][1])] = centerline_city_fr_id + 1
                    my_area[int(to_draw[-1][0]), int(to_draw[-1][1])] = centerline_city_fr_id + 1
                    converted_lines.append(np.array(to_draw))
                    all_selected.append(centerline_city_fr_id + 1)
                    selected_pred.append(predecessors[centerline_city_fr_id])
                    selected_suc.append(successors[centerline_city_fr_id])
                    selected_id.append(lane_ids[centerline_city_fr_id])
            # logging.error('CONVERTED ' + str(len(converted_lines)))
            my_area = np.flipud(my_area)
            vis_labels = np.stack([vis_mask,vis_mask],axis=0)
            orig_img_centers = my_area
            # Mask the rasterised lanes to the visible region.
            img_centers = orig_img_centers*np.uint16(vis_mask)
            roads = np.unique(img_centers)[1:]
            if len(converted_lines) < 1:
                logging.error('NOT ENOUGH CONVERTED LINES')
                return None,None,None,None,\
None,None,\
None, None,\
None,None,None,\
None, None, None, True, False
            # NOTE(review): ``new_to_feed`` realigns converted_lines to the
            # surviving ``roads`` but is never used — the loop below indexes
            # converted_lines[k] directly, which can be misaligned when some
            # selected lanes were fully masked out. Verify against callers.
            new_to_feed = []
            for k in range(len(roads)):
                new_to_feed.append(converted_lines[all_selected.index(roads[k])])
            outgoings = []
            incomings = []
            coeffs_list = []
            endpoints=[]
            origs = []
            bev_labels = self.load_seg_labels(log_id, timestamp)
            if self.work_objects:
                obj_to_return, center_width_orient, obj_exists = self.get_object_params(log_id, timestamp, vis_mask)
            else:
                obj_to_return, center_width_orient, obj_exists = None, None, None
            to_remove = []
            # Fit an n_control-point Bezier curve to every surviving lane.
            for k in range(len(roads)):
                sel = img_centers == roads[k]
                locs = np.where(sel)
                # Normalise pixel locations into [0, 1].
                sorted_x = locs[1]/img_centers.shape[1]
                sorted_y = locs[0]/img_centers.shape[0]
                if len(sorted_x) < 3:
                    to_remove.append(roads[k])
                    continue
                points = np.array(list(zip(sorted_x,sorted_y)))
                res = bezier.fit_bezier(points, self.n_control)[0]
                #
                inc, out = self.get_line_orientation(k, roads, all_selected,selected_pred[k],selected_suc[k],selected_id)
                #
                outgoings.append(out)
                incomings.append(inc)
                # Pin the first/last control points to the lane's true
                # endpoints (converted to the normalised, y-flipped frame).
                fin_res = res
                fin_res[0][0] = converted_lines[k][0][1]/200
                fin_res[0][1] = 1 - converted_lines[k][0][0]/196
                fin_res[-1][0] = converted_lines[k][-1][1]/200
                fin_res[-1][1] = 1 - converted_lines[k][-1][0]/196
                endpoints.append(np.stack([fin_res[0], fin_res[-1]],axis=0))
                # fin_res[0] = endpoints[0]
                # fin_res[-1] = endpoints[-1]
                fin_res = np.clip(fin_res,0,1)
                coeffs_list.append(np.reshape(np.float32(fin_res),(-1)))
                #
                sel = np.float32(sel)
                origs.append(sel)
            # Remove lanes that had too few pixels to fit a curve.
            if len(to_remove) > 0:
                # logging.error('TO REMOVE ' + str(to_remove))
                roads = list(roads)
                for k in to_remove:
                    img_centers[img_centers == k] = 0
                    roads.remove(k)
                # roads = list(set(roads) - set(to_remove))
            else:
                roads = list(roads)
            if len(coeffs_list) == 0:
                logging.error('COEFFS ENPTY')
                return None,None,None,None,\
None,None,\
None, None,\
None,None,None,\
None, None, None, True, False
            con_matrix = self.get_connectivity(roads,outgoings, incomings)
            to_return_centers = torch.tensor(np.int64(img_centers)).long()
            orig_img_centers = torch.tensor(np.int64(orig_img_centers)).long()
            labels = torch.ones(len(roads))
            endpoints = np.array(endpoints)
            return obj_to_return, center_width_orient, con_matrix, np.reshape(endpoints,(endpoints.shape[0],-1)),\
orig_img_centers,np.stack(origs),\
vis_labels, bev_labels,\
to_return_centers,labels, roads,\
np.array(coeffs_list), outgoings, incomings, False, obj_exists
        except Exception as e:
            logging.error('LINE LABELS ' + str(e))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logging.error(str((exc_type, fname, exc_tb.tb_lineno)))
            return None,None,None,None,\
None,None,\
None, None,\
None,None,None,\
None, None, None, True, False
    def my_line_maker(points,size=(196,200)):
        """Rasterise normalised (x, y) points into a binary uint8 grid.

        NOTE(review): declared at method level but takes no ``self`` — it
        only works when called unbound (ClassName.my_line_maker(...));
        consider @staticmethod after checking call sites.
        """
        res = np.zeros(size)
        for k in range(len(points)):
            # x selects the column, y the row; both clamped to the grid.
            res[np.min([int(points[k][1]*size[0]),int(size[0]-1)]),np.min([int(points[k][0]*size[1]),int(size[1]-1)])] = 1
        return np.uint8(res)
| 39.104994 | 240 | 0.500638 | 29,881 | 0.97839 | 0 | 0 | 0 | 0 | 0 | 0 | 2,564 | 0.083953 |
ff235d7055fe081642d3b159d6f17203a8645409 | 3,782 | py | Python | src/textcode/markup.py | pombredanne/scancode-toolkit | 0d90a0498148997de94f92b00adf7e33079a41a8 | [
"Apache-2.0",
"CC0-1.0"
] | 3 | 2015-07-01T15:08:33.000Z | 2015-11-05T03:15:36.000Z | src/textcode/markup.py | pombredanne/scancode-toolkit | 0d90a0498148997de94f92b00adf7e33079a41a8 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | src/textcode/markup.py | pombredanne/scancode-toolkit | 0d90a0498148997de94f92b00adf7e33079a41a8 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
import logging
import chardet
import codecs
from commoncode import fileutils
from typecode import contenttype
"""
Extracts plain text from HTML and related files.
"""
logger = logging.getLogger(__name__)
bin_dir = os.path.join(os.path.dirname(__file__), 'bin')
extensions = set(['.html', '.htm', '.php', '.phps', '.jsp', '.jspx'])
def is_markup(location):
    """Return True if the file at ``location`` has a known markup extension."""
    ext = fileutils.file_extension(location)
    return ext in extensions
def is_html(location):
    """
    Return True if the file at ``location`` contains HTML markup.

    Intended to detect nested markup — e.g. source code highlighted as HTML
    (as pygments does, and as is common on GitHub) — which would need a
    second pass to extract the actual text. Not implemented yet.
    """
    raise NotImplementedError()
def convert_to_utf8(location):
    """
    Convert the file at location to UTF-8 text.
    Return the location of the converted file, or the original location when
    the file is not text or when no encoding could be detected.
    """
    if not contenttype.get_type(location).is_text:
        return location
    # Sniff the encoding from the first 4K. Use a context manager so the
    # handle is closed promptly instead of leaking until garbage collection.
    with open(location, 'rb') as probe:
        start = probe.read(4096)
    encoding = chardet.detect(start)
    if encoding:
        encoding = encoding.get('encoding', None)
    if encoding:
        target = os.path.join(fileutils.get_temp_dir('markup'),
                              fileutils.file_name(location))
        # Re-read with the detected encoding, replacing undecodable bytes,
        # and write out as UTF-8.
        with codecs.open(location, 'rb', encoding=encoding,
                         errors='replace', buffering=16384) as inf:
            with codecs.open(target, 'wb', encoding='utf-8') as outf:
                outf.write(inf.read())
        return target
    else:
        # chardet failed somehow to detect an encoding
        return location
def convert_to_text(location, _retrying=False):
    """
    Convert the markup file at location to plain text.
    Return the location of the converted plain text file or None.
    """
    if not is_markup(location):
        return
    # Imported lazily so the module loads even without BeautifulSoup.
    from bs4 import BeautifulSoup
    target = os.path.join(fileutils.get_temp_dir('markup'), 'text')
    with open(location, 'rb') as markup_file:
        parsed = BeautifulSoup(markup_file.read(), 'html5lib')
    with codecs.open(target, mode='wb', encoding='utf-8') as text_file:
        text_file.write(parsed.get_text())
    return target
| 36.718447 | 82 | 0.702538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,186 | 0.578001 |
ff23e35f84a4695ae6e12845521a5ceb13bc1d71 | 7,689 | py | Python | novajoin_tempest_plugin/ipa/ipa_client.py | vakwetu/novajoin_tempest_plugin | cde1888238fdd4f10b9d593e5f94da3ad9522d23 | [
"Apache-2.0"
] | null | null | null | novajoin_tempest_plugin/ipa/ipa_client.py | vakwetu/novajoin_tempest_plugin | cde1888238fdd4f10b9d593e5f94da3ad9522d23 | [
"Apache-2.0"
] | null | null | null | novajoin_tempest_plugin/ipa/ipa_client.py | vakwetu/novajoin_tempest_plugin | cde1888238fdd4f10b9d593e5f94da3ad9522d23 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
import uuid
try:
from gssapi.exceptions import GSSError
from ipalib import api
from ipalib import errors
from ipapython.ipautil import kinit_keytab
ipalib_imported = True
except ImportError:
# ipalib/ipapython are not available in PyPy yet, don't make it
# a showstopper for the tests.
ipalib_imported = False
from oslo_config import cfg
from oslo_log import log as logging
from six.moves.configparser import SafeConfigParser
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class IPABase(object):
    """Shared plumbing for talking to FreeIPA.

    Handles Kerberos credential setup, (re)connection with optional
    exponential backoff, and the low-level RPC helper ``_call_ipa`` used by
    subclasses.
    """
    def __init__(self, backoff=0):
        # Number of plain connection attempts when backoff is disabled.
        try:
            self.ntries = CONF.connect_retries
        except cfg.NoSuchOptError:
            self.ntries = 1
        # Without ipalib (e.g. on PyPy) there is nothing more to set up.
        if not ipalib_imported:
            return
        try:
            self.keytab = CONF.keytab
        except cfg.NoSuchOptError:
            self.keytab = '/etc/novajoin/krb5.keytab'
        # Private, in-memory Kerberos credential cache for this instance.
        self.ccache = "MEMORY:" + str(uuid.uuid4())
        os.environ['KRB5CCNAME'] = self.ccache
        # NOTE(review): hard-coded path differs from self.keytab above —
        # confirm which keytab the GSSAPI client is meant to use.
        os.environ['KRB5_CLIENT_KTNAME'] = '/home/stack/krb5.keytab'
        if self._ipa_client_configured() and not api.isdone('finalize'):
            api.bootstrap(context='novajoin')
            api.finalize()
        self.batch_args = list()
        # Initial backoff in seconds; 0 disables exponential backoff.
        self.backoff = backoff
        (_hostname, domain, realm) = self.get_host_domain_and_realm()
        self.domain = domain
        self.realm = realm
    def get_host_domain_and_realm(self):
        """Return the hostname and IPA realm name.
        IPA 4.4 introduced the requirement that the schema be
        fetched when calling finalize(). This is really only used by
        the ipa command-line tool but for now it is baked in.
        So we have to get a TGT first but need the hostname and
        realm. For now directly read the IPA config file which is
        in INI format and pull those two values out and return as
        a tuple.
        """
        # NOTE(review): SafeConfigParser is a deprecated alias (removed in
        # Python 3.12); ConfigParser is the modern name.
        config = SafeConfigParser()
        config.read('/etc/ipa/default.conf')
        hostname = config.get('global', 'host')
        realm = config.get('global', 'realm')
        domain = config.get('global', 'domain')
        return hostname, domain, realm
    def __backoff(self):
        # Sleep for the current delay, then double it up to a 1024 s ceiling.
        LOG.debug("Backing off %s seconds", self.backoff)
        time.sleep(self.backoff)
        if self.backoff < 1024:
            self.backoff = self.backoff * 2
    def __get_connection(self):
        """Make a connection to IPA or raise an error."""
        tries = 0
        # With backoff > 0 this retries indefinitely (the condition stays
        # true once backoff grows); otherwise it stops after ntries attempts.
        while (tries <= self.ntries) or (self.backoff > 0):
            if self.backoff == 0:
                LOG.debug("Attempt %d of %d", tries, self.ntries)
            if api.Backend.rpcclient.isconnected():
                api.Backend.rpcclient.disconnect()
            try:
                api.Backend.rpcclient.connect()
                # ping to force an actual connection in case there is only one
                # IPA master
                api.Command[u'ping']()
            except (errors.CCacheError,
                    errors.TicketExpired,
                    errors.KerberosError) as e:
                # Credentials expired or were never obtained: kinit and retry.
                LOG.debug("kinit again: %s", e)
                # pylint: disable=no-member
                try:
                    kinit_keytab(str('nova/%s@%s' %
                                 (api.env.host, api.env.realm)),
                                 self.keytab,
                                 self.ccache)
                except GSSError as e:
                    LOG.debug("kinit failed: %s", e)
                if tries > 0 and self.backoff:
                    self.__backoff()
                tries += 1
            except errors.NetworkError:
                tries += 1
                if self.backoff:
                    self.__backoff()
            else:
                # Connected and ping succeeded.
                return
    def _call_ipa(self, command, *args, **kw):
        """Make an IPA call.

        Reconnects (and re-kinits) transparently on credential or network
        errors; propagates NetworkError when backoff is disabled.
        """
        if not api.Backend.rpcclient.isconnected():
            self.__get_connection()
        if 'version' not in kw:
            kw['version'] = u'2.146'  # IPA v4.2.0 for compatibility
        while True:
            try:
                result = api.Command[command](*args, **kw)
                LOG.debug(result)
                return result
            except (errors.CCacheError,
                    errors.TicketExpired,
                    errors.KerberosError):
                LOG.debug("Refresh authentication")
                self.__get_connection()
            except errors.NetworkError:
                if self.backoff:
                    self.__backoff()
                else:
                    raise
    def _ipa_client_configured(self):
        """Determine if the machine is an enrolled IPA client.

        Return boolean indicating whether this machine is enrolled
        in IPA. This is a rather weak detection method but better
        than nothing.
        """
        return os.path.exists('/etc/ipa/default.conf')
class IPAClient(IPABase):
    """Thin convenience wrapper around common FreeIPA API commands."""

    def find_host(self, hostname):
        """Run host_find for ``hostname`` (a bytes principal)."""
        return self._call_ipa('host_find', hostname.decode('UTF-8'))

    def show_host(self, hostname):
        """Run host_show for ``hostname`` (a bytes principal)."""
        return self._call_ipa('host_show', hostname.decode('UTF-8'))

    def find_service(self, service_principal):
        """Run service_find for ``service_principal``."""
        return self._call_ipa('service_find',
                              service_principal.decode('UTF-8'))

    def show_service(self, service_principal):
        """Run service_show for ``service_principal``."""
        return self._call_ipa('service_show',
                              service_principal.decode('UTF-8'))

    def get_service_cert(self, service_principal):
        """Return the serial number of the service's certificate, or None
        when the service has no certificate attached."""
        result = self._call_ipa('service_find',
                                service_principal.decode('UTF-8'))
        return result['result'][0].get('serial_number')

    def service_managed_by_host(self, service_principal, host):
        """Return True if service is managed by specified host"""
        try:
            result = self._call_ipa('service_show',
                                    service_principal.decode('UTF-8'))
        except errors.NotFound:
            raise KeyError
        managers = result['result'].get('managedby_host', [])
        return host in managers

    def host_has_services(self, service_host):
        """Return True if this host manages any services"""
        LOG.debug('Checking if host ' + service_host + ' has services')
        found = self._call_ipa('service_find', man_by_host=service_host)
        return found['count'] > 0

    def show_cert(self, serial_number):
        """Run cert_show for the given certificate serial number."""
        return self._call_ipa('cert_show', serial_number)
| 35.43318 | 78 | 0.588503 | 6,526 | 0.848745 | 0 | 0 | 0 | 0 | 0 | 0 | 2,336 | 0.303811 |
ff244389ab03819a221d71c11f19a99f1ecbe7db | 4,085 | py | Python | old/ctest.py | ostapstephan/SeniorProject | f0d3f8067dec98474641c6ec3696dbd86f066a51 | [
"MIT"
] | 1 | 2018-10-03T00:30:34.000Z | 2018-10-03T00:30:34.000Z | old/ctest.py | ostapstephan/SeniorProject | f0d3f8067dec98474641c6ec3696dbd86f066a51 | [
"MIT"
] | null | null | null | old/ctest.py | ostapstephan/SeniorProject | f0d3f8067dec98474641c6ec3696dbd86f066a51 | [
"MIT"
] | null | null | null | # import lib.pbcvt as pbcvt
import cv2
import numpy as np
import sys
from time import time
def distance(o1, o2):
    """Euclidean distance between the centres of two (x, y, w, h) boxes."""
    ax, ay, aw, ah = o1
    bx, by, bw, bh = o2
    dx = (ax + aw / 2) - (bx + bw / 2)
    dy = (ay + ah / 2) - (by + bh / 2)
    return np.hypot(dx, dy)
# --- Pupil-detection webcam demo ------------------------------------------
# Usage: python ctest.py <camera_index> <width> <height>
# Shows three windows: the raw grayscale frame, its histogram-equalised
# version, and the binary threshold image used for pupil contour fitting.
# ESC quits; SPACE saves the current frame to testimage.png.
cv2.namedWindow("preview")
cv2.namedWindow("preview2")
cv2.namedWindow("preview3")
vc = cv2.VideoCapture(int(sys.argv[1]))
vc.set(3, int(sys.argv[2]))  # CAP_PROP_FRAME_WIDTH
vc.set(4, int(sys.argv[3]))  # CAP_PROP_FRAME_HEIGHT
print(vc.get(3))
print(vc.get(4))
if vc.isOpened():  # try to get the first frame
    rval, frame = vc.read()
else:
    rval = False
# Frame-rate accounting: frames since the last report.
ptime = time()
nf = 0
eye_cascade = cv2.CascadeClassifier('trained/haarcascade_eye.xml')
# Loaded but currently unused; kept around for experimenting with
# alternative eye detectors.
glass_cascade = cv2.CascadeClassifier('trained/haarcascade_eye_tree_eyeglasses.xml')
reye_cascade = cv2.CascadeClassifier('trained/haarcascade_righteye_2splits.xml')
leye_cascade = cv2.CascadeClassifier('trained/haarcascade_lefteye_2splits.xml')
# BUG FIX: ``thresh`` was only assigned inside the eye loop, so a frame with
# no detected eye raised NameError at the imshow below on the first pass.
thresh = None
while rval:
    roi_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    equalized = cv2.equalizeHist(roi_gray)
    roi_color = frame
    eyes = eye_cascade.detectMultiScale(roi_gray)
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 2)
        eye_roi_gray = roi_gray[ey:ey + eh, ex:ex + ew]
        eye_roi_color = roi_color[ey:ey + eh, ex:ex + ew]
        hist = cv2.calcHist([eye_roi_gray], [0], None, [256], [0, 256])
        # Split the histogram into two clusters and use the first centre
        # (minus a margin) as a rough dark-pupil threshold.
        # NOTE(review): kmeans runs on histogram *counts*, not pixel
        # intensities — confirm this is the intended heuristic.
        # Define criteria = ( type, max_iter = 10 , epsilon = 1.0 )
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        compactness, labels, centers = cv2.kmeans(hist, 2, None, criteria, 100,
                                                  cv2.KMEANS_RANDOM_CENTERS)
        print(np.sqrt(compactness) / 10)
        print(centers)
        ret, thresh = cv2.threshold(eye_roi_gray, centers[0] - 10, 255, 0)
        # NOTE(review): two-value unpacking assumes OpenCV >= 4; OpenCV 3.x
        # returns (image, contours, hierarchy).
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        for cont in contours:
            # fitEllipse needs at least 5 points; the area filter drops noise.
            if len(cont) > 5 and cv2.contourArea(cont) > 1000:
                ellipse = cv2.fitEllipse(cont)
                cv2.ellipse(eye_roi_color, ellipse, (0, 0, 255), 2)
                cv2.circle(eye_roi_color,
                           (int(ellipse[0][0]), int(ellipse[0][1])),
                           2, (255, 0, 0), 3)
    cv2.imshow("preview", roi_gray)
    cv2.imshow("preview2", equalized)
    if thresh is not None:
        cv2.imshow("preview3", thresh)
    # Report the average FPS every 5 seconds.
    nf = nf + 1
    if time() - ptime > 5:
        print(str(nf / (time() - ptime)))
        ptime = time()
        nf = 0
    key = cv2.waitKey(20)
    if key == 27:  # exit on ESC
        break
    elif key == 32:  # SPACE: save the current frame
        cv2.imwrite('testimage.png', frame)
    rval, frame = vc.read()
cv2.destroyWindow("preview")
cv2.destroyWindow("preview2")
cv2.destroyWindow("preview3")
vc.release()
ff24c50a953f9de8e5c507018c8a393f88c6968a | 3,394 | py | Python | src/library/blas/AutoGemm/KernelsToPreCompile.py | tingxingdong/clean | 9cdaed4c755b825b0c10a99f9974224993aa39a9 | [
"Apache-2.0"
] | 1 | 2021-07-07T11:28:56.000Z | 2021-07-07T11:28:56.000Z | src/library/blas/AutoGemm/KernelsToPreCompile.py | tingxingdong/clean | 9cdaed4c755b825b0c10a99f9974224993aa39a9 | [
"Apache-2.0"
] | null | null | null | src/library/blas/AutoGemm/KernelsToPreCompile.py | tingxingdong/clean | 9cdaed4c755b825b0c10a99f9974224993aa39a9 | [
"Apache-2.0"
] | null | null | null | import os
import argparse
import AutoGemmParameters
import Common
################################################################################
# Auto-Gemm
################################################################################
def writeOfflineCompilation(args):
print "AutoGemm.py: Generating list of kernels to pre-compile."
if not os.path.exists( Common.getIncludePath() ):
os.makedirs( Common.getIncludePath() )
ocFileName = Common.getIncludePath() + "AutoGemmKernelsToPreCompile.h"
ocFile = open(ocFileName, "w")
ocFile.write( Common.getAutoGemmHeader() )
fileStr = "\n/*precision, order, transA, transB, beta, tileNumRows, tileNumCols, unroll*/\n"
fileStr += "\nunsigned int gemmPreCompile[][8] = {\n"
count = 0
for precision in args.precisions:
ocFile.write( fileStr )
fileStr = ""
validTiles = AutoGemmParameters.getTilesForPrecision(precision)
for order in args.orders:
for transpose in args.transposes:
transA = transpose[0]
transB = transpose[1]
if (transA=="C" or transB=="C") and (precision=="s" or precision=="d"):
# real precision doesn't have conjugate transpose
continue
for beta in args.betas:
for tile in validTiles:
# print combination
kernelStr = " { %1u, %1u, %1u, %1u, %1u, %3u, %3u, %2u },\n" \
% (
Common.precisionInt[precision],
Common.orderInt[order],
Common.transposeInt[transA],
Common.transposeInt[transB],
beta,
tile.macroTileNumRows,
tile.macroTileNumCols,
tile.unroll
)
fileStr += kernelStr
#print kernelStr
count+=1
if count is 0:
fileStr += " { %1u, %1u, %1u, %1u, %1u, %3u, %3u, %2u },\n" \
% ( 0, 0, 0, 0, 0, 0, 0, 0 )
fileStr += "};\n"
fileStr += "unsigned int gemmPreCompileNum = " + str(count) + ";\n"
ocFile.write( fileStr )
ocFile.close()
count *= 4
print "AutoGemm.py: %u kernels will be pre-compiled." % count
################################################################################
# Main
################################################################################
if __name__ == "__main__":
  # Parse which precisions/orders/transposes/betas to pre-compile; each
  # option's choices come from AutoGemmParameters.
  ap = argparse.ArgumentParser(description="Which gemm kernels to compile offline.")
  ap.add_argument("--output-path", dest="output" )
  ap.add_argument("--precisions", dest="precisions", action="store", nargs="+", choices=AutoGemmParameters.precisions )
  ap.add_argument("--orders", dest="orders", action="store", nargs="+", choices=AutoGemmParameters.orders )
  ap.add_argument("--transposes", dest="transposes", action="store", nargs="+", choices=AutoGemmParameters.getTransposeChoices() )
  ap.add_argument("--betas", dest="betas", action="store", nargs="+", type=int, choices=AutoGemmParameters.betas )
  args = ap.parse_args()
  if args.output:
    Common.setOutputPath(args.output)
  else:
    print "Warning: No output path specified; default is working directory."
  # Normalize unspecified option groups to empty lists so the generator's
  # nested loops simply produce the all-zero fallback row.
  if args.precisions is None:
    args.precisions = []
  if args.transposes is None:
    args.transposes = []
  if args.orders is None:
    args.orders = []
  if args.betas is None:
    args.betas = []
  writeOfflineCompilation(args)
| 36.891304 | 130 | 0.570418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,157 | 0.340896 |
ff2566df6543c6c812f059453757e928f1cb03ba | 604 | py | Python | scrap/main.py | talesconrado/SpotPer | 682d1f087337980ce7126b8dc5877f4a2a1aad83 | [
"MIT"
] | 1 | 2021-04-13T01:28:49.000Z | 2021-04-13T01:28:49.000Z | scrap/main.py | talesconrado/SpotPer | 682d1f087337980ce7126b8dc5877f4a2a1aad83 | [
"MIT"
] | null | null | null | scrap/main.py | talesconrado/SpotPer | 682d1f087337980ce7126b8dc5877f4a2a1aad83 | [
"MIT"
] | null | null | null | import musics
import sys
def main():
    """Convert a scraped album HTML page into a SQL insert script.

    CLI arguments: html file path, starting codfaixa id, album code.
    """
    source_path = sys.argv[1]
    first_track_id = int(sys.argv[2])
    album_code = sys.argv[3]
    page = musics.getHTML(source_path)
    names = musics.getMusicNames(page)
    durations = musics.getMusicDuration(page)
    sql_path = source_path.split('.')[0] + '.sql'
    musics.saveValues(sql_path, names, durations, album_code, first_track_id)


main()
| 28.761905 | 79 | 0.682119 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.342149 |
ff26356a8a3d9408da1d439502b5bdb9bed76c96 | 8,285 | py | Python | cct/core2/config.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | [
"BSD-3-Clause"
] | 1 | 2015-11-04T16:37:39.000Z | 2015-11-04T16:37:39.000Z | cct/core2/config.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | [
"BSD-3-Clause"
] | null | null | null | cct/core2/config.py | awacha/cct | be1adbed2533df15c778051f3f4f9da0749c873a | [
"BSD-3-Clause"
] | 1 | 2020-03-05T02:50:43.000Z | 2020-03-05T02:50:43.000Z | import logging
import os
import pathlib
import pickle
from typing import Dict, Any, Union, Tuple, KeysView, ValuesView, ItemsView, Optional
from PyQt5 import QtCore
PathLike = Union[os.PathLike, str, pathlib.Path]
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Config(QtCore.QObject):
    """A hierarchical configuration store

    This mimics a Python dictionary, i.e. supporting ['item']-like indexing, but only `str` keys are supported.
    Changes are monitored and if a new value is given, the `changed` signal is emitted.

    Items can be of any type. Dictionaries are handled differently. Whenever a dictionary is assigned to a key, it is
    converted to a `Config` instance.
    """
    # emitted as (path_tuple, new_value) whenever any (possibly nested) key changes
    changed = QtCore.pyqtSignal(object, object)
    # backing pickle file; autosave is active only when this is set
    filename: Optional[str] = None
    _data: Dict[str, Any]
    _modificationcount: int = 0
    # debounce interval (seconds) between a change and the autosave write
    _autosavetimeout: float = 0.1
    # Qt timer id of a pending autosave, or None
    _autosavetimer: Optional[int] = None

    def __init__(self, dicorfile: Union[None, Dict[str, Any], str] = None):
        # Accepts nothing (empty config), a dict (copied in), or a file name
        # (loaded as a pickle).
        super().__init__()
        self._data = {}
        if dicorfile is None:
            pass
        elif isinstance(dicorfile, dict):
            self.__setstate__({} if dicorfile is None else dicorfile.copy())
        elif isinstance(dicorfile, str):
            self.load(dicorfile, update=False)
        self.changed.connect(self.onChanged)

    def autosave(self):
        # (Re)start the debounced save timer; restarting on every change
        # coalesces bursts of modifications into a single write.
        if self.filename is not None:
            if self._autosavetimer is not None:
                self.killTimer(self._autosavetimer)
                self._autosavetimer = None
            self._autosavetimer = self.startTimer(int(self._autosavetimeout*1000), QtCore.Qt.PreciseTimer)

    def timerEvent(self, timerEvent: QtCore.QTimerEvent) -> None:
        if timerEvent.timerId() == self._autosavetimer:
            # do autosave
            self.killTimer(timerEvent.timerId())
            self._autosavetimer = None
            self.save(self.filename)

    def onChanged(self, path: Tuple[str, ...], value: Any):
        # Any change anywhere in the tree schedules an autosave.
        self.autosave()

    def __setitem__(self, key: Union[str, Tuple[str]], value: Any):
        # Tuple keys address nested configs: ('a', 'b') -> self['a']['b'].
        if isinstance(key, tuple) and len(key) > 1:
            subconfig = self._data[key[0]]
            assert isinstance(subconfig, Config)
            return subconfig.__setitem__(key[1:], value)
        elif isinstance(key, tuple) and len(key) == 1:
            key = key[0]
        elif isinstance(key, tuple) and len(key) == 0:
            raise ValueError('Empty tuples cannot be Config keys!')
        if not isinstance(value, dict):
            if key not in self._data:
                self._data[key] = value
                self.changed.emit((key,), value)
            else:
                # key already present, see if they are different
                if (not isinstance(self._data[key], type(value))) or (self._data[key] != value):
                    self._data[key] = value
                    # value changed, emit the signal
                    self.changed.emit((key, ), value)
                else:
                    pass
        else:
            # convert it to a Config instance
            cnf = Config(value)
            if (key not in self._data) or (not isinstance(self._data[key], Config)):
                self._data[key] = cnf
                self._data[key].changed.connect(self._subConfigChanged)
                self.changed.emit((key,), cnf)
            elif cnf is not self._data[key]:
                # merge into the existing sub-config rather than replacing it
                self._data[key].update(cnf)
            else:
                # setting the same config: do nothing.
                pass
        self.autosave()

    def __delitem__(self, key: Union[str, Tuple[str, ...]]):
        logger.debug(f'Deleting key {key}')
        if isinstance(key, str):
            key = (key,)
        logger.debug(f'Key normalized to {key}')
        # walk down to the parent config of the final key component
        dic = self
        for k in key[:-1]:
            dic = dic[k]
        assert isinstance(dic, Config)
        if isinstance(dic._data[key[-1]], Config):
            try:
                # TypeError is raised by Qt when the signal was not connected
                dic._data[key[-1]].changed.disconnect(self._subConfigChanged)
            except TypeError:
                pass
        del dic._data[key[-1]]
        self.autosave()

    def __getitem__(self, item: Union[str, Tuple[str, ...]]):
        if isinstance(item, str):
            return self._data[item]
        elif isinstance(item, int):
            # NOTE(review): int keys fall through to the dict lookup as-is.
            return self._data[item]
        else:
            # tuple key: descend one level per component
            dic = self
            for key in item:
                dic = dic[key]
            return dic

    def __getstate__(self) -> Dict[str, Any]:
        # Recursively convert the tree back to plain nested dicts (pickling).
        dic = {}
        for k in self:
            if isinstance(self[k], Config):
                dic[k] = self[k].__getstate__()
            else:
                dic[k] = self[k]
        return dic

    def keys(self) -> KeysView:
        return self._data.keys()

    def values(self) -> ValuesView:
        return self._data.values()

    def items(self) -> ItemsView:
        return self._data.items()

    def _subConfigChanged(self, path, newvalue):
        # Re-emit a child's change with our own key prepended to the path.
        cnf = self.sender()
        # find the key for this `Config` instance
        try:
            key = [k for k in self.keys() if self[k] is cnf][0]
        except IndexError:
            # this `Config` instance does not belong to us, disconnect the signal.
            cnf.changed.disconnect(self._subConfigChanged)
        else:
            # extend the path with the key and re-emit the signal.
            self.changed.emit((key,) + path, newvalue)
        self.autosave()

    def update(self, other: Union["Config", Dict]):
        # Deep-merge *other* into self; nested Config/dict values merge
        # recursively, scalars overwrite.
        for key in other:
            logger.debug(f'Updating {key=}')
            if isinstance(other[key], Config) or isinstance(other[key], dict):
                if (key in self) and isinstance(self[key], Config):
                    logger.debug(f'Key {key} is a Config')
                    self[key].update(other[key])
                else:
                    logger.debug(f'scalar -> dict of key {key}')
                    self[key] = other[key]
            else:
                logger.debug(f'simple update of key {key}')
                self[key] = other[key]
        self.autosave()

    def __iter__(self):
        return self._data.__iter__()

    def __contains__(self, item: Union[str, Tuple[str, ...]]):
        if isinstance(item, str):
            return item in self._data
        else:
            # tuple key: descend to the parent, then test the last component
            dic = self
            for key in item[:-1]:
                dic = dic[key]
            return item[-1] in dic

    def load(self, picklefile: PathLike, update: bool=True):
        # With update=False the file replaces the current state entirely;
        # with update=True it is deep-merged into the current state.
        self.filename = picklefile
        if not update:
            logger.debug(f'Loading configuration from {picklefile}')
            with open(picklefile, 'rb') as f:
                data = pickle.load(f)
            logger.debug('Loaded a pickle file')
            self.__setstate__(data)
        else:
            logger.debug(f'Loading configuration with update')
            other = Config(picklefile)
            self.update(other)
        logger.info(f'Loaded configuration from {picklefile}')

    def save(self, picklefile: Optional[PathLike] = None):
        # Default to the remembered autosave target.
        if picklefile is None:
            picklefile = self.filename
        #logger.debug(f'Saving configuration to {picklefile}')
        # NOTE(review): a bare file name (no directory part) makes
        # os.makedirs('') raise — verify callers always pass a path with a dir.
        dirs, filename = os.path.split(picklefile)
        os.makedirs(dirs, exist_ok=True)
        with open(picklefile, 'wb') as f:
            pickle.dump(self.__getstate__(), f)
        logger.info(f'Saved configuration to {picklefile}')

    def __setstate__(self, dic: Dict[str, Any]):
        self._data = dic
        for key in self._data:
            # convert dictionaries to `Config` instances and connect to their changed signals.
            if isinstance(self._data[key], dict):
                self._data[key] = Config(self._data[key])
                self._data[key].changed.connect(self._subConfigChanged)

    # alias: asdict() returns the plain nested-dict representation
    asdict = __getstate__

    def __str__(self):
        return str(self.__getstate__())

    def __repr__(self):
        return repr(self.__getstate__())

    def __len__(self):
        return len(self._data)

    def setdefault(self, key: str, value: Any) -> Any:
        # dict.setdefault semantics: return existing value, otherwise store
        # *value* (triggering change signals) and return it.
        try:
            return self._data[key]
        except KeyError:
            self[key]=value
            self.autosave()
            return self[key]
| 35.711207 | 117 | 0.569584 | 7,998 | 0.965359 | 0 | 0 | 0 | 0 | 0 | 0 | 1,249 | 0.150754 |
ff27a929c9309575aaca50c911b92ee2c9b0e9b2 | 844 | py | Python | bench-scripts/bench_strawman_jl_varying_num_dim.py | holmes-anonymous-submission/holmes-library | 35d01400c6694681ed61b2b3e1f6c7eb6b660e90 | [
"MIT"
] | null | null | null | bench-scripts/bench_strawman_jl_varying_num_dim.py | holmes-anonymous-submission/holmes-library | 35d01400c6694681ed61b2b3e1f6c7eb6b660e90 | [
"MIT"
] | null | null | null | bench-scripts/bench_strawman_jl_varying_num_dim.py | holmes-anonymous-submission/holmes-library | 35d01400c6694681ed61b2b3e1f6c7eb6b660e90 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from common import *
# Benchmark driver: builds the project, then runs bench_strawman_jl for an
# increasing number of dimensions. Helpers (configure_network,
# write_configure_info, copy_benchmark_result_to_log) come from `common`.
configure_network()
subprocess.run(["cmake", "."])
subprocess.run(["make"])
print("running the bench_strawman_jl_varying_num_dim")
# k is the JL projection parameter; chosen per record count.
k = 41
# 41 for 100(k), 44 for 200(k)
num_records = 100
size_of_each_dimension = 10
print("running the case for " + str(num_records) + " entries, with k value " + str(k) + ", and each dimension has size " + str(size_of_each_dimension))
# give the network configuration a moment to settle before benchmarking
time.sleep(5)
for i in range(5):
    # sweep num_dimensions = 1..5
    num_dimensions = 1 + i * 1
    write_configure_info(str(num_dimensions) + " " + str(size_of_each_dimension) + " " + str(k) + " " + str(num_records))
    subprocess.run(["bin/bench_strawman_jl", os.getenv("EMP_MY_PARTY_ID"), "5000"])
    copy_benchmark_result_to_log("bench_strawman_jl varying " + str(num_dimensions) + " " + str(size_of_each_dimension) + " " + str(k) + " " + str(num_records))
ff2a2e9042dfa861f9dd3f86cd1a00194ad688f3 | 5,184 | py | Python | python/iperf_mar1.py | ttlequals0/misc | 95ce083312bc241b196afcf675f93480ca6866b0 | [
"MIT"
] | null | null | null | python/iperf_mar1.py | ttlequals0/misc | 95ce083312bc241b196afcf675f93480ca6866b0 | [
"MIT"
] | null | null | null | python/iperf_mar1.py | ttlequals0/misc | 95ce083312bc241b196afcf675f93480ca6866b0 | [
"MIT"
] | null | null | null | #!/usr/local/apps/networking/bin/python2.7
# script will test all server clinet pairs with iperf
### this is the prd script
# pmw 2/18/16
import os
import re
import sys
import datetime
#from subprocess import Popen, PIP
import subprocess
import logging
from pprint import pprint
import argparse
import socket
import Queue
import threading
import thread
import signal
import time
from httplib import HTTPSConnection
from base64 import b64encode
#from servicenow import ChangeRequest, WebService, CSVService, ConfigurationItemFactory, CustomerFactory
socket.setdefaulttimeout(2)
report =''
#constants for sn
def get_time(ts=None):
    """Return *ts* (a POSIX timestamp; default: now) as 'YYYY-MM-DD HH:MM:SS'.

    Bug fix: the original referenced a module-level name ``ts`` that was never
    defined, so every call raised NameError.  The timestamp is now an optional
    parameter defaulting to the current time, which keeps the zero-argument
    call signature working.
    """
    if ts is None:
        ts = time.time()
    return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def run_script(list,fo,lock):
    # Run one iperf measurement between a server/client pair over ssh.
    # *list* is a CSV row: [group, server_host, client_host, ?, label, bandwidth].
    # Appends a "group,label,bandwidth,avg_rate" line to *fo* (thread-safe via
    # *lock*) and to the module-level `report`; returns that line, or False on
    # socket errors.
    global report
    try:
        # 1) start an iperf UDP server daemon on the server host
        cmd = 'ssh -q -i ttnet_key -o \"StrictHostKeyChecking no\" -l ttnetsftp ';
        cmd += list[1]
        cmd += ' \"iperf -s -p51281 -u -D\"'
        history = cmd + '\n'
        #os.spawnl(os.P_NOWAIT,cmd)
        proc = subprocess.Popen(cmd,shell=True ,stdout=subprocess.PIPE)
        tmp = proc.stdout.read()
        print(tmp)
        # 2) run the iperf client on the client host towards the server
        cmd = 'ssh -q -i ttnet_key -o \"StrictHostKeyChecking no\" -l ttnetsftp ';
        cmd += list[2]
        cmd += ' \"taskkill /f /im iperf.exe;iperf -c'
        cmd += list[1]
        cmd += ' -b'
        # bandwidth: strip non-digits; default 10, cap at 10 when >100 (Mbit)
        bw = str(list[5])
        bw = re.sub(r'\D','',bw)
        if bw =='':
            bw =10
        if float(bw) >100:
            cmd += '10'
        else:
            cmd += str(bw)
        print('%s %s -> %s list of 5 (bandwidth) is %s' % (list[0],list[1],list[2],bw))
        cmd += 'M'
        cmd += ' -p51281 -i1 -t10 -r\"'
        history += cmd + '\n'
        # add timeout. kill after 60 sec
        start = datetime.datetime.now()
        proc = subprocess.Popen(cmd,shell=True ,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        while proc.poll() is None:
            time.sleep(0.5)
            now = datetime.datetime.now()
            if (now - start).seconds> 60:
                os.kill(proc.pid, signal.SIGKILL)
                os.waitpid(-1, os.WNOHANG)
        tmp = proc.stdout.read()
        # remove
        # end remove
        # 3) average the client's Mbits/sec readings
        summary = get_summ(list[1],cmd,tmp)
        # 4) kill the iperf server process on the server host
        cmd = 'ssh -q -i ttnet_key -o "StrictHostKeyChecking no" -l ttnetsftp ';
        cmd += list[1]
        cmd += ' "taskkill /f /im iperf.exe"'
        history += cmd + '\n' + list[0] + ','+ list[4]+ ','+ list[5] + ','+ str(summary)
        rep = list[0] + ','+ list[4]+ ','+ list[5] + ','+ str(summary) + '\n'
        proc = subprocess.Popen(cmd,shell=True ,stdout=subprocess.PIPE)
        tmp = proc.stdout.read()
        print(tmp)
        # serialize report writing across worker threads
        lock.acquire_lock()
        if fo.closed:
            # fall back to a fresh file if the caller's handle was closed
            fo = open('iperf_new.csv','a')
        report += rep
        fo.write(rep)
        print "%s\n" % history
        lock.release_lock()
        return rep
    except socket.error, e:
        lock.acquire_lock()
        # NOTE(review): `ip` is undefined in this scope — this print would
        # itself raise NameError if the except branch is ever taken.
        print "%s,%s" % (ip, e)
        # kill server iperf
        lock.release_lock()
        return False
def get_summ(ip, cmd, tmp):
    """Average the Mbits/sec readings in iperf UDP report output *tmp*.

    Scans each line for a UDP summary row (bandwidth + jitter + loss
    percentage), strips everything up to the transferred-bytes column,
    and returns the mean rate rounded to two decimals (0 when no row
    matched).  ``ip`` and ``cmd`` are accepted for call-site symmetry
    but not used here.
    """
    report_lines = tmp.split('\n')
    print(len(report_lines))
    rates = []
    for report_line in report_lines:
        if not re.search(r'Mbits/sec.*ms.*\%', report_line):
            continue
        tail = re.sub(r'^.*Bytes\s+', '', report_line)
        print(tail)
        rate = float(tail.split()[0])
        rates.append(rate)
        print('lin -> %s, %i' % (rate, len(rates)))
    if not rates:
        return 0
    total = 0.0
    for rate in rates:
        total += rate
    return round(total / len(rates), 2)
def initLogger():
    """Attach a stream handler with 'LEVEL: message' formatting to the root logger."""
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)
def get_pairs(inp):
    """Read CSV file *inp* and return its rows (split on ',') that have more than two fields."""
    with open(inp) as handle:
        raw_lines = handle.read().split('\n')
    rows = []
    for raw in raw_lines:
        fields = raw.split(',')
        if len(fields) > 2:
            rows.append(fields)
    return rows
def run_unsched(list, frep, lock):
    """Run every server/client pair in *list* sequentially.

    Each pair's report line is appended to *frep* and echoed to stdout;
    *lock* is forwarded to run_script for its internal file writes.
    Always returns True.
    """
    for pair in list:
        line = run_script(pair, frep, lock)
        frep.write(line)
        print(line)
    return True
if __name__=='__main__':
    # NOTE(review): `global` at module level is a no-op; kept as-is.
    global report
    report =''
    initLogger()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # CLI: -f input pairs CSV (group,from,to,...), -o output report file
    parser = argparse.ArgumentParser(description='test ap xtr pairs')
    parser.add_argument('-f', '--file', nargs=1 ,help='file. format: group,from,to', required=False)
    parser.add_argument('-o', '--output', nargs=1 ,help='file. output file', required=False)
    args = parser.parse_args()
    clientIps = []
    print('args are -> %s' % args)
    lock = thread.allocate_lock()
    # default output/input files when not given on the command line
    if not args.output:
        frep = open('iperf_results.csv','w')
    else:
        frep = open(args.output[0],'w')
    if not args.file:
        list = get_pairs("iperf_pairs_short.csv")
    else:
        print('args are -> %s' % args.file)
        list = get_pairs(args.file[0])
    run_unsched(list,frep,lock)
    frep.close()
    # also dump the accumulated global report to a second file
    frep = open('iperf_rep_2.csv','w')
    frep.write(report)
    frep.close()
| 29.622857 | 104 | 0.565586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,269 | 0.244792 |
ff2a3163d6bee3ccd183a8a62b54b56207c9096b | 1,142 | py | Python | Python/audio_to_text.py | udbhavdave/Major | e6e3f3ec27d5294661e9634496a8d5c9ebbf4c1a | [
"MIT"
] | 3 | 2020-12-13T12:21:10.000Z | 2021-12-17T10:03:58.000Z | Python/audio_to_text.py | udbhavdave/Major | e6e3f3ec27d5294661e9634496a8d5c9ebbf4c1a | [
"MIT"
] | null | null | null | Python/audio_to_text.py | udbhavdave/Major | e6e3f3ec27d5294661e9634496a8d5c9ebbf4c1a | [
"MIT"
] | null | null | null | # import speech_recognition as sr
# import sys
#
# # read filename from arguments
# filename = ("C:\\Users\\utkar\\Downloads\\crowd.mp3")
#
# # initialize the recognizer
# r = sr.Recognizer()
#
# # open the file
# with sr.AudioFile(filename) as source:
# # listen for the data (load audio to memory)
# audio_data = r.record(source)
# # recognize (convert from speech to text)
# text = r.recognize_google(audio_data)
# print(text)
#Python 2.x program to transcribe an Audio file
import speech_recognition as sr
import connection
# Transcribe a local WAV file with Google's speech recognition and store the
# result via the project `connection` module.
AUDIO_FILE = ("..\\audio\\Welcome.wav")
# use the audio file as the audio source
r = sr.Recognizer()
with sr.AudioFile(AUDIO_FILE) as source:
    #reads the audio file. Here we use record instead of
    #listen
    audio = r.record(source)
try:
    # NOTE(review): recognize_google is called twice (two network round-trips);
    # results could differ between the print and the stored value.
    print("The audio file contains: " + r.recognize_google(audio))
    connection.insertAudio(2, r.recognize_google(audio), AUDIO_FILE)
except sr.UnknownValueError:
    print("Google Speech Recognition could not understand audio")
# except sr.RequestError as e:
# print("Could not request results from Google Speech
# Recognition service; {0}".format(e))
| 26.55814 | 65 | 0.722417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 810 | 0.709282 |
ff2cc3e4fa0010425f995d6994abc80dcad5bb40 | 92 | py | Python | codes_auto/1203.print-in-order.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | codes_auto/1203.print-in-order.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | codes_auto/1203.print-in-order.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=1203 lang=python3
#
# [1203] print-in-order
#
None
# @lc code=end | 13.142857 | 42 | 0.663043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.891304 |
ff2f9459306ff0dcba3c3e1ea4e1631dc6e77890 | 1,777 | py | Python | Toolkits/Discovery/meta/searx/searx/engines/digbt.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | 4 | 2018-09-07T15:35:24.000Z | 2019-03-27T09:48:12.000Z | Toolkits/Discovery/meta/searx/searx/engines/digbt.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | searx/engines/digbt.py | xu1991/open | 5398dab4ba669b3ca87d9fe26eb24431c45f153e | [
"CC0-1.0"
] | 3 | 2019-06-18T19:57:17.000Z | 2020-11-06T03:55:08.000Z | """
DigBT (Videos, Music, Files)
@website https://digbt.org
@provide-api no
@using-api no
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, content, magnetlink
"""
from sys import version_info
from lxml import html
from searx.engines.xpath import extract_text
from searx.utils import get_torrent_size
from searx.url_utils import urljoin
# Py2/Py3 compatibility: provide a `unicode` alias on Python 3.
if version_info[0] == 3:
    unicode = str

# Engine metadata consumed by the searx core.
categories = ['videos', 'music', 'files']
paging = True

# Site endpoint and the word offsets of size/multiplier inside the result
# "tail" text (see response()).
URL = 'https://digbt.org'
SEARCH_URL = URL + '/search/{query}-time-{pageno}'
FILESIZE = 3
FILESIZE_MULTIPLIER = 4
def request(query, params):
    """Fill params['url'] with the DigBT search URL for *query* and the requested page, then return params."""
    page = params['pageno']
    params['url'] = SEARCH_URL.format(query=query, pageno=page)
    return params
def response(resp):
    # Parse a DigBT search results page into searx torrent result dicts.
    dom = html.fromstring(resp.text)
    search_res = dom.xpath('.//td[@class="x-item"]')

    if not search_res:
        return list()

    results = list()
    for result in search_res:
        url = urljoin(URL, result.xpath('.//a[@title]/@href')[0])
        title = extract_text(result.xpath('.//a[@title]'))
        content = extract_text(result.xpath('.//div[@class="files"]'))
        # the "tail" div holds whitespace-separated metadata; size value and
        # its unit live at the FILESIZE / FILESIZE_MULTIPLIER word offsets
        files_data = extract_text(result.xpath('.//div[@class="tail"]')).split()
        filesize = get_torrent_size(files_data[FILESIZE], files_data[FILESIZE_MULTIPLIER])
        magnetlink = result.xpath('.//div[@class="tail"]//a[@class="title"]/@href')[0]

        # seed/leech counts are not exposed by the site, hence 'N/A'
        results.append({'url': url,
                        'title': title,
                        'content': content,
                        'filesize': filesize,
                        'magnetlink': magnetlink,
                        'seed': 'N/A',
                        'leech': 'N/A',
                        'template': 'torrent.html'})

    return results
| 28.206349 | 90 | 0.591446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 555 | 0.312324 |
ff3079fc8a9e3d739eb7e1706631d0a9711499a3 | 7,769 | py | Python | calvin_models/calvin_agent/datasets/npz_dataset.py | nikepupu/calvin | 23ce311bb14a07d284b87589842f0a542ab0a6fa | [
"MIT"
] | null | null | null | calvin_models/calvin_agent/datasets/npz_dataset.py | nikepupu/calvin | 23ce311bb14a07d284b87589842f0a542ab0a6fa | [
"MIT"
] | null | null | null | calvin_models/calvin_agent/datasets/npz_dataset.py | nikepupu/calvin | 23ce311bb14a07d284b87589842f0a542ab0a6fa | [
"MIT"
] | null | null | null | import logging
import os
from pathlib import Path
import re
from typing import Dict, List, Optional, Tuple
from calvin_agent.datasets.base_dataset import BaseDataset
from calvin_agent.datasets.utils.episode_utils import (
get_state_info_dict,
process_actions,
process_depth,
process_rgb,
process_state,
)
import numpy as np
import torch
logger = logging.getLogger(__name__)
class NpzDataset(BaseDataset):
    """
    Dataset Loader that uses a shared memory cache

    parameters
    ----------

    datasets_dir: path of folder containing episode files (string must contain 'validation' or 'training')
    save_format: format of episodes in datasets_dir (.pkl or .npz)
    obs_space: DictConfig of the observation modalities of the dataset
    max_window_size: maximum length of the episodes sampled from the dataset
    """

    def __init__(self, *args, skip_frames: int = 0, n_digits: Optional[int] = None, **kwargs):  # type: ignore
        super().__init__()
        # NOTE(review): with the language path, skip_frames is used as a
        # modulus in load_file_indices_lang — the default 0 would raise
        # ZeroDivisionError there; confirm callers pass skip_frames >= 1.
        super().__init__(*args, **kwargs)
        self.skip_frames = skip_frames
        if self.with_lang:
            # language annotations add a per-sample annotation lookup
            (
                self.episode_lookup,
                self.lang_lookup,
                self.max_batched_length_per_demo,
                self.lang_ann,
            ) = self.load_file_indices_lang(self.abs_datasets_dir)
        else:
            self.episode_lookup, self.max_batched_length_per_demo = self.load_file_indices(self.abs_datasets_dir)
        self.naming_pattern, self.n_digits = self.lookup_naming_pattern(n_digits)

    def lookup_naming_pattern(self, n_digits):
        # Inspect the first episode file in the dataset dir to derive the
        # (prefix, suffix) naming pattern and zero-padding width of frame ids.
        it = os.scandir(self.abs_datasets_dir)
        while True:
            # raises StopIteration if no file with the expected suffix exists
            filename = Path(next(it))
            if self.save_format in filename.suffix:
                break
        aux_naming_pattern = re.split(r"\d+", filename.stem)
        naming_pattern = [filename.parent / aux_naming_pattern[0], filename.suffix]
        n_digits = n_digits if n_digits is not None else len(re.findall(r"\d+", filename.stem)[0])
        assert len(naming_pattern) == 2
        assert n_digits > 0
        return naming_pattern, n_digits

    def get_episode_name(self, idx: int) -> Path:
        """
        Convert frame idx to file name
        """
        return Path(f"{self.naming_pattern[0]}{idx:0{self.n_digits}d}{self.naming_pattern[1]}")

    def zip_sequence(self, start_idx: int, end_idx: int, idx: int) -> Dict[str, np.ndarray]:
        """
        Load consecutive individual frames saved as npy files and combine to episode dict
        parameters:
        -----------
        start_idx: index of first frame
        end_idx: index of last frame

        returns:
        -----------
        episode: dict of numpy arrays containing the episode where keys are the names of modalities
        """
        episodes = [self.load_episode(self.get_episode_name(file_idx)) for file_idx in range(start_idx, end_idx)]
        # stack per-frame arrays along a new leading time axis
        episode = {key: np.stack([ep[key] for ep in episodes]) for key, _ in episodes[0].items()}
        if self.with_lang:
            episode["language"] = self.lang_ann[self.lang_lookup[idx]][0]  # TODO check [0]
        return episode

    def get_sequences(self, idx: int, window_size: int) -> Dict:
        """
        parameters
        ----------
        idx: index of starting frame
        window_size: length of sampled episode

        returns
        ----------
        seq_state_obs: numpy array of state observations
        seq_rgb_obs: tuple of numpy arrays of rgb observations
        seq_depth_obs: tuple of numpy arrays of depths observations
        seq_acts: numpy array of actions
        """
        start_file_indx = self.episode_lookup[idx]
        end_file_indx = start_file_indx + window_size

        episode = self.zip_sequence(start_file_indx, end_file_indx, idx)

        # split the raw episode dict into modality-specific tensors
        seq_state_obs = process_state(episode, self.observation_space, self.transforms, self.proprio_state)
        seq_rgb_obs = process_rgb(episode, self.observation_space, self.transforms)
        seq_depth_obs = process_depth(episode, self.observation_space, self.transforms)
        seq_acts = process_actions(episode, self.observation_space, self.transforms)

        info = get_state_info_dict(episode)
        seq_lang = {"lang": torch.from_numpy(episode["language"]) if self.with_lang else torch.empty(0)}
        seq_dict = {**seq_state_obs, **seq_rgb_obs, **seq_depth_obs, **seq_acts, **info, **seq_lang}  # type:ignore
        seq_dict["idx"] = idx  # type:ignore
        return seq_dict

    def load_file_indices_lang(self, abs_datasets_dir: Path) -> Tuple[List, List, List, np.ndarray]:
        """
        this method builds the mapping from index to file_name used for loading the episodes

        parameters
        ----------
        abs_datasets_dir: absolute path of the directory containing the dataset

        returns
        ----------
        episode_lookup: list for the mapping from training example index to episode (file) index
        max_batched_length_per_demo: list of possible starting indices per episode
        """
        assert abs_datasets_dir.is_dir()

        episode_lookup = []

        try:
            print("trying to load lang data from: ", abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy")
            lang_data = np.load(abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy", allow_pickle=True).reshape(
                -1
            )[0]
        except Exception:
            # fall back to annotations stored at the dataset root
            print("Exception, trying to load lang data from: ", abs_datasets_dir / "auto_lang_ann.npy")
            lang_data = np.load(abs_datasets_dir / "auto_lang_ann.npy", allow_pickle=True).reshape(-1)[0]

        ep_start_end_ids = lang_data["info"]["indx"]
        lang_ann = lang_data["language"]["emb"]
        lang_lookup = []
        max_batched_length_per_demo = []
        for i, (start_idx, end_idx) in enumerate(ep_start_end_ids):
            assert end_idx >= self.max_window_size
            cnt = 0
            # subsample start frames by skip_frames (modulus: must be >= 1)
            for idx in range(start_idx, end_idx + 1 - self.max_window_size):
                if cnt % self.skip_frames == 0:
                    lang_lookup.append(i)
                    episode_lookup.append(idx)
                cnt += 1
            possible_indices = end_idx + 1 - start_idx - self.max_window_size  # TODO: check it for skip_frames
            max_batched_length_per_demo.append(possible_indices)
        return episode_lookup, lang_lookup, max_batched_length_per_demo, lang_ann

    def load_file_indices(self, abs_datasets_dir: Path) -> Tuple[List, List]:
        """
        this method builds the mapping from index to file_name used for loading the episodes

        parameters
        ----------
        abs_datasets_dir: absolute path of the directory containing the dataset

        returns
        ----------
        episode_lookup: list for the mapping from training example index to episode (file) index
        max_batched_length_per_demo: list of possible starting indices per episode
        """
        assert abs_datasets_dir.is_dir()

        episode_lookup = []

        ep_start_end_ids = np.load(abs_datasets_dir / "ep_start_end_ids.npy")
        logger.info(f'Found "ep_start_end_ids.npy" with {len(ep_start_end_ids)} episodes.')
        max_batched_length_per_demo = []
        for start_idx, end_idx in ep_start_end_ids:
            assert end_idx > self.max_window_size
            # every frame that leaves room for a full window is a valid start
            for idx in range(start_idx, end_idx + 1 - self.max_window_size):
                episode_lookup.append(idx)
            possible_indices = end_idx + 1 - start_idx - self.max_window_size
            max_batched_length_per_demo.append(possible_indices)
        return episode_lookup, max_batched_length_per_demo
| 41.324468 | 118 | 0.644098 | 7,370 | 0.948642 | 0 | 0 | 0 | 0 | 0 | 0 | 2,701 | 0.347664 |
ff34739569bdf334a928ceaa661e604b98814f9f | 7,537 | py | Python | Recommender system/web_scraper.py | ab5163/STEM-Away | a1f3fbe34c88d1c255075796c542493444544d36 | [
"MIT"
] | null | null | null | Recommender system/web_scraper.py | ab5163/STEM-Away | a1f3fbe34c88d1c255075796c542493444544d36 | [
"MIT"
] | null | null | null | Recommender system/web_scraper.py | ab5163/STEM-Away | a1f3fbe34c88d1c255075796c542493444544d36 | [
"MIT"
] | null | null | null | import urllib
import json
import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
import string
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
def getpage(num):
    """Scrape one listing page of the EVE Online sales-ads forum.

    Returns a DataFrame with 'title' and 'url' columns, one row per topic
    link found on page *num*.
    """
    page_url = "https://forums.eveonline.com/c/marketplace/sales-ads/55?no_definitions=true&page=" + str(num)
    response = requests.get(page_url)
    soup = BeautifulSoup(response.text, "lxml")
    titles = []
    links = []
    for span in soup.findAll('span', class_='link-top-line'):
        post = span.find(class_="title raw-link raw-topic-link")
        titles.append(post.text)
        links.append(post.get('href'))
    return pd.DataFrame({'title': titles, 'url': links})
# Scrape the first 23 listing pages into one DataFrame, then prepare the
# NLP helpers (stop words extended with forum jargon, stemmer, lemmatizer).
dflist = []
for i in range(23):
    df = getpage(i)
    dflist.append(df)
data = pd.concat(dflist).reset_index().drop(columns = ['index'])
stop = stopwords.words('english')
# forum-specific boilerplate tokens to ignore
add_stop_words = ['wts', 'bump', 'hello', 'currently', 'looking', 'to', 'sell', 'the', 'following', 'hey', 'so', 'guys',
                  'price', 'is']
stop.extend(add_stop_words)
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
def remove_html(text):
    """Strip HTML markup from *text*, returning the plain-text content."""
    return BeautifulSoup(text, 'lxml').get_text()
def remove_punc(text):
    """Return *text* with every punctuation character removed.

    Bug fix: the original joined the surviving characters with a space
    (" ".join), which inserted a blank between every remaining character
    and exploded each word into single-letter tokens downstream; join
    with the empty string so words stay intact.
    """
    return "".join(c for c in text if c not in string.punctuation)
def remove_stopwords(text):
    """Drop every token from *text* that appears in the module-level ``stop`` list."""
    return [token for token in text if token not in stop]
def word_lemmatizer(text):
    """Lemmatize every token in *text* with the shared WordNet lemmatizer."""
    return [lemmatizer.lemmatize(token) for token in text]
def word_stemmer(text):
    """Stem every token in *text* with the shared Porter stemmer."""
    return [stemmer.stem(token) for token in text]
def read_from_url(url):
    # Fetch a forum topic and return a DataFrame with one row per post:
    # columns 'user', 'content' (cleaned lowercase text) and 'stem'
    # (stemmed, stopword-filtered token list).
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    containers = soup.findAll("div", {"class":"topic-body crawler-post"})
    df = pd.DataFrame(columns=['user', 'content'])
    count = 0
    for container in containers:
        user_container = container.findAll("span", {"itemprop":"name"})
        user = user_container[0].text
        #print("User: " + user.lower())
        content_container = container.findAll("div", {"class":"post"})
        """
        This if statement should be removed once infinite scorlling bar is handled
        """
        if content_container:
            # normalize whitespace, glue broken "https " links, drop curly
            # quotes and expand common contractions
            content = remove_html(content_container[0].text)
            dfcontent = (content.lower()).replace("\t","").replace("\n"," ").replace("https ", "https")\
                .replace("…","").replace("we’re", "we are").replace("“","").replace("”","").replace("i’ll", "i will")
            # Gruber URL-matching pattern, used to split text from URLs so that
            # punctuation removal is applied only to the non-URL fragments.
            # NOTE(review): the pattern is wrapped across source lines inside a
            # raw triple-quoted string WITHOUT re.VERBOSE, so the embedded
            # newlines/indentation become literal characters in the TLD
            # alternation — several TLD branches can never match. Verify
            # against the original one-line Gruber regex.
            gruber = re.compile(r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|
            gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|
            ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz
            |ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et
            |eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id
            |ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu
            |lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np
            |nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|
            Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|
            uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+
            \)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>
            ?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop
            |info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au
            |aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|
            co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|g
            f|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo
            |jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|
            mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|p
            n|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy
            |sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|
            ye|yt|yu|za|zm|zw)\b/?(?!@)))""")
            # even indices of the split are non-URL text; clean those only
            split_dfcontent = gruber.split(dfcontent)
            for i in range(0, len(split_dfcontent), 2):
                split_dfcontent[i] = remove_punc(split_dfcontent[i])
            final_dfcontent = " ".join(split_dfcontent)
            df.loc[count] = [user.lower()] + [(' '.join(final_dfcontent.split())).lower()]
            count += 1
    df['stem'] = df['content']
    for i in range(len(containers)):
        #print(df['Content'][i])
        df['stem'][i] = re.split(r'\s{1,}', df['content'][i])
    df['stem'] = df['stem'].apply(lambda x : remove_stopwords(x))
    """
    df['stem']=df['stem'].apply(lambda x: word_lemmatizer(x))
    """
    df['stem'] = df['stem'].apply(lambda x: word_stemmer(x))
    return df
# For each thread, concatenate the content and stems of all posts made by
# the thread starter (the author of the first post).
data['starter_content'] = ''
data['starter_stem'] = ''
for i in range(len(data)):
    # Fetch and preprocess every post in this thread.
    subdata = read_from_url(data['url'][i])
    starter_content = ''
    starter_stem = []
    reply_content=''
    reply_stem = []
    for k in range(len(subdata)):
        # The author of post 0 is taken to be the thread starter.
        if subdata['user'][k] == subdata['user'][0]:
            starter_content += subdata['content'][k]
            starter_stem += subdata['stem'][k]
    data['starter_content'][i] = starter_content
    data['starter_stem'][i] = starter_stem
# Drop the first row -- presumably a header/non-listing thread; TODO confirm.
data = data.iloc[1:]
# Lowercase the titles and strip stopwords, then combine title and starter
# content into a single text column.
title_stem = data.title.apply(lambda x: ' '.join([item for item in x.lower().split() if item not in stop]))
data['title_stem'] = title_stem
data['content'] = data.title_stem + ' ' + data.starter_content
def price_xtrct(text):
    """Extract price tokens and their preceding item text from `text`.

    Prices are matched in billions, then millions, then thousands
    (e.g. "2b", "3 mil", "500k"). For each price token (in that order),
    the slice of `text` between the previous token and the token's first
    occurrence is recorded as the item description.

    Returns a `(items, prices)` pair of lists.
    """
    billions = [tok for pat in (r'\d+[b]\b', r'\d+ bil\b', r'\d+ billion\b')
                for tok in re.findall(pat, text)]
    millions = [tok for pat in (r'\d+[m]\b', r'\d+ mil\b', r'\d+ million\b')
                for tok in re.findall(pat, text)]
    thousands = re.findall(r'\d+[k]\b', text)
    prices = billions + millions + thousands
    items = []
    start = 0
    for token in prices:
        # First occurrence of the price token; text before it (since the
        # previous token) is taken as the item name.
        pos = text.index(token)
        items.append(text[start:pos - 1])
        start = pos + len(token) + 1
    return items, prices
# Extract (items, prices) pairs from each listing's combined text.
item_price = data.content.apply(lambda x: price_xtrct(x))
item = [i[0] for i in item_price]
price = [i[1] for i in item_price]
data['item'] = item
data['price'] = price
# Listings with no extracted items fall back to the stemmed title.
for k, v in enumerate(data.title_stem):
    if not data.item.iloc[k]:
        data.item.iloc[k] = [v]
# Explode the per-listing item lists into flat, aligned item/price/url rows.
price = []
item = []
url = []
for k, v in enumerate(data.url):
    for i, j in enumerate(data.item.iloc[k]):
        item.append(j)
        if data.price.iloc[k]:
            price.append(data.price.iloc[k][i])
        else:
            # No price was extracted for this listing.
            price.append('NA')
        url.append(v)
df = pd.DataFrame({'item': item, 'price': price, 'url': url})
df.to_csv('sales_data.csv')
ff34d2054883cf24cbc01749ebf6d106ab82c808 | 1,778 | py | Python | NeuralNetworks/Layers/dense.py | hiandersson/deep-dream-networks | e6b3d527a723cd67fc6863f838a57096563466fa | [
"MIT"
] | 1 | 2022-01-01T12:11:04.000Z | 2022-01-01T12:11:04.000Z | NeuralNetworks/Layers/dense.py | hiandersson/deep-dream-networks | e6b3d527a723cd67fc6863f838a57096563466fa | [
"MIT"
] | null | null | null | NeuralNetworks/Layers/dense.py | hiandersson/deep-dream-networks | e6b3d527a723cd67fc6863f838a57096563466fa | [
"MIT"
] | null | null | null | import numpy
from NeuralNetworks.Layers.activations import lambda_from_function
class Dense:
    """Fully connected neural-network layer with a named activation."""

    def __init__(self, num_nodes=1, input_dim=None, activation='sigmoid'):
        """Record the layer size and resolve the activation/gradient pair."""
        self.num_nodes = num_nodes
        self.input_dim = input_dim
        self.activation = activation
        # Look up the activation callable and its derivative by name.
        self.activation_function, self.activation_gradient = lambda_from_function(activation)

    def init(self, previous_layer):
        """Initialise the weight matrix once the preceding layer is known."""
        self.previous_layer = previous_layer
        # Input width comes from the explicit input_dim for the first layer,
        # otherwise from the node count of the previous layer.
        fan_in = self.input_dim if previous_layer is None else previous_layer.num_nodes
        # Scaled normal initialisation: std = 1 / sqrt(fan_in).
        self.weights = numpy.random.normal(0.0, fan_in ** -0.5, (self.num_nodes, fan_in))
        self.output_shape = (self.num_nodes, 1)

    def forward(self, input):
        """Run the layer on a column vector; cache and return the output."""
        pre_activation = numpy.dot(self.weights, input)
        activated = self.activation_function(pre_activation)
        assert self.output_shape == activated.shape
        self.layer_output = activated
        return self.layer_output

    def backward(self, learning_rate, error_gradient_in, previous_layer_output):
        """Apply one gradient step and return the error for the layer below.

        delta W = lr * (error_in * activation'(cached output)) @ prev_output^T
        """
        local_gradient = error_gradient_in * self.activation_gradient(self.layer_output)
        self.weights += learning_rate * numpy.dot(
            local_gradient, numpy.transpose(previous_layer_output)
        )
        # Propagate the error backwards through the (updated) weights.
        return numpy.dot(self.weights.T, error_gradient_in)
| 33.54717 | 147 | 0.674916 | 1,696 | 0.953881 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.185039 |
ff35be0655f2594bf8b249c16444cb716e81aff8 | 2,068 | py | Python | src/static/widget/autocomplete/js/minify.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
] | 78 | 2017-05-26T08:38:25.000Z | 2022-02-25T08:55:31.000Z | src/static/widget/autocomplete/js/minify.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
] | 105 | 2017-05-18T21:57:13.000Z | 2022-03-18T21:41:47.000Z | src/static/widget/autocomplete/js/minify.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
] | 19 | 2017-06-12T18:31:54.000Z | 2021-11-10T00:04:43.000Z | #!/usr/bin/python
import sys
if sys.version > '3':
PY3 = True
else:
PY3 = False
if PY3:
import http.client, urllib.request, urllib.parse, urllib.error, sys, json,pprint, os.path
else:
import httplib, urllib, sys, json,pprint, os.path
infile = sys.argv[1] if len(sys.argv)>1 else 'mygene_query.js'
outfile = '_min'.join(os.path.splitext(infile))
with file(infile) as in_f:
if PY3:
params = urllib.parse.urlencode([
#('code_url', sys.argv[1]),
('js_code', in_f.read()),
#('compilation_level', 'ADVANCED_OPTIMIZATIONS'),
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
#('warning_level', 'verbose'),
])
else:
params = urllib.urlencode([
#('code_url', sys.argv[1]),
('js_code', in_f.read()),
#('compilation_level', 'ADVANCED_OPTIMIZATIONS'),
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
#('warning_level', 'verbose'),
])
headers = { "Content-type": "application/x-www-form-urlencoded" }
if PY3:
conn = http.client.HTTPConnection('closure-compiler.appspot.com')
else:
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', params, headers)
response = conn.getresponse()
data = response.read()
conn.close
res = json.loads(data)
print("Errors:",)
pprint.pprint(res.get('errors', None))
print("Warnings:",)
pprint.pprint(res.get('warnings', None))
pprint.pprint(res['statistics'])
out_f = file(outfile, 'w')
out_f.write(res['compiledCode'])
out_f.close()
| 31.333333 | 93 | 0.576886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 783 | 0.378627 |
ff3706c2486945d76ad59f2792a976df3fa7f5b3 | 464 | py | Python | multimenus/migrations/0006_auto_20200715_1100.py | arck1/djangocms-multimenus | 45accefcb280b96373121331f3294ffffe3e36af | [
"MIT"
] | null | null | null | multimenus/migrations/0006_auto_20200715_1100.py | arck1/djangocms-multimenus | 45accefcb280b96373121331f3294ffffe3e36af | [
"MIT"
] | null | null | null | multimenus/migrations/0006_auto_20200715_1100.py | arck1/djangocms-multimenus | 45accefcb280b96373121331f3294ffffe3e36af | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-07-15 11:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter `MenuItem.url` to an optional, nullable CharField."""

    dependencies = [
        # Must be applied after the previous multimenus migration.
        ('multimenus', '0005_auto_20200415_1621'),
    ]

    operations = [
        migrations.AlterField(
            model_name='menuitem',
            name='url',
            field=models.CharField(blank=True, help_text='Page url, can be absolute or related.', max_length=200, null=True),
        ),
    ]
| 24.421053 | 125 | 0.625 | 370 | 0.797414 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.299569 |
ff390f1c78c4b2350ac89e8b55df3651f99bb7d5 | 36 | py | Python | vnpy/gateway/sec/__init__.py | funrunskypalace/vnpy | 2d87aede685fa46278d8d3392432cc127b797926 | [
"MIT"
] | 19,529 | 2015-03-02T12:17:35.000Z | 2022-03-31T17:18:27.000Z | vnpy/gateway/sec/__init__.py | funrunskypalace/vnpy | 2d87aede685fa46278d8d3392432cc127b797926 | [
"MIT"
] | 2,186 | 2015-03-04T23:16:33.000Z | 2022-03-31T03:44:01.000Z | vnpy/gateway/sec/__init__.py | funrunskypalace/vnpy | 2d87aede685fa46278d8d3392432cc127b797926 | [
"MIT"
] | 8,276 | 2015-03-02T05:21:04.000Z | 2022-03-31T13:13:13.000Z | from .sec_gateway import SecGateway
| 18 | 35 | 0.861111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ff3972f77730df133aea4a5e788e02702aea2856 | 2,449 | py | Python | Vertical Order Traversal of a Binary Tree.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Vertical Order Traversal of a Binary Tree.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Vertical Order Traversal of a Binary Tree.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | '''
Given a binary tree, return the vertical order traversal of its nodes values.
For each node at position (X, Y), its left and right children respectively will be at positions (X-1, Y-1) and (X+1, Y-1).
Running a vertical line from X = -infinity to X = +infinity, whenever the vertical line touches some nodes, we report the values of the nodes in order from top to bottom (decreasing Y coordinates).
If two nodes have the same position, then the value of the node that is reported first is the value that is smaller.
Return an list of non-empty reports in order of X coordinate. Every report will have a list of values of nodes.
Example 1:
Input: [3,9,20,null,null,15,7]
Output: [[9],[3,15],[20],[7]]
Explanation:
Without loss of generality, we can assume the root node is at position (0, 0):
Then, the node with value 9 occurs at position (-1, -1);
The nodes with values 3 and 15 occur at positions (0, 0) and (0, -2);
The node with value 20 occurs at position (1, -1);
The node with value 7 occurs at position (2, -2).
Example 2:
Input: [1,2,3,4,5,6,7]
Output: [[4],[2],[1,5,6],[3],[7]]
Explanation:
The node with value 5 and the node with value 6 have the same position according to the given scheme.
However, in the report "[1,5,6]", the node value of 5 comes first since 5 is smaller than 6.
Note:
The tree will have between 1 and 1000 nodes.
Each node's value will be between 0 and 1000.
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """BFS-based vertical-order traversal of a binary tree."""

    def verticalTraversal(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if root is None:
            return []
        # Map: horizontal column -> node values in visit order.
        columns = {}
        level = [(root, 0)]
        while level:
            children = []
            for node, col in level:
                columns.setdefault(col, []).append(node.val)
                if node.left is not None:
                    children.append((node.left, col - 1))
                if node.right is not None:
                    children.append((node.right, col + 1))
            # Ties at the same position can only happen within one level
            # (same row), so sorting the next level by (column, value)
            # guarantees the smaller value is reported first.
            level = sorted(children, key=lambda entry: (entry[1], entry[0].val))
        # Report columns left to right.
        return [columns[col] for col in sorted(columns)]
| 31 | 197 | 0.597387 | 854 | 0.348714 | 0 | 0 | 0 | 0 | 0 | 0 | 1,676 | 0.684361 |
ff39d54b3433a471a6702b5c30caf7429b0f87ae | 2,404 | py | Python | scripts/distribution.py | Qi-max/amlearn | 88189519bc1079ab5085d5871169c223e0d03057 | [
"BSD-3-Clause-LBNL"
] | 12 | 2019-02-07T16:45:29.000Z | 2021-03-15T12:44:07.000Z | scripts/distribution.py | Qi-max/amlearn | 88189519bc1079ab5085d5871169c223e0d03057 | [
"BSD-3-Clause-LBNL"
] | 2 | 2018-11-22T04:59:10.000Z | 2019-12-05T14:22:29.000Z | scripts/distribution.py | Qi-max/amlearn | 88189519bc1079ab5085d5871169c223e0d03057 | [
"BSD-3-Clause-LBNL"
] | 5 | 2020-12-03T07:18:50.000Z | 2022-01-20T09:17:47.000Z | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
__author__ = "Qi Wang"
__email__ = "qiwang.mse@gmail.com"
def column_hist(data, bins=None, density=True, fraction=True,
                save_figure_to_dir=None, save_data_to_dir=None, fmt=None,
                ylim=None, yscale=None):
    """Histogram `data`, optionally saving a figure and/or the raw values.

    :param data: array-like to histogram
    :param bins: passed to `np.histogram`; defaults to 20 equal-width bins
    :param density: if True, normalize to a probability density
    :param fraction: if True (and `density` is False), scale raw counts to
        fractions of the total
    :param save_figure_to_dir: file path for the plot; skipped when falsy
    :param save_data_to_dir: file path for the (edge, value) text dump
    :param fmt: `np.savetxt` format; defaults to '%.10e'
    :param ylim: optional (low, high) y-axis limits for the figure
    :param yscale: optional y-axis scale, e.g. "log"
    """
    if bins is None:
        bins = 20
    if fmt is None:
        fmt = '%.10e'
    counts, edges = np.histogram(data, bins=bins, density=density)
    # With raw counts, optionally normalise to fractions of the total.
    if density is False and fraction:
        counts = counts / counts.sum()
    if save_figure_to_dir:
        plt.figure(figsize=(6, 6))
        # alpha gives transparency
        plt.plot(edges[1:], counts, 'r--o', alpha=0.5, linewidth=1.0)
        if ylim:
            plt.ylim(*ylim)
        if yscale:
            plt.yscale(yscale)
        plt.savefig(save_figure_to_dir, dpi=100, bbox_inches='tight')
        plt.close()
    if save_data_to_dir:
        # One row per bin: (right edge, value).
        np.savetxt(save_data_to_dir, list(zip(edges[1:], counts)), fmt=fmt)
if __name__ == "__main__":
    system = ["Cu65Zr35", "qr_5plus10^10"]
    prediction_file = "xx"
    output_path = "xxx"
    output_file_header = r'{}_{}_QS'.format(*system)
    qs_col = "QS_predict"
    bin_width = 0.02
    df = pd.read_csv(prediction_file, index_col=0)
    # One histogram per variant: (tag, density, ylim, yscale).
    variants = [
        ("density", True, None, None),
        ("fraction", False, (0, 0.05), None),
        ("fraction", False, (0.0001, 0.05), "log"),
    ]
    for tag, density, ylim, yscale in variants:
        suffix = "_log" if yscale else ""
        stem = "{}_{}_bin_{}{}".format(output_file_header, tag, bin_width, suffix)
        column_hist(df[qs_col], bins=np.arange(0, 1.0, bin_width),
                    density=density,
                    save_figure_to_dir=os.path.join(output_path, stem + ".png"),
                    save_data_to_dir=os.path.join(output_path, stem + ".csv"),
                    ylim=ylim, yscale=yscale,
                    fmt=["%.2f", '%.10e'])
| 38.15873 | 123 | 0.61772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.140599 |
ff3ba356b3b950b1ffa30b4e137a88a175b8f034 | 1,700 | py | Python | mauna_loa.py | ellisbrown/BNN-Uncertainty | 71271c2a9387baf57a1007303b989e71f85e2abe | [
"MIT"
] | null | null | null | mauna_loa.py | ellisbrown/BNN-Uncertainty | 71271c2a9387baf57a1007303b989e71f85e2abe | [
"MIT"
] | null | null | null | mauna_loa.py | ellisbrown/BNN-Uncertainty | 71271c2a9387baf57a1007303b989e71f85e2abe | [
"MIT"
] | null | null | null |
import tensorflow as tf
import matplotlib.pyplot as plt
import argparse
from keras.models import load_model
from data import load_from_H5
from bnn import bnn
from viz import plot_predictions
# Command-line interface.
parser = argparse.ArgumentParser(description='Mauna Loa runner')
parser.add_argument('--trained_model', default='models/mauna_loa.h5',
                    type=str, help='Trained state_dict file path to open')
# NOTE(review): argparse `type=bool` converts any non-empty string (including
# "False") to True; `action='store_true'` would be the usual fix, but it is
# left as-is to keep the existing CLI contract.
parser.add_argument('--train', default=False, type=bool)
parser.add_argument('--model_name', default='mauna_loa.h5', type=str,
                    help='Dir to save results')
parser.add_argument('--epochs', default=40, type=int, help='Number of epochs to train on')
parser.add_argument('-f', default=None, type=str, help="Dummy arg so we can load in Jupyter Notebooks")
args = parser.parse_args()

# Mauna Loa dataset, pre-split into train/test HDF5 files.
test_hdf5_filepath = 'data/Mauna Loa/test.h5'
train_hdf5_filepath = 'data/Mauna Loa/train.h5'
testset = load_from_H5(test_hdf5_filepath)
trainset = load_from_H5(train_hdf5_filepath)
X_test, y_test = testset
X_train, y_train = trainset

# Model hyperparameters.
N = 272  # currently unused -- presumably the training-set size; TODO confirm
# l2 = 0.01
num_hidden_layers = 5
# num hidden units
n_hidden = 1024
epochs = args.epochs
batch_size = 128
epochs_multiplier = 1
# BUG FIX: removed a stray bare `epochs_multiplier` expression statement
# (a no-op left over from notebook use).
tau = 0.1
dropout = 0.1

# Bayesian NN with dropout; `tau` and `dropout` control the uncertainty model.
net = bnn(
    X_train,
    y_train,
    ([int(n_hidden)] * num_hidden_layers),
    normalize=False,
    tau=tau,
    dropout=dropout,
    activation='relu'
)

if args.train:
    print("Training a new model...")
    net.train(X_train, y_train, epochs=epochs, batch_size=batch_size,
              verbose=1)
    net.model.save("models/" + args.model_name)
else:
    # Reuse a previously trained Keras model from disk.
    net.model = load_model(args.trained_model)

# Plot predictions with uncertainty bands (20 stochastic forward passes).
plot_predictions(net, trainset, X_test, iters=20, n_std=4)
plt.show()
| 27.419355 | 103 | 0.724118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.212353 |
ff3c1fbbadea09a6b0eb293f80eec2bc2157f2ee | 692 | py | Python | guest/models.py | TylerBackman/chemminetools | 1896c2dd7362f44193528aef028390543a125921 | [
"BSD-4-Clause-UC"
] | 16 | 2015-03-24T18:34:18.000Z | 2018-04-11T06:02:52.000Z | guest/models.py | zagrosman/chemminetools | 1896c2dd7362f44193528aef028390543a125921 | [
"BSD-4-Clause-UC"
] | 60 | 2019-01-03T18:15:55.000Z | 2022-03-12T00:14:59.000Z | guest/models.py | zagrosman/chemminetools | 1896c2dd7362f44193528aef028390543a125921 | [
"BSD-4-Clause-UC"
] | 13 | 2018-04-25T18:25:09.000Z | 2022-02-14T08:15:03.000Z | import datetime
from django.db import models
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
class Guest(models.Model):
    """
    A temporary user.

    Fields:
    ``user`` - The temporary user.
    ``last_used`` - The last time we noted this user doing something.

    All users with a record in this model are temporary and should be
    deleted after GUEST_DELETE_TIME.
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # BUG FIX: was `models.DateTimeField(User)`, which passed the User class
    # as the field's `verbose_name`. The field stores a plain timestamp and
    # takes no model argument (cosmetic fix only; no schema change).
    last_used = models.DateTimeField()

    @classmethod
    def create_guest(cls, user):
        """Return a new (unsaved) Guest for ``user``, stamped with now."""
        guest = Guest(user=user, last_used=datetime.datetime.now())
        return guest
| 24.714286 | 69 | 0.693642 | 549 | 0.793353 | 0 | 0 | 135 | 0.195087 | 0 | 0 | 268 | 0.387283 |
ff3d3ed279a1850ba4785d212475832f6102bcfe | 288 | py | Python | test/unit/metrics/utils.py | alliesaizan/fairlearn | 846ce6cdaf188e32a545d3f90197515a4a5bc471 | [
"MIT"
] | 1,142 | 2019-10-14T18:05:46.000Z | 2022-03-30T06:56:54.000Z | test/unit/metrics/utils.py | alliesaizan/fairlearn | 846ce6cdaf188e32a545d3f90197515a4a5bc471 | [
"MIT"
] | 623 | 2019-10-14T17:11:25.000Z | 2022-03-31T17:46:54.000Z | test/unit/metrics/utils.py | alliesaizan/fairlearn | 846ce6cdaf188e32a545d3f90197515a4a5bc471 | [
"MIT"
] | 299 | 2019-10-15T00:09:53.000Z | 2022-03-30T12:35:27.000Z | # Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
import fairlearn.metrics as metrics
def _get_raw_MetricFrame():
    # Gets an uninitialised MetricFrame for testing purposes: allocate the
    # object without running __init__.
    frame_cls = metrics.MetricFrame
    return frame_cls.__new__(frame_cls)
| 28.8 | 65 | 0.802083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.534722 |
ff3d57b7a5ef287c60851d0bb59634ded80f741c | 1,584 | py | Python | day4/day4.py | areddish/aoc2020 | 9d609e8147e370e31ffe92e4170c5ca7c463186c | [
"MIT"
] | 1 | 2020-12-08T03:02:00.000Z | 2020-12-08T03:02:00.000Z | day4/day4.py | areddish/aoc2020 | 9d609e8147e370e31ffe92e4170c5ca7c463186c | [
"MIT"
] | null | null | null | day4/day4.py | areddish/aoc2020 | 9d609e8147e370e31ffe92e4170c5ca7c463186c | [
"MIT"
] | null | null | null | def check_hgt(h):
if "cm" in h:
return 150 <= int(h.replace("cm","")) <= 193
elif "in" in h:
return 59 <= int(h.replace("in","")) <= 76
else:
return False
def check_hcl(h):
    """Validate a hair colour: '#' followed by exactly six lowercase hex digits."""
    if h[0] != '#' or len(h) != 7:
        return False
    return all(c in "abcdef0123456789" for c in h[1:])
def check_range(v, low, high):
    """Return True when int(v) lies in [low, high], inclusive."""
    value = int(v)
    return value >= low and value <= high
# Field name -> validator callable for Advent of Code day 4, part 2.
validation = {
    "hgt": check_hgt,  # height with unit
    "hcl": check_hcl,  # hair colour, '#' + 6 hex digits
    "pid": lambda x: len(x) == 9,  # passport id: exactly nine characters
    "byr": lambda x: check_range(x, 1920, 2002),  # birth year
    "iyr": lambda x: check_range(x, 2010, 2020),  # issue year
    "eyr": lambda x: check_range(x, 2020, 2030),  # expiration year
    "ecl": lambda x: x in ["amb","blu","brn","gry", "grn", "hzl", "oth"],  # eye colour
    "cid": lambda x: True  # country id: any value accepted
    }
def validate_passport(p):
    """A passport is valid with all 8 fields, or 7 fields where only `cid` is missing."""
    if len(p) == 8:
        return True
    return len(p) == 7 and not p.get("cid", None)
#with open("test.txt", "rt") as file:
with open("day4.txt", "rt") as file:
    # valid: part 1 (all required fields present);
    # valid2: part 2 (fields present AND each value passes its validator).
    valid = 0
    valid2 = 0
    passport = {}
    passport2 = {}
    for l in file.read().splitlines():
        # A blank line terminates the current passport record.
        if l.strip() == "":
            valid += validate_passport(passport)
            valid2 += validate_passport(passport2)
            passport = {}
            passport2 = {}
        else:
            # Fields are space-separated "key:value" pairs.
            for x in l.strip().split(' '):
                p = x.split(":")
                passport[p[0]] = p[1]
                # Only keep fields whose value passes the validator.
                if (validation[p[0]](p[1])):
                    passport2[p[0]] = p[1]
    # Flush the final record (the file may not end with a blank line).
    valid += validate_passport(passport)
    valid2 += validate_passport(passport2)
print (valid)
print (valid2)
| 27.310345 | 73 | 0.508207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.113636 |
ff3f7bfba4752a7df9e45892c6e150e45e34b651 | 29,763 | py | Python | core/dataflow/nodes/volatility_models.py | ajmal017/amp | 8de7e3b88be87605ec3bad03c139ac64eb460e5c | [
"BSD-3-Clause"
] | null | null | null | core/dataflow/nodes/volatility_models.py | ajmal017/amp | 8de7e3b88be87605ec3bad03c139ac64eb460e5c | [
"BSD-3-Clause"
] | null | null | null | core/dataflow/nodes/volatility_models.py | ajmal017/amp | 8de7e3b88be87605ec3bad03c139ac64eb460e5c | [
"BSD-3-Clause"
] | null | null | null | import collections
import datetime
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import scipy as sp
import sklearn as sklear
import core.config as cconfig
import core.data_adapters as cdataa
import core.dataflow.utils as cdu
import core.finance as cfinan
import core.signal_processing as csigna
import core.statistics as cstati
import helpers.dbg as dbg
from core.dataflow.core import DAG, Node
from core.dataflow.nodes.base import (
ColModeMixin,
FitPredictNode,
SeriesToDfColProcessor,
)
from core.dataflow.nodes.sources import ReadDataFromDf
from core.dataflow.nodes.transformers import ColumnTransformer
from core.dataflow.visitors import extract_info
_LOG = logging.getLogger(__name__)

# Type aliases used throughout this module.
_COL_TYPE = Union[int, str]  # a dataframe column identifier
_PANDAS_DATE_TYPE = Union[str, pd.Timestamp, datetime.datetime]
_TO_LIST_MIXIN_TYPE = Union[List[_COL_TYPE], Callable[[], List[_COL_TYPE]]]
class SmaModel(FitPredictNode, ColModeMixin):
    """
    Fit and predict a smooth moving average (SMA) model.
    """

    def __init__(
        self,
        nid: str,
        col: _TO_LIST_MIXIN_TYPE,
        steps_ahead: int,
        tau: Optional[float] = None,
        min_tau_periods: Optional[float] = 2,
        col_mode: Optional[str] = None,
        nan_mode: Optional[str] = None,
    ) -> None:
        """
        Specify the data and SMA modeling parameters.

        :param nid: unique node id
        :param col: name of column to model
        :param steps_ahead: as in `ContinuousSkLearnModel`
        :param tau: as in `csigna.compute_smooth_moving_average`. If `None`,
            learn this parameter. Will be re-learned on each `fit` call.
        :param min_tau_periods: similar to `min_periods` as in
            `csigna.compute_smooth_moving_average`, but expressed in units of
            tau
        :param col_mode: `merge_all` or `replace_all`, as in `ColumnTransformer()`
        :param nan_mode: as in `ContinuousSkLearnModel`
        """
        super().__init__(nid)
        self._col = cdu.convert_to_list(col)
        # Only single-column modeling is supported.
        dbg.dassert_eq(len(self._col), 1)
        self._steps_ahead = steps_ahead
        dbg.dassert_lte(
            0, self._steps_ahead, "Non-causal prediction attempted! Aborting..."
        )
        if nan_mode is None:
            self._nan_mode = "raise"
        else:
            self._nan_mode = nan_mode
        self._col_mode = col_mode or "replace_all"
        dbg.dassert_in(self._col_mode, ["replace_all", "merge_all"])
        # Smooth moving average model parameters to learn.
        self._must_learn_tau = tau is None
        self._tau = tau
        self._min_tau_periods = min_tau_periods or 0
        self._min_depth = 1
        self._max_depth = 1
        # Loss used when learning `tau`.
        self._metric = sklear.metrics.mean_absolute_error

    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        """
        Learn `tau` (when not supplied) and generate in-sample predictions.
        """
        # The trailing `steps_ahead` rows have no forward targets.
        idx = df_in.index[: -self._steps_ahead]
        x_vars = self._col
        y_vars = self._col
        df = cdu.get_x_and_forward_y_fit_df(
            df_in, x_vars, y_vars, self._steps_ahead
        )
        forward_y_cols = df.drop(x_vars, axis=1).columns
        # Handle presence of NaNs according to `nan_mode`.
        self._handle_nans(idx, df.index)
        # Define and fit model.
        if self._must_learn_tau:
            forward_y_df = df[forward_y_cols]
            # Prepare forward y_vars in sklearn format.
            forward_y_fit = cdataa.transform_to_sklearn(
                forward_y_df, forward_y_df.columns.tolist()
            )
            # Prepare `x_vars` in sklearn format.
            x_fit = cdataa.transform_to_sklearn(df, self._col)
            self._tau = self._learn_tau(x_fit, forward_y_fit)
        _LOG.debug("tau=%s", self._tau)
        return self._predict_and_package_results(df_in, idx, df.index, fit=True)

    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        """
        Generate out-of-sample predictions using the fitted `tau`.
        """
        cdu.validate_df_indices(df_in)
        df = df_in.copy()
        idx = df.index
        # Restrict to times where col has no NaNs.
        non_nan_idx = df.loc[idx][self._col].dropna().index
        # Handle presence of NaNs according to `nan_mode`.
        self._handle_nans(idx, non_nan_idx)
        # Use trained model to generate predictions.
        dbg.dassert_is_not(
            self._tau,
            None,
            "Parameter tau not found! Check if `fit` has been run.",
        )
        return self._predict_and_package_results(
            df_in, idx, non_nan_idx, fit=False
        )

    def get_fit_state(self) -> Dict[str, Any]:
        """
        Return the learned state (`tau` and fit info) for serialization.
        """
        fit_state = {"_tau": self._tau, "_info['fit']": self._info["fit"]}
        return fit_state

    def set_fit_state(self, fit_state: Dict[str, Any]) -> None:
        """
        Restore state previously produced by `get_fit_state()`.
        """
        self._tau = fit_state["_tau"]
        self._info["fit"] = fit_state["_info['fit']"]

    def _predict_and_package_results(
        self,
        df_in: pd.DataFrame,
        idx: pd.Index,
        non_nan_idx: pd.Index,
        fit: bool = True,
    ) -> Dict[str, pd.DataFrame]:
        """
        Run the SMA over `df_in` and assemble the output dataframe and info.
        """
        data = cdataa.transform_to_sklearn(df_in.loc[non_nan_idx], self._col)
        fwd_y_hat = self._predict(data)
        forward_y_df = cdu.get_forward_cols(df_in, self._col, self._steps_ahead)
        forward_y_df = forward_y_df.loc[non_nan_idx]
        # Put predictions in dataflow dataframe format.
        fwd_y_hat_vars = [f"{y}_hat" for y in forward_y_df.columns]
        fwd_y_hat = cdataa.transform_from_sklearn(
            non_nan_idx, fwd_y_hat_vars, fwd_y_hat
        )
        # Return targets and predictions.
        df_out = forward_y_df.reindex(idx).merge(
            fwd_y_hat.reindex(idx), left_index=True, right_index=True
        )
        dbg.dassert_no_duplicates(df_out.columns)
        # Select columns for output.
        df_out = self._apply_col_mode(
            df_in, df_out, cols=self._col, col_mode=self._col_mode
        )
        # Update `info`.
        info = collections.OrderedDict()
        info["tau"] = self._tau
        info["min_periods"] = self._get_min_periods(self._tau)
        info["df_out_info"] = cdu.get_df_info_as_string(df_out)
        method = "fit" if fit else "predict"
        self._set_info(method, info)
        return {"df_out": df_out}

    def _handle_nans(
        self, idx: pd.DataFrame.index, non_nan_idx: pd.DataFrame.index
    ) -> None:
        """
        Enforce the configured NaN policy by comparing `idx` to `non_nan_idx`.
        """
        if self._nan_mode == "raise":
            if idx.shape[0] != non_nan_idx.shape[0]:
                nan_idx = idx.difference(non_nan_idx)
                raise ValueError(f"NaNs detected at {nan_idx}")
        elif self._nan_mode == "drop":
            pass
        elif self._nan_mode == "leave_unchanged":
            pass
        else:
            raise ValueError(f"Unrecognized nan_mode `{self._nan_mode}`")

    def _learn_tau(self, x: np.array, y: np.array) -> float:
        """
        Find the `tau` minimizing `self._metric` between SMA(x) and `y`.
        """
        def score(tau: float) -> float:
            # Objective: metric between SMA(x) at this tau and y, ignoring
            # the burn-in period.
            x_srs = pd.DataFrame(x.flatten())
            sma = csigna.compute_smooth_moving_average(
                x_srs,
                tau=tau,
                min_periods=0,
                min_depth=self._min_depth,
                max_depth=self._max_depth,
            )
            min_periods = self._get_min_periods(tau)
            return self._metric(sma[min_periods:], y[min_periods:])

        tau_lb, tau_ub = 1, 1000
        # Satisfy 2 * tau_ub * min_tau_periods = len(x).
        # This ensures that no more than half of the `fit` series is burned.
        if self._min_tau_periods > 0:
            tau_ub = int(len(x) / (2 * self._min_tau_periods))
        opt_results = sp.optimize.minimize_scalar(
            score, method="bounded", bounds=[tau_lb, tau_ub]
        )
        return opt_results.x

    def _get_min_periods(self, tau: float) -> int:
        """
        Return burn-in period.

        Multiplies `tau` by `min_tau_periods` and converts to an integer.

        :param tau: kernel tau (approximately equal to center of mass)
        :return: minimum number of periods required to generate a prediction
        """
        return int(np.rint(self._min_tau_periods * tau))

    def _predict(self, x: np.array) -> np.array:
        """
        Apply the fitted SMA to `x` and return the smoothed values.
        """
        x_srs = pd.DataFrame(x.flatten())
        # TODO(*): Make `min_periods` configurable.
        min_periods = int(np.rint(self._min_tau_periods * self._tau))
        _LOG.debug("min_periods=%f", min_periods)
        x_sma = csigna.compute_smooth_moving_average(
            x_srs,
            tau=self._tau,
            min_periods=min_periods,
            min_depth=self._min_depth,
            max_depth=self._max_depth,
        )
        return x_sma.values
class SingleColumnVolatilityModel(FitPredictNode):
    """
    Model the volatility of a single column using an internal SMA-based DAG.
    """

    def __init__(
        self,
        nid: str,
        steps_ahead: int,
        col: _COL_TYPE,
        p_moment: float = 2,
        progress_bar: bool = False,
        tau: Optional[float] = None,
        nan_mode: Optional[str] = None,
        out_col_prefix: Optional[str] = None,
    ) -> None:
        """
        Parameters have the same meaning as `SmaModel`.
        """
        super().__init__(nid)
        self._col = col
        self._steps_ahead = steps_ahead
        dbg.dassert_lte(1, p_moment)
        self._p_moment = p_moment
        self._progress_bar = progress_bar
        self._tau = tau
        # When no tau is supplied, it is learned during `fit`.
        self._learn_tau_on_fit = tau is None
        self._nan_mode = nan_mode
        self._out_col_prefix = out_col_prefix

    def get_fit_state(self) -> Dict[str, Any]:
        """
        Return the learned state for serialization.
        """
        fit_state = {
            "_col": self._col,
            "_tau": self._tau,
            "_info['fit']": self._info["fit"],
            "_out_col_prefix": self._out_col_prefix,
        }
        return fit_state

    def set_fit_state(self, fit_state: Dict[str, Any]):
        """
        Restore state previously produced by `get_fit_state()`.
        """
        self._col = fit_state["_col"]
        self._tau = fit_state["_tau"]
        self._info["fit"] = fit_state["_info['fit']"]
        self._out_col_prefix = fit_state["_out_col_prefix"]

    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return {"df_out": self._fit_predict_helper(df_in, fit=True)}

    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return {"df_out": self._fit_predict_helper(df_in, fit=False)}

    def _fit_predict_helper(self, df_in: pd.DataFrame, fit: bool) -> pd.DataFrame:
        """
        Build the internal DAG, run it in `fit` or `predict` mode, and
        collect node info (learning `tau` on fit when requested).
        """
        info = collections.OrderedDict()
        name = self._out_col_prefix or self._col
        name = str(name)
        # The DAG will create `<name>_vol`; it must not already exist.
        dbg.dassert_not_in(name + "_vol", df_in.columns)
        if self._learn_tau_on_fit and fit:
            tau = None
        else:
            tau = self._tau
        config = self._get_config(col=self._col, out_col_prefix=name, tau=tau)
        dag = self._get_dag(df_in[[self._col]], config)
        mode = "fit" if fit else "predict"
        df_out = dag.run_leq_node(
            "demodulate_using_vol_pred", mode, progress_bar=self._progress_bar
        )["df_out"]
        info[self._col] = extract_info(dag, [mode])
        if self._learn_tau_on_fit and fit:
            # Record the tau learned by the internal `SmaModel` node.
            self._tau = info[self._col]["compute_smooth_moving_average"]["fit"][
                "tau"
            ]
        df_out = df_out.reindex(df_in.index)
        self._set_info(mode, info)
        return df_out

    def _get_config(
        self,
        col: _COL_TYPE,
        out_col_prefix: _COL_TYPE,
        tau: Optional[float] = None,
    ) -> cconfig.Config:
        """
        Generate a DAG config.

        :param col: column whose volatility is to be modeled
        :param tau: tau for SMA; if `None`, then to be learned
        :return: a complete config to be used with `_get_dag()`
        """
        config = cconfig.get_config_from_nested_dict(
            {
                "calculate_vol_pth_power": {
                    "cols": [col],
                    "col_rename_func": lambda x: out_col_prefix + "_vol",
                    "col_mode": "merge_all",
                },
                "compute_smooth_moving_average": {
                    "col": [out_col_prefix + "_vol"],
                    "steps_ahead": self._steps_ahead,
                    "tau": tau,
                    "col_mode": "merge_all",
                    "nan_mode": self._nan_mode,
                },
                "calculate_vol_pth_root": {
                    "cols": [
                        out_col_prefix + "_vol",
                        out_col_prefix + "_vol_" + str(self._steps_ahead),
                        out_col_prefix
                        + "_vol_"
                        + str(self._steps_ahead)
                        + "_hat",
                    ],
                    "col_mode": "replace_selected",
                },
                "demodulate_using_vol_pred": {
                    "signal_cols": [col],
                    "volatility_col": out_col_prefix
                    + "_vol_"
                    + str(self._steps_ahead)
                    + "_hat",
                    "signal_steps_ahead": 0,
                    "volatility_steps_ahead": self._steps_ahead,
                    "col_rename_func": lambda x: out_col_prefix + "_vol_adj",
                    "col_mode": "replace_selected",
                    "nan_mode": self._nan_mode,
                },
            }
        )
        return config

    def _get_dag(self, df_in: pd.DataFrame, config: cconfig.Config) -> DAG:
        """
        Build a DAG from data and config.

        :param df_in: data over which to run DAG
        :param config: config for configuring DAG nodes
        :return: ready-to-run DAG
        """
        dag = DAG(mode="strict")
        _LOG.debug("%s", config)
        # Load `df_in`.
        nid = "load_data"
        node = ReadDataFromDf(nid, df_in)
        tail_nid = self._append(dag, None, node)
        # Raise volatility columns to pth power.
        nid = "calculate_vol_pth_power"
        node = ColumnTransformer(
            nid,
            transformer_func=lambda x: np.abs(x) ** self._p_moment,
            **config[nid].to_dict(),
        )
        tail_nid = self._append(dag, tail_nid, node)
        # Predict pth power of volatility using smooth moving average.
        nid = "compute_smooth_moving_average"
        node = SmaModel(nid, **config[nid].to_dict())
        tail_nid = self._append(dag, tail_nid, node)
        # Calculate the pth root of volatility columns.
        nid = "calculate_vol_pth_root"
        node = ColumnTransformer(
            nid,
            transformer_func=lambda x: np.abs(x) ** (1.0 / self._p_moment),
            **config[nid].to_dict(),
        )
        tail_nid = self._append(dag, tail_nid, node)
        # Divide returns by volatilty prediction.
        nid = "demodulate_using_vol_pred"
        node = VolatilityModulator(
            nid, mode="demodulate", **config[nid].to_dict()
        )
        self._append(dag, tail_nid, node)
        return dag

    # TODO(gp): This code has several copies. Move it to the base class.
    @staticmethod
    def _append(dag: DAG, tail_nid: Optional[str], node: Node) -> str:
        """
        Add `node` to `dag`, connect it after `tail_nid` (if any), and return
        the new tail nid.
        """
        dag.add_node(node)
        if tail_nid is not None:
            dag.connect(tail_nid, node.nid)
        return node.nid
class _MultiColVolatilityModelMixin:
    """Mixin: run a `SingleColumnVolatilityModel` per dataframe column."""

    def _fit_predict_volatility_model(
        self, df: pd.DataFrame, fit: bool, out_col_prefix: Optional[str] = None
    ) -> Tuple[Dict[str, pd.DataFrame], collections.OrderedDict]:
        """
        Fit or predict a single-column volatility model for every column.

        :param df: input dataframe; one model is built per column
        :param fit: `True` to fit (storing per-column fit state), `False` to
            predict using previously stored fit state
        :param out_col_prefix: prefix for output columns; defaults to the
            column name
        :return: (column -> output dataframe, column -> model info)
        """
        results = {}
        infos = collections.OrderedDict()
        for column in df.columns:
            model = SingleColumnVolatilityModel(
                "volatility",
                steps_ahead=self._steps_ahead,
                col=column,
                p_moment=self._p_moment,
                progress_bar=self._progress_bar,
                tau=self._tau,
                nan_mode=self._nan_mode,
                out_col_prefix=out_col_prefix or column,
            )
            single_col_df = df[[column]]
            if fit:
                col_result = model.fit(single_col_df)["df_out"]
                col_info = model.get_info("fit")
                # Persist learned state so a later `predict` can restore it.
                self._col_fit_state[column] = model.get_fit_state()
            else:
                # Restore the state learned during `fit` for this column.
                model.set_fit_state(self._col_fit_state[column])
                col_result = model.predict(single_col_df)["df_out"]
                col_info = model.get_info("predict")
            results[column] = col_result
            infos[column] = col_info
        return results, infos
class VolatilityModel(
    FitPredictNode,
    ColModeMixin,
    _MultiColVolatilityModelMixin,
):
    """
    Fit and predict a smooth moving average volatility model.

    Wraps `SmaModel` internally, handling calculation of volatility from
    returns and column appends.
    """

    def __init__(
        self,
        nid: str,
        steps_ahead: int,
        cols: Optional[_TO_LIST_MIXIN_TYPE] = None,
        p_moment: float = 2,
        progress_bar: bool = False,
        tau: Optional[float] = None,
        col_rename_func: Callable[[Any], Any] = lambda x: f"{x}_zscored",
        col_mode: Optional[str] = None,
        nan_mode: Optional[str] = None,
    ) -> None:
        """
        Specify the data and smooth moving average (SMA) modeling parameters.

        :param nid: unique node id
        :param cols: name of columns to model; `None` means all columns
        :param steps_ahead: as in ContinuousSkLearnModel
        :param p_moment: exponent to apply to the absolute value of returns
        :param progress_bar: whether to display a progress bar during fitting
        :param tau: as in `csigna.compute_smooth_moving_average`. If `None`,
            learn this parameter
        :param col_rename_func: renaming function for z-scored column
        :param col_mode:
            - If "merge_all" (default), merge all columns from input dataframe and
              transformed columns
            - If "replace_selected", merge unselected columns from input dataframe
              and transformed selected columns
            - If "replace_all", leave only transformed selected columns
        :param nan_mode: as in ContinuousSkLearnModel
        """
        super().__init__(nid)
        self._cols = cols
        self._steps_ahead = steps_ahead
        #
        dbg.dassert_lte(1, p_moment)
        self._p_moment = p_moment
        #
        self._progress_bar = progress_bar
        #
        dbg.dassert(tau is None or tau > 0)
        self._tau = tau
        self._col_rename_func = col_rename_func
        self._col_mode = col_mode or "merge_all"
        self._nan_mode = nan_mode
        # State of the model to serialize/deserialize.
        self._fit_cols: List[_COL_TYPE] = []
        self._col_fit_state: Dict[str, Any] = {}

    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return self._fit_predict_helper(df_in, fit=True)

    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return self._fit_predict_helper(df_in, fit=False)

    def get_fit_state(self) -> Dict[str, Any]:
        """Return the serializable state learned during `fit()`."""
        fit_state = {
            "_fit_cols": self._fit_cols,
            "_col_fit_state": self._col_fit_state,
            "_info['fit']": self._info["fit"],
        }
        return fit_state

    def set_fit_state(self, fit_state: Dict[str, Any]):
        """Restore state previously produced by `get_fit_state()`."""
        self._fit_cols = fit_state["_fit_cols"]
        self._col_fit_state = fit_state["_col_fit_state"]
        self._info["fit"] = fit_state["_info['fit']"]

    def _fit_predict_helper(self, df_in: pd.DataFrame, fit: bool):
        """Shared implementation of `fit()` and `predict()`."""
        cdu.validate_df_indices(df_in)
        # Get the columns.
        self._fit_cols = cdu.convert_to_list(self._cols or df_in.columns.tolist())
        df = df_in[self._fit_cols]
        dfs, info = self._fit_predict_volatility_model(df, fit=fit)
        df_out = pd.concat(dfs.values(), axis=1)
        df_out = self._apply_col_mode(
            # Drop from `df_in` the columns that the volatility model
            # regenerates so the column merge does not duplicate them.
            # BUG FIX: `axis` must be passed by keyword - positional `axis`
            # for `DataFrame.drop` was deprecated in pandas 1.0 and removed
            # in pandas 2.0.
            df_in.drop(df_out.columns.intersection(df_in.columns), axis=1),
            df_out,
            cols=self._fit_cols,
            col_mode=self._col_mode,
        )
        method = "fit" if fit else "predict"
        self._set_info(method, info)
        return {"df_out": df_out}
class MultiindexVolatilityModel(FitPredictNode, _MultiColVolatilityModelMixin):
    """
    Fit and predict a smooth moving average volatility model on a multiindexed
    (column-grouped) dataframe.
    Wraps SmaModel internally, handling calculation of volatility from
    returns and column appends.
    """
    def __init__(
        self,
        nid: str,
        in_col_group: Tuple[_COL_TYPE, ...],
        steps_ahead: int,
        p_moment: float = 2,
        progress_bar: bool = False,
        tau: Optional[float] = None,
        nan_mode: Optional[str] = None,
    ) -> None:
        """
        Specify the data and sma modeling parameters.
        :param nid: unique node id
        :param in_col_group: tuple of column-index levels selecting the input
            group; the last level is used as the output column prefix
        :param steps_ahead: as in ContinuousSkLearnModel
        :param p_moment: exponent to apply to the absolute value of returns
        :param progress_bar: whether to display a progress bar during fitting
        :param tau: as in `csigna.compute_smooth_moving_average`. If `None`,
            learn this parameter
        :param nan_mode: as in ContinuousSkLearnModel
        """
        super().__init__(nid)
        dbg.dassert_isinstance(in_col_group, tuple)
        self._in_col_group = in_col_group
        # Output columns keep every level of the input group except the last;
        # the last level becomes the output column prefix.
        self._out_col_group = in_col_group[:-1]
        self._out_col_prefix = str(in_col_group[-1])
        #
        self._steps_ahead = steps_ahead
        dbg.dassert_lte(1, p_moment)
        self._p_moment = p_moment
        #
        self._progress_bar = progress_bar
        #
        self._tau = tau
        self._nan_mode = nan_mode
        # Per-column fit state collected by `_fit_predict_volatility_model()`.
        self._col_fit_state: Dict[str, Any] = {}
    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return self._fit_predict_helper(df_in, fit=True)
    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        return self._fit_predict_helper(df_in, fit=False)
    def get_fit_state(self) -> Dict[str, Any]:
        """Return the serializable state learned during `fit()`."""
        fit_state = {
            "_col_fit_state": self._col_fit_state,
            "_info['fit']": self._info["fit"],
        }
        return fit_state
    def set_fit_state(self, fit_state: Dict[str, Any]):
        """Restore state previously produced by `get_fit_state()`."""
        self._col_fit_state = fit_state["_col_fit_state"]
        self._info["fit"] = fit_state["_info['fit']"]
    def _fit_predict_helper(self, df_in: pd.DataFrame, fit: bool):
        """Shared implementation of `fit()` and `predict()`."""
        cdu.validate_df_indices(df_in)
        # Select the column group and flatten it into per-column series.
        df = SeriesToDfColProcessor.preprocess(df_in, self._in_col_group)
        dfs, info = self._fit_predict_volatility_model(
            df, fit=fit, out_col_prefix=self._out_col_prefix
        )
        # Re-assemble the per-column outputs under the output column group.
        df_out = SeriesToDfColProcessor.postprocess(dfs, self._out_col_group)
        df_out = cdu.merge_dataframes(df_in, df_out)
        method = "fit" if fit else "predict"
        self._set_info(method, info)
        return {"df_out": df_out}
class VolatilityModulator(FitPredictNode, ColModeMixin):
    """
    Modulate or demodulate signal by volatility.
    Processing steps:
    - shift volatility to align it with signal
    - multiply/divide signal by volatility
    Usage examples:
    - Z-scoring
        - to obtain volatility prediction, pass in returns into `SmaModel` with
          a `steps_ahead` parameter
        - to z-score, pass in signal, volatility prediction, `signal_steps_ahead=0`,
          `volatility_steps_ahead=steps_ahead`, `mode='demodulate'`
    - Undoing z-scoring
        - Let's say we have
            - forward volatility prediction `n` steps ahead
            - prediction of forward z-scored returns `m` steps ahead. Z-scoring
              for the target has been done using the volatility prediction above
        - To undo z-scoring, we need to pass in the prediction of forward
          z-scored returns, forward volatility prediction, `signal_steps_ahead=n`,
          `volatility_steps_ahead=m`, `mode='modulate'`
    """
    def __init__(
        self,
        nid: str,
        signal_cols: _TO_LIST_MIXIN_TYPE,
        volatility_col: _COL_TYPE,
        signal_steps_ahead: int,
        volatility_steps_ahead: int,
        mode: str,
        col_rename_func: Optional[Callable[[Any], Any]] = None,
        col_mode: Optional[str] = None,
        nan_mode: Optional[str] = None,
    ) -> None:
        """
        :param nid: node identifier
        :param signal_cols: names of columns to (de)modulate
        :param volatility_col: name of volatility column
        :param signal_steps_ahead: steps ahead of the signal columns. If signal
            is at `t_0`, this value should be `0`. If signal is a forward
            prediction of z-scored returns indexed by knowledge time, this
            value should be equal to the number of steps of the prediction
        :param volatility_steps_ahead: steps ahead of the volatility column. If
            volatility column is an output of `SmaModel`, this corresponds to
            the `steps_ahead` parameter
        :param mode: "modulate" or "demodulate"
        :param col_rename_func: as in `ColumnTransformer`
        :param col_mode: as in `ColumnTransformer`
        :param nan_mode: "drop" to drop NaNs from the volatility column before
            shifting, or "leave_unchanged" (default) to keep them
        """
        super().__init__(nid)
        self._signal_cols = cdu.convert_to_list(signal_cols)
        self._volatility_col = volatility_col
        dbg.dassert_lte(0, signal_steps_ahead)
        self._signal_steps_ahead = signal_steps_ahead
        dbg.dassert_lte(0, volatility_steps_ahead)
        self._volatility_steps_ahead = volatility_steps_ahead
        dbg.dassert_in(mode, ["modulate", "demodulate"])
        self._mode = mode
        # Default to the identity renaming if no function is provided.
        self._col_rename_func = col_rename_func or (lambda x: x)
        self._col_mode = col_mode or "replace_all"
        self._nan_mode = nan_mode or "leave_unchanged"
    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        df_out = self._process_signal(df_in)
        info = collections.OrderedDict()
        info["df_out_info"] = cdu.get_df_info_as_string(df_out)
        self._set_info("fit", info)
        return {"df_out": df_out}
    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        df_out = self._process_signal(df_in)
        info = collections.OrderedDict()
        info["df_out_info"] = cdu.get_df_info_as_string(df_out)
        self._set_info("predict", info)
        return {"df_out": df_out}
    def _process_signal(self, df_in: pd.DataFrame) -> pd.DataFrame:
        """
        Modulate or demodulate signal by volatility prediction.
        :param df_in: dataframe with `self._signal_cols` and
            `self._volatility_col` columns
        :return: adjusted signal indexed in the same way as the input signal
        """
        dbg.dassert_is_subset(self._signal_cols, df_in.columns.tolist())
        dbg.dassert_in(self._volatility_col, df_in.columns)
        fwd_signal = df_in[self._signal_cols]
        fwd_volatility = df_in[self._volatility_col]
        # Shift volatility to align it with signal.
        # A positive shift moves the (more forward-looking) volatility
        # prediction forward in index so it lines up with the signal.
        volatility_shift = self._volatility_steps_ahead - self._signal_steps_ahead
        if self._nan_mode == "drop":
            fwd_volatility = fwd_volatility.dropna()
        elif self._nan_mode == "leave_unchanged":
            pass
        else:
            raise ValueError(f"Unrecognized `nan_mode` {self._nan_mode}")
        volatility_aligned = fwd_volatility.shift(volatility_shift)
        # Adjust signal by volatility.
        if self._mode == "demodulate":
            adjusted_signal = fwd_signal.divide(volatility_aligned, axis=0)
        elif self._mode == "modulate":
            adjusted_signal = fwd_signal.multiply(volatility_aligned, axis=0)
        else:
            raise ValueError(f"Invalid mode=`{self._mode}`")
        df_out = self._apply_col_mode(
            df_in,
            adjusted_signal,
            cols=self._signal_cols,
            col_rename_func=self._col_rename_func,
            col_mode=self._col_mode,
        )
        return df_out
class VolatilityNormalizer(FitPredictNode, ColModeMixin):
    """
    Rescale a column so that it attains a target annualized volatility.

    The scale factor is learned in `fit()` and reused in `predict()`.
    """

    def __init__(
        self,
        nid: str,
        col: str,
        target_volatility: float,
        col_mode: Optional[str] = None,
    ) -> None:
        """
        Normalize series to target annual volatility.

        :param nid: node identifier
        :param col: name of column to rescale
        :param target_volatility: target volatility as a proportion
        :param col_mode: `merge_all` or `replace_all`. If `replace_all`, return
            only the rescaled column, if `merge_all`, append the rescaled
            column to input dataframe
        """
        super().__init__(nid)
        self._col = col
        self._target_volatility = target_volatility
        self._col_mode = col_mode or "merge_all"
        dbg.dassert_in(
            self._col_mode,
            ["merge_all", "replace_all"],
            "Invalid `col_mode`='%s'",
            self._col_mode,
        )
        # Learned during `fit()`; required by `predict()`.
        self._scale_factor: Optional[float] = None

    def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        dbg.dassert_in(self._col, df_in.columns)
        self._scale_factor = cfinan.compute_volatility_normalization_factor(
            df_in[self._col], self._target_volatility
        )
        df_out = self._rescale(df_in)
        # Store info.
        info = collections.OrderedDict()
        info["scale_factor"] = self._scale_factor
        self._set_info("fit", info)
        return {"df_out": df_out}

    def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
        dbg.dassert_in(self._col, df_in.columns)
        # Fail fast with a clear message instead of the opaque `TypeError`
        # that `None * series` used to raise when `fit()` was skipped.
        if self._scale_factor is None:
            raise ValueError("`fit()` must be called before `predict()`.")
        df_out = self._rescale(df_in)
        return {"df_out": df_out}

    def _rescale(self, df_in: pd.DataFrame) -> pd.DataFrame:
        """Scale `self._col` by the learned factor and apply the column mode."""
        rescaled_y_hat = self._scale_factor * df_in[self._col]
        return self._apply_col_mode(
            df_in,
            rescaled_y_hat.to_frame(),
            cols=[self._col],
            col_rename_func=lambda x: f"rescaled_{x}",
            col_mode=self._col_mode,
        )
| 37.818297 | 84 | 0.606659 | 28,789 | 0.967275 | 0 | 0 | 212 | 0.007123 | 0 | 0 | 8,454 | 0.284044 |
ff3fa16f112510d5cb03e41854b90f0444d90f6d | 3,228 | py | Python | tempgres/service.py | ClockworkConsulting/django-tempgres | ba9a061f5f61730784522f4b0f75a90c19826fa6 | [
"Apache-2.0"
] | null | null | null | tempgres/service.py | ClockworkConsulting/django-tempgres | ba9a061f5f61730784522f4b0f75a90c19826fa6 | [
"Apache-2.0"
] | null | null | null | tempgres/service.py | ClockworkConsulting/django-tempgres | ba9a061f5f61730784522f4b0f75a90c19826fa6 | [
"Apache-2.0"
] | null | null | null | import logging
import requests
logger = logging.getLogger(__name__)
class TempgresDatabaseService:
    """Connection to a tempgres service instance defined by an URL."""

    def __init__(self, url, **kwargs):
        """Set up the tempgres connection.

        ``url`` can be None, in which case ``kwargs`` must contain
        the database information as a dictionary.

        :param url: URL of the tempgres service, or None/falsy to use kwargs
        :param kwargs: fallback database info (e.g. USER, PASSWORD, HOST, PORT, NAME)
        :raises ValueError: if neither a URL nor database info is supplied
        """
        self.url = url
        self.response = None
        self.data = kwargs
        # Do POST if we have an URL.
        if self.url:
            if kwargs:
                # NOTE(review): when both a URL and kwargs are given, the
                # parsed response overwrites the kwargs values below --
                # confirm the intended precedence matches this warning.
                logger.warning(
                    "Tempgres will use the database information "
                    "which has been supplied in kwargs!"
                )
            self._post()
        if self.response is None:
            # No response: fall back to the data supplied in kwargs.
            # BUG FIX: the old code called `_parse()` on this branch, which
            # always raised (there is no response to parse), and never parsed
            # an actual response. `_parse()` belongs on the `else` branch.
            if not kwargs:
                raise ValueError("Data dictionary must be supplied")
        else:
            self._parse()

    def _post(self):
        """POST to the tempgres service and store the response.

        Raises an HTTPError (via `raise_for_status`) on 4xx/5xx responses.
        """
        self.response = requests.post(url=self.url)
        if not (self.response.status_code == requests.codes.ok):
            self.response.raise_for_status()

    def _parse(self):
        """Parse the response from a tempgres instance.

        The service is expected to answer with exactly five lines:
        user, password, host, port, database name.
        Raises an error if the response does not conform.
        """
        if self.response is None:
            raise ValueError("No response exists!")
        parsed = self.response.text.splitlines()
        if not (len(parsed) == 5):
            raise TypeError("Response does not conform to expected format!")
        self.data['USER'] = parsed[0]
        self.data['PASSWORD'] = parsed[1]
        self.data['HOST'] = parsed[2]
        self.data['PORT'] = parsed[3]
        self.data['NAME'] = parsed[4]

    def __str__(self):
        # One "KEY: value" line per stored entry.
        a = []
        for k in self.data.keys():
            a.append("{}: {}".format(k, self.data[k]))
        return "\n".join(a)

    def setdefault(self, key, value):
        return self.data.setdefault(key, value)

    def __getitem__(self, value):
        return self.data[value]

    def __contains__(self, value):
        return value in self.data

    def get(self, key, value=None):
        return self.data.get(key, value)
# Main method for rudimentary testing of TempgresDatabaseService.
if __name__ == "__main__":
    print("Test run of TempgresDatabaseService")
    print("")
    db_info = {
        'USER': 'tempgres-client',
        'PASSWORD': 'tempgres-cpass',
        'HOST': 'localhost',
        'PORT': '5432',
        'NAME': 'temp_c23ff99a_ff56_4810_8692_7f779564b073'
    }
    tmpgres = None
    # Constructing with neither URL nor data must raise ValueError.
    try:
        tmpgres = TempgresDatabaseService(None)
    except ValueError as e:
        # BUG FIX: "Caugth" -> "Caught" in the log message.
        logger.exception("Caught exception with text: {}".format(e))
    try:
        # Should produce a warning (and fails because of the url)
        tmpgres = TempgresDatabaseService('localhost', **db_info)
    except requests.exceptions.MissingSchema:
        print("OK")
    print("")
    # Create a tempgres service with custom data
    tmpgres = TempgresDatabaseService(None, **db_info)
    for key in tmpgres.data.keys():
        print("{}: {}".format(key, tmpgres[key]))
| 28.315789 | 78 | 0.589839 | 2,224 | 0.688971 | 0 | 0 | 0 | 0 | 0 | 0 | 1,093 | 0.3386 |
ff3fe555e0f853acd0d7022e07b47e9bc456ae3d | 323 | py | Python | tests/client/test_get_balance.py | kanzure/eth-testrpc | cbb22ff2bebc9f12098b0bb7f2a245f3510522b4 | [
"MIT"
] | 164 | 2016-04-20T08:30:22.000Z | 2022-01-05T07:43:12.000Z | tests/client/test_get_balance.py | kanzure/eth-testrpc | cbb22ff2bebc9f12098b0bb7f2a245f3510522b4 | [
"MIT"
] | 55 | 2016-06-30T20:06:56.000Z | 2019-12-12T07:36:02.000Z | tests/client/test_get_balance.py | kanzure/eth-testrpc | cbb22ff2bebc9f12098b0bb7f2a245f3510522b4 | [
"MIT"
def test_deploying_contract(client, hex_accounts):
    """Sending value increases the recipient's balance by exactly the sent amount."""
    sender, recipient = hex_accounts[0], hex_accounts[1]
    balance_before = client.get_balance(recipient)
    client.send_transaction(
        _from=sender,
        to=recipient,
        value=1234,
    )
    balance_after = client.get_balance(recipient)
    assert balance_after - balance_before == 1234
| 24.846154 | 54 | 0.693498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ff4088986ff98e499e58a08879fd7b5b158a61fc | 893 | py | Python | marmot/features/google_translate_feature_extractor.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 19 | 2015-08-21T13:06:37.000Z | 2021-07-26T09:56:29.000Z | marmot/features/google_translate_feature_extractor.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 36 | 2015-01-13T13:01:07.000Z | 2016-06-22T06:59:59.000Z | marmot/features/google_translate_feature_extractor.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 8 | 2015-12-11T16:41:47.000Z | 2019-04-08T16:28:40.000Z | from goslate import Goslate
from nltk import word_tokenize
import ipdb
from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError
class GoogleTranslateFeatureExtractor(FeatureExtractor):
    """Binary feature: does the target token occur in a (pseudo-)reference translation?"""

    def __init__(self, lang='en'):
        # Target language code passed to the Google Translate backend.
        self.lang = lang

    def get_features(self, context_obj):
        """Return [1] if the token appears in the reference translation, else [0].

        Uses `context_obj['pseudo-reference']` when present; otherwise
        translates the joined source tokens with Goslate.
        """
        if 'source' not in context_obj:
            raise NoDataError('source', context_obj, 'GoogleTranslateFeatureExtractor')
        if 'pseudo-reference' in context_obj:
            translation = context_obj['pseudo-reference']
        else:
            translation = word_tokenize(
                Goslate().translate(' '.join(context_obj['source']), self.lang)
            )
        return [1] if context_obj['token'] in translation else [0]

    def get_feature_names(self):
        return ["pseudo-reference"]
| 30.793103 | 97 | 0.677492 | 699 | 0.782755 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.139978 |
ff40b32664a84e5bb2c3ecaa367f1779b82a8367 | 235 | py | Python | hackerrank/built_ins/zipped.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
] | null | null | null | hackerrank/built_ins/zipped.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
] | null | null | null | hackerrank/built_ins/zipped.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
if __name__ == '__main__':
    # First line: student count (unused) and subject count X.
    _, subject_count = input().split()
    # One line of scores per subject.
    scores_by_subject = [map(float, input().split()) for _ in range(int(subject_count))]
    # zip(*...) regroups per-subject rows into per-student columns;
    # print each student's average to one decimal place.
    for student_scores in zip(*scores_by_subject):
        print("{0:.1f}".format(sum(student_scores) / len(student_scores)))
| 23.5 | 52 | 0.544681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.080851 |
ff45f44cc60ac4208193a70ca5502f0bb47aa1a1 | 9,110 | py | Python | scripts/dspace_dsa_maker.py | masonpublishing/dspace_simple_archives_import | 02b90aca955dc34019e4efd77d823dbe91a9812e | [
"Apache-2.0"
] | 1 | 2019-06-04T15:36:39.000Z | 2019-06-04T15:36:39.000Z | scripts/dspace_dsa_maker.py | masonpublishing/dspace_simple_archives_import | 02b90aca955dc34019e4efd77d823dbe91a9812e | [
"Apache-2.0"
] | null | null | null | scripts/dspace_dsa_maker.py | masonpublishing/dspace_simple_archives_import | 02b90aca955dc34019e4efd77d823dbe91a9812e | [
"Apache-2.0"
] | 1 | 2019-06-04T15:42:39.000Z | 2019-06-04T15:42:39.000Z | """
dspace_dsa_maker.py - prepare DSpace Simple Archives for import.
Copyright 2018 University of Toronto Libraries
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/python
# coding=utf8
from zipfile import ZipFile
import os
import shutil
import glob
import sys
from bs4 import BeautifulSoup as makesoup
class DSpaceDSAMaker(object):
"""Generate DSpace Simple Archives for each zip archive in deposit directory
"""
def __init__(self):
"""Make working directories and start iteration of deposited zips.
"""
print "Launching DSpace DSA Maker. \nPreparing work directories."
self.root = os.path.dirname(os.path.abspath(__file__))
self.deposit = os.path.join(self.root, '../deposit/')
self.extract = os.path.join(self.root, '../extract/')
self.ingest = os.path.join(self.root, '../ingest/')
# purge the extract directory of failed ingests
if os.path.isdir(self.extract):
shutil.rmtree(self.extract)
# make deposit, extract and ingest work directories
for current_dir in [self.deposit, self.extract, self.ingest]:
if not os.path.isdir(current_dir):
os.mkdir(current_dir)
print "Made " + current_dir
# Set the year of publication based on metadata
self.year = ''
self.filename = ''
self.extract_dir = ''
self.journal_name = ''
self.iterate()
def iterate(self):
"""For each zip file in the deposit directory, call work functions.
"""
if not os.listdir(self.deposit):
print "Nothing to ingest. Please check " + self.deposit
else:
print "Checking " + self.deposit + " for new items to ingest."
for original_zip in os.listdir(self.deposit):
if original_zip.endswith('.zip'):
print "Found " + original_zip
self.current_zip = original_zip
self.extract_zip(original_zip)
self.crosswalk()
self.contents()
self.move_to_ingest()
def extract_zip(self, zipname):
"""Extract one zip file and set up instance variables for later use.
"""
zipfile = ZipFile(self.deposit + zipname)
zipfile.extractall(self.extract)
self.filename = zipname.split(".zip")[0]
self.extract_dir = os.path.join(self.extract, self.filename)
self.journal_name = zipname.split("-")[0]
def crosswalk(self):
"""Produce dublin_core.xml by matching tags from original XML to user-defined tags.
Change this function to match your original XML fieldset.
"""
os.chdir(self.extract_dir)
# assume the main metadata is the only XML file in the original directory
base = open(glob.glob("*-metadata.xml")[0])
soup = makesoup(base, 'xml')
# make a container soup, and make smaller soups for each field,
# which are inserted into the container.
newsoup = makesoup('<dublin_core schema="dc"></dublin_core>', 'xml')
tag_list = []
# title
tag_list.append(makesoup("<dcvalue element='title'>" + \
soup.find('article-title').string + "</dcvalue>", 'xml').contents[0])
# author(s)
for author_container in soup.find_all('contrib', {'contrib-type' : 'author'}):
tag_list.append(makesoup("<dcvalue element='contributor' qualifier='author'>" + \
author_container.surname.string + ", " + \
author_container.find('given-names').string + \
"</dcvalue>", 'xml').contents[0])
for author_container in soup.find_all('contrib', {'contrib-type' : 'editor'}):
tag_list.append(makesoup("<dcvalue element='contributor' qualifier='editor'>" + \
author_container.surname.string + ", " + \
author_container.find('given-names').string + \
"</dcvalue>", 'xml').contents[0])
# abstract
tag_list.append(makesoup("<dcvalue element='abstract'>" + soup.abstract.p.string + \
"</dcvalue>", 'xml').contents[0])
# date(s)
date_accepted = soup.find('date', {'date-type' : 'accepted'})
self.year = date_accepted.year.string
tag_list.append(makesoup("<dcvalue element='date' qualifier='issued'>" + \
"-".join((date_accepted.year.string, date_accepted.month.string, \
date_accepted.day.string)) + "</dcvalue>", 'xml').contents[0])
# publisher
tag_list.append(makesoup("<dcvalue element='publisher'>" \
+ soup.find('publisher-name').string + \
"</dcvalue>", 'xml').contents[0])
# issn
tag_list.append(makesoup("<dcvalue element='identifier' qualifier='issn'>" \
+ soup.find("issn", {'pub-type' : 'ppub'}).string \
+ "</dcvalue>", 'xml').contents[0])
# essn
tag_list.append(makesoup("<dcvalue element='identifier' qualifier='issn'>" \
+ soup.find("issn", {'pub-type' : 'epub'}).string \
+ "</dcvalue>", 'xml').contents[0])
# DOI
tag_list.append(makesoup("<dcvalue element='identifier'" \
+ "qualifier='doi'>https://dx.doi.org/" \
+ soup.find('article-id', {'pub-id-type' : 'doi'}).string \
+ "</dcvalue>", 'xml').contents[0])
# insert all created tags from taglist into the container soup
dublin_core = newsoup.find('dublin_core')
for tag in tag_list:
dublin_core.append(tag)
# write out the complete DSpace DC
dublin_core = open("dublin_core.xml", 'w')
dublin_core.write(str(newsoup))
dublin_core.close()
os.chdir("../")
def contents(self):
"""Generate the plain text list of bitstreams for DSpace import.
Common use case is a single PDF, sometimes with supplementary files of all formats.
This uses filename matching, so change the pattern according to your setup.
"""
os.chdir(self.extract_dir)
# assuming dublin_core.xml has been successfully made. Old MD can be deleted.
os.remove(glob.glob("*-metadata.xml")[0])
# also delete the directories that were transferred
# but never used - ScholarOne sends these by default
os.remove(glob.glob("*-manifest.html")[0])
shutil.rmtree('pdf_renditions')
shutil.rmtree('doc')
# move the main PDF from its containing directory to the root directory we are working in
if os.path.isdir('pdf'):
manuscript = os.path.basename(glob.glob('pdf/*.pdf')[0])
shutil.move('pdf/' + manuscript, '.')
shutil.rmtree('pdf')
else:
sys.exit("Unable to produce DSpaceSA for zip file " + \
self.current_zip + "\nDirectory 'pdf' could not be found.")
# in the same fashion, move any suppl files to the root directory
if os.path.isdir('suppl_data'):
for suppl_file in os.listdir('suppl_data'):
shutil.move('suppl_data/' + suppl_file, '.')
shutil.rmtree('suppl_data')
# add all non-dspace files into the contents file list text file for dspace import
# assuming we cleaned up all non-importable files such as
# pdf_renditions, doc and the manifest
contents = open('contents', 'w')
contents.write(manuscript + "\n")
for found_file in os.listdir("."):
if found_file not in ['dublin_core.xml', 'contents', manuscript]:
contents.write(found_file + '\n')
contents.close()
def move_to_ingest(self):
"""Since DSpace import requires a root directory for each collection,
separate deposit item into directories that map to collections in DSpace.
The import python script will pick these up.
"""
ingest_path = os.path.join(self.ingest, self.journal_name, self.year)
if not os.path.exists(ingest_path):
os.makedirs(ingest_path)
target = os.path.join(ingest_path, self.filename)
if os.path.exists(target):
shutil.rmtree(target)
shutil.move(self.extract_dir, os.path.join(ingest_path, self.filename))
# Script entry point: process every deposited zip into a Simple Archive.
if __name__ == "__main__":
    DSpaceDSAMaker()
| 42.570093 | 97 | 0.597475 | 8,265 | 0.907245 | 0 | 0 | 0 | 0 | 0 | 0 | 4,003 | 0.439407 |
ff46471a452e693a79e98c3a031f0fa30bba73ed | 533 | py | Python | tests/test_notes_append_header.py | fujiawei-dev/tookit-py | 5ab3a18a41885f6166150cc27183621b96f8f991 | [
"BSD-3-Clause"
] | null | null | null | tests/test_notes_append_header.py | fujiawei-dev/tookit-py | 5ab3a18a41885f6166150cc27183621b96f8f991 | [
"BSD-3-Clause"
] | null | null | null | tests/test_notes_append_header.py | fujiawei-dev/tookit-py | 5ab3a18a41885f6166150cc27183621b96f8f991 | [
"BSD-3-Clause"
] | null | null | null | """
Date: 2022.04.13 10:28
Description: Omit
LastEditors: Rustle Karl
LastEditTime: 2022.04.13 10:28
"""
import tempfile
from pathlib import Path
from create_config_file.notes import notes_append_header
def test_notes_append_header():
    """Smoke-test notes_append_header on files without and with an existing header."""
    target = Path(tempfile.gettempdir()) / "file.txt"
    for initial_content in ("old_content", "---old_content"):
        target.write_text(initial_content)
        notes_append_header(target)
        print(target.read_text())
        target.unlink()
| 20.5 | 56 | 0.718574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.268293 |
ff46c04244d1b8d9066db29c8d66cfe8df3315a5 | 11,620 | py | Python | src/seirsplus/models/compartment_model_builder.py | SEIRS-Plus/v2 | 3adc155400deaa4093e523ae81d2a25989888654 | [
"MIT"
] | 1 | 2022-03-04T08:05:58.000Z | 2022-03-04T08:05:58.000Z | src/seirsplus/models/compartment_model_builder.py | SEIRS-Plus/v2 | 3adc155400deaa4093e523ae81d2a25989888654 | [
"MIT"
] | null | null | null | src/seirsplus/models/compartment_model_builder.py | SEIRS-Plus/v2 | 3adc155400deaa4093e523ae81d2a25989888654 | [
"MIT"
] | null | null | null | """
Programmatically building up compartment models
"""
# Standard Libraries
import json
# External Libraries
import numpy as np
class CompartmentModelBuilder:
"""
The CompartmentModelBuilder class gives helper functions for defining
a new compartment model from scratch within a python script. Initializes
an empty dictionary, compartments, that new compartments can be added to.
"""
    def __init__(self):
        """Create a builder with an empty compartments registry."""
        self.compartments = {}
def add_compartment(
self,
name,
transitions=None,
transmissibilities=None,
susceptibilities=None,
initial_prevalence=0.0,
exogenous_prevalence=0.0,
flags=None,
default_state=None,
exclude_from_eff_pop=False,
):
"""
Function to build an individual compartment for a compartment model
Args:
name (string): name of the compartment
transitions:
susceptibilities:
initial_prevalence (float):
exogenous_prevalence (float):
flags:
default_state:
exclude_from_eff_pop:
"""
self.compartments[name] = {
"transitions": transitions if transitions is not None else {},
"transmissibilities": transmissibilities
if transmissibilities is not None
else {},
"susceptibilities": susceptibilities
if susceptibilities is not None
else {},
"initial_prevalence": initial_prevalence,
"exogenous_prevalence": exogenous_prevalence,
"flags": flags if flags is not None else [],
"default_state": default_state if default_state is not None else False,
"exclude_from_eff_pop": exclude_from_eff_pop,
}
if default_state is None and not any(
[self.compartments[c]["default_state"] for c in self.compartments]
):
# There is no default state set so far, make this new compartment the default
self.compartments[name]["default_state"] = True
def add_compartments(self, names):
"""Function to compartments to a compartment model using the add_compartment function
Args:
names (list): list of compartment names to add to the compartment model
"""
for name in names:
self.add_compartment(name)
def add_transition(
self, compartment, to, upon_exposure_to=None, rate=None, time=None, prob=None
):
"""function to add transition for one compartment and destination state at a time
Args:
compartment (string): name of compartment
to (string):
upon_exposure_to (list): list of compartments that can cause a transition
rate:
time (float): how long it takes for transition to occur
prob (float): likelihood of the transition occuring
"""
infectiousStates = (
[upon_exposure_to]
if (
not isinstance(upon_exposure_to, (list, np.ndarray))
and upon_exposure_to is not None
)
else upon_exposure_to
)
if upon_exposure_to is None: # temporal transition
transn_config = {}
if time is not None:
transn_config.update({"time": time, "rate": 1 / time})
if rate is not None:
transn_config.update({"rate": rate, "time": 1 / rate})
if prob is not None:
transn_config.update({"prob": prob})
self.compartments[compartment]["transitions"].update({to: transn_config})
else: # transmission-induced transition
for infectiousState in infectiousStates:
transn_config = {}
if prob is not None:
# transmission-induced transition do not have rates/times
transn_config.update({"prob": prob})
if (
infectiousState
in self.compartments[compartment]["susceptibilities"]
):
self.compartments[compartment]["susceptibilities"][infectiousState][
"transitions"
].update({to: transn_config})
else:
self.compartments[compartment]["susceptibilities"].update(
{infectiousState: {"transitions": {to: transn_config}}}
)
def set_transition_rate(self, compartment, to, rate):
# Note that it only makes sense to set a rate for temporal transitions.
compartments = (
[compartment]
if not isinstance(compartment, (list, np.ndarray))
else compartment
)
destStates = [to] if not isinstance(to, (list, np.ndarray)) else to
for compartment in compartments:
transn_dict = self.compartments[compartment]["transitions"]
for destState in destStates:
try:
transn_dict[destState]["rate"] = rate
transn_dict[destState]["time"] = 1 / rate
except KeyError:
transn_dict[destState] = {"rate": rate}
transn_dict[destState] = {"time": 1 / rate}
def set_transition_time(self, compartment, to, time):
# Note that it only makes sense to set a time for temporal transitions.
compartments = (
[compartment]
if not isinstance(compartment, (list, np.ndarray))
else compartment
)
destStates = [to] if not isinstance(to, (list, np.ndarray)) else to
for compartment in compartments:
transn_dict = self.compartments[compartment]["transitions"]
for destState in destStates:
try:
transn_dict[destState]["time"] = time
transn_dict[destState]["rate"] = 1 / time
except KeyError:
transn_dict[destState] = {"time": time}
transn_dict[destState] = {"rate": 1 / time}
def set_transition_probability(
    self, compartment, probs_dict, upon_exposure_to=None
):
    """Assign transition probabilities for one or more compartments.

    Args:
        compartment: a single compartment key or a list/array of keys.
        probs_dict: mapping of destination state -> probability.
        upon_exposure_to: if given, the probabilities are written under the
            named infectious state(s) in the compartment's "susceptibilities"
            entry (transmission-induced transitions); otherwise they are
            written on the compartment's plain temporal "transitions".
    """
    comp_list = compartment if isinstance(compartment, (list, np.ndarray)) else [compartment]
    for comp in comp_list:
        if upon_exposure_to is None:
            target_dicts = [self.compartments[comp]["transitions"]]
        else:
            exposures = (
                upon_exposure_to
                if isinstance(upon_exposure_to, (list, np.ndarray))
                else [upon_exposure_to]
            )
            target_dicts = [
                self.compartments[comp]["susceptibilities"][state]["transitions"]
                for state in exposures
            ]
        for transn_dict in target_dicts:
            for dest, p in probs_dict.items():
                # Update the existing entry in place, or create a fresh one.
                if dest in transn_dict:
                    transn_dict[dest]["prob"] = p
                else:
                    transn_dict[dest] = {"prob": p}
def set_susceptibility(self, compartment, to, susceptibility=1.0, transitions=None):
    """Set the susceptibility of compartment(s) to infectious state(s).

    Args:
        compartment: a single compartment key or a list/array of keys.
        to: infectious state (or list/array of states) being susceptible to.
        susceptibility: susceptibility value to store (default 1.0).
        transitions: optional dict of transmission-induced transitions; a
            shallow copy is stored per (compartment, state) pair.

    BUG FIX: the signature previously used a mutable default (``transitions={}``)
    and stored the *same* dict object for every (compartment, state) pair, so
    mutating one pair's transitions silently mutated all of them.
    """
    compartments = (
        [compartment]
        if not isinstance(compartment, (list, np.ndarray))
        else compartment
    )
    infectiousStates = [to] if not isinstance(to, (list, np.ndarray)) else to
    for compartment in compartments:
        for infectiousState in infectiousStates:
            self.compartments[compartment]["susceptibilities"][infectiousState] = {
                "susceptibility": susceptibility,
                # fresh copy per pair so entries never alias each other
                "transitions": dict(transitions) if transitions else {},
            }
def set_transmissibility(self, compartment, transm_mode, transmissibility=0.0):
    """Set the transmissibility of compartment(s) for transmission mode(s).

    Args:
        compartment: a single compartment key or a list/array of keys.
        transm_mode: a single transmission-mode key or a list/array of keys.
        transmissibility: value stored per (compartment, mode) pair.
    """
    compartments = (
        [compartment]
        if not isinstance(compartment, (list, np.ndarray))
        else compartment
    )
    transmModes = (
        [transm_mode]
        if not isinstance(transm_mode, (list, np.ndarray))
        else transm_mode
    )
    for compartment in compartments:
        transm_dict = self.compartments[compartment]["transmissibilities"]
        for transmMode in transmModes:
            # BUG FIX: previously `transm_dict` was rebound to dict.update()'s
            # return value (None) inside this loop — dead and misleading.
            transm_dict[transmMode] = transmissibility
def set_initial_prevalence(self, compartment, prevalence=0.0):
    """Set the initial prevalence for one compartment or a list of compartments."""
    targets = compartment if isinstance(compartment, (list, np.ndarray)) else [compartment]
    for name in targets:
        self.compartments[name]["initial_prevalence"] = prevalence
def set_exogenous_prevalence(self, compartment, prevalence=0.0):
    """Set the exogenous prevalence for one compartment or a list of compartments."""
    targets = compartment if isinstance(compartment, (list, np.ndarray)) else [compartment]
    for name in targets:
        self.compartments[name]["exogenous_prevalence"] = prevalence
def set_default_state(self, compartment):
    """Mark *compartment* as the default state and clear the flag on all others."""
    for name in self.compartments:
        self.compartments[name]["default_state"] = (name == compartment)
def set_exclude_from_eff_pop(self, compartment, exclude=True):
    """Mark compartment(s) as excluded from (or included in) the effective population."""
    targets = compartment if isinstance(compartment, (list, np.ndarray)) else [compartment]
    for name in targets:
        self.compartments[name]["exclude_from_eff_pop"] = exclude
def add_compartment_flag(self, compartment, flag):
    """Append flag(s) to the flag list of the given compartment(s).

    The special value "all" targets every integer index in range(pop_size).
    """
    if compartment == "all":
        targets = list(range(self.pop_size))
    elif isinstance(compartment, (list, np.ndarray)):
        targets = compartment
    else:
        targets = [compartment]
    new_flags = flag if isinstance(flag, (list, np.ndarray)) else [flag]
    for name in targets:
        for fl in new_flags:
            self.compartments[name]["flags"].append(fl)
def remove_compartment_flag(self, compartment, flag):
    """Remove all occurrences of flag(s) from the given compartment(s).

    The special value "all" targets every integer index in range(pop_size).
    """
    if compartment == "all":
        targets = list(range(self.pop_size))
    elif isinstance(compartment, (list, np.ndarray)):
        targets = compartment
    else:
        targets = [compartment]
    doomed = flag if isinstance(flag, (list, np.ndarray)) else [flag]
    for name in targets:
        for fl in doomed:
            # rebuild the list without any occurrence of fl
            kept = [f for f in self.compartments[name]["flags"] if f != fl]
            self.compartments[name]["flags"] = kept
def save_json(self, filename):
    """Serialize the compartment model's configuration to *filename* as indented JSON."""
    serialized = json.dumps(self.compartments, indent=6)
    with open(filename, "w") as fp:
        fp.write(serialized)
| 39.124579 | 93 | 0.569535 | 11,487 | 0.988554 | 0 | 0 | 0 | 0 | 0 | 0 | 2,399 | 0.206454 |
ff46c556255a2b8fc5d5a10506e048de2e0a70b8 | 959 | py | Python | examples/multifig.py | ianhi/mpl-playback | 2c0896205b94e2c3c1ee9e8582291cc2e94aab99 | [
"BSD-3-Clause"
] | 1 | 2021-05-06T22:38:23.000Z | 2021-05-06T22:38:23.000Z | examples/multifig.py | ianhi/mpl-playback | 2c0896205b94e2c3c1ee9e8582291cc2e94aab99 | [
"BSD-3-Clause"
] | 4 | 2021-01-12T02:15:00.000Z | 2021-06-30T21:14:43.000Z | examples/multifig.py | ianhi/mpl-playback | 2c0896205b94e2c3c1ee9e8582291cc2e94aab99 | [
"BSD-3-Clause"
] | null | null | null | """
================
Multiple Figures
================
Test case for putting multiple figures into sphinx gallery.
The playback file was generated using:
``record_file("multifig.py", ["slider_fig", "fig"])``
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
# The parametrized function to be plotted
def f(t, amplitude, frequency):
    """Return the signal amplitude * sin(2*pi*frequency*t)."""
    angular = 2 * np.pi * frequency
    return amplitude * np.sin(angular * t)
# Time samples covering one second; reused by every slider-driven redraw.
t = np.linspace(0, 1, 1000)
# Define initial parameters
init_amplitude = 5
init_frequency = 3
# Create the figure and the line that we will manipulate
fig, ax = plt.subplots()
# Keep a handle to the Line2D so the slider callback can update its y-data.
(line,) = plt.plot(t, f(t, init_amplitude, init_frequency), lw=2)
# The function to be called anytime a slider's value changes
def update(val):
    """Slider callback: recompute the curve at frequency *val* (amplitude fixed at 5)."""
    new_y = f(t, 5, val)
    line.set_ydata(new_y)
    fig.canvas.draw_idle()
# A second, separate figure that holds only the frequency slider.
slider_fig, s_ax = plt.subplots(figsize=(6.4, 2))
slider = Slider(s_ax, "freq", 0, 30, valinit=3)
# Redraw the main figure whenever the slider moves.
slider.on_changed(update)
plt.show()
| 22.833333 | 65 | 0.694473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 404 | 0.421272 |
ff46cfaed4a3b5bf3770ac15e877c7b30e1c87cf | 2,085 | py | Python | Chapter7 - whileandinput.py | Gmentesh2/Chapter7-input-whileloop-exerciseHM | 42b96227fa3262c30d5872d425cb7e1581f08e14 | [
"MIT"
] | null | null | null | Chapter7 - whileandinput.py | Gmentesh2/Chapter7-input-whileloop-exerciseHM | 42b96227fa3262c30d5872d425cb7e1581f08e14 | [
"MIT"
] | null | null | null | Chapter7 - whileandinput.py | Gmentesh2/Chapter7-input-whileloop-exerciseHM | 42b96227fa3262c30d5872d425cb7e1581f08e14 | [
"MIT"
] | null | null | null | promt = "Here you can enter"
promt += "a series of toppings>>>"
message = True
while message:
message = input(promt)
if message == "Quit":
print(" I'll add " + message + " to your pizza.")
else:
break
# Ask for ages until the user types 'quit'; tiers: under 3 free, under 13 $10, else $15.
promt = "How old are you?"
promt += "\nEnter 'quit' when you are finished >>>"
while True:
    answer = input(promt)
    if answer == "quit":
        break
    years = int(answer)
    if years < 3:
        print("Your ticket cost is free")
    elif years < 13:
        print("Your ticket price is 10 $ ")
    else:
        print("Your ticket price is 15 $")
# Work through the pending orders (last in, first out) and announce each one.
sandwich_orders = ["hamburger", "cheeseburger", "veggie burger", "royal"]
finished_sandwiches = []
while sandwich_orders:
    current = sandwich_orders.pop()
    print(f"we are working on {current} at the moment")
    finished_sandwiches.append(current)
print("\n")
for sandwich in finished_sandwiches:
    print(f"We have made {sandwich}, you can take it now")
# Pastrami has sold out: strip every occurrence from the queue, then make the rest.
sandwich_orders = ["pastrami", "hamburger", "cheeseburger", "pastrami", "veggie burger", "royal", "pastrami"]
finished_sandwiches = []
print("We are sorry, pastrami sandwich ran out of")
while "pastrami" in sandwich_orders:
    sandwich_orders.remove("pastrami")
print("\n")
while sandwich_orders:
    order = sandwich_orders.pop()
    print(f"Your order {order} will be prepared soon ! ")
    finished_sandwiches.append(order)
print("\n")
for sandwich in finished_sandwiches:
    print(f"Your order {sandwich} is ready, take it")
# Simple travel poll: collect name -> destination pairs, then print the results.
name_promt = "Whats your name?>>>"
place_promt = "Where would you like to go?>>>"
next_promt = "\nWould you like to go with someone?(yes/no)>>>"
responses = {}
while True:
    name = input(name_promt)
    place = input(place_promt)
    responses[name] = place
    other_question = input(next_promt)
    # NOTE(review): the poll stops when the user answers "yes"; polling loops
    # usually stop on "no" — confirm the intended semantics of next_promt.
    if other_question == "yes":
        break
print(">>>results<<<")
for name,place in responses.items():
    print(name.title() + " would like to go in " + place.title() + ".")
ff46d17ceffbb94fbdc105066b86962459c8d03e | 3,380 | py | Python | scrape_mars.py | Jessica-Xia/web-scraping-challenge | 43fc16862cc20e9ac9f93e5e1a960c354fc5a964 | [
"ADSL"
] | null | null | null | scrape_mars.py | Jessica-Xia/web-scraping-challenge | 43fc16862cc20e9ac9f93e5e1a960c354fc5a964 | [
"ADSL"
] | null | null | null | scrape_mars.py | Jessica-Xia/web-scraping-challenge | 43fc16862cc20e9ac9f93e5e1a960c354fc5a964 | [
"ADSL"
] | null | null | null | from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
import time
def init_browser():
    """Launch a visible (non-headless) Chrome splinter Browser via the local chromedriver."""
    driver_path = {"executable_path": "chromedriver.exe"}
    return Browser("chrome", headless=False, **driver_path)
def scrape_news():
    """Scrape mars.nasa.gov/news and return the latest article's headline and teaser.

    Returns:
        tuple[str, str]: (news_title, news_paragraph) of the newest list item.

    Raises:
        IndexError: if the page contains no articles.
    """
    browser = init_browser()
    try:
        browser.visit("https://mars.nasa.gov/news/")
        time.sleep(1)  # give the JS-rendered article list time to appear
        soup = BeautifulSoup(browser.html, "html.parser")
        content = soup.find("ul", class_="item_list")
        articles = content.find_all("li")
        title_list = []
        text_list = []
        for article in articles:
            title_list.append(article.find("div", class_="content_title").text)
            text_list.append(article.find("div", class_="article_teaser_body").text)
        # Only the newest (first) article is reported.
        return title_list[0], text_list[0]
    finally:
        # BUG FIX: previously the browser was only quit on success, leaking a
        # chromedriver process whenever any scraping step raised.
        browser.quit()
def scrape_image():
    """Return the absolute URL of the first JPL Mars space image.

    Returns:
        str: "https://www.jpl.nasa.gov" + the first image's src path.

    Raises:
        IndexError: if the gallery contains no pictures.
    """
    browser = init_browser()
    try:
        browser.visit("https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars")
        time.sleep(1)  # allow the JS-rendered gallery to load
        soup = BeautifulSoup(browser.html, "html.parser")
        pictures = soup.find('ul', class_="articles").find_all("li")
        complete_url = [
            "https://www.jpl.nasa.gov" + picture.find('div', class_='img').find('img')['src']
            for picture in pictures
        ]
        return complete_url[0]
    finally:
        # BUG FIX: quit even when parsing raises, so chromedriver doesn't leak.
        browser.quit()
def scrape_weather():
    """Return the latest Mars weather tweet (first tweet whose text starts with 'InSight').

    Returns:
        str: text of the newest matching tweet.

    Raises:
        IndexError: if no tweet starts with 'InSight'.
    """
    browser = init_browser()
    try:
        browser.visit("https://twitter.com/marswxreport?lang=en")
        time.sleep(1)  # let the timeline render
        soup = BeautifulSoup(browser.html, "html.parser")
        tweets = soup.find('div', class_="stream").find_all("li", class_="js-stream-item stream-item stream-item")
        record = []
        for tweet in tweets:
            content = tweet.find('div', class_="js-tweet-text-container").find('p').text
            if content[0:7] == 'InSight':
                record.append(content)
        return record[0]
    finally:
        # BUG FIX: ensure the browser is closed even if scraping fails.
        browser.quit()
def scrape_fact():
    """Fetch the first Mars facts table and return it as bare HTML (no header/index)."""
    facts_url = "https://space-facts.com/mars/"
    facts_table = pd.read_html(facts_url)[0]
    return facts_table.to_html(header=False, index=False)
def scrape_hemisphere():
    """Scrape the USGS astrogeology search results for Mars hemisphere images.

    Returns:
        list[dict]: one {"title": ..., "img_url": ...} dict per result item.
    """
    browser = init_browser()
    try:
        browser.visit("https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars")
        time.sleep(1)  # allow the results grid to render
        soup = BeautifulSoup(browser.html, "html.parser")
        hemisphere_image_urls = []
        for item in soup.find_all('div', class_="item"):
            link = item.find("a", class_="itemLink product-item").find('img')['src']
            img_title = item.find("div", class_="description").find("a", class_="itemLink product-item").find("h3").text
            hemisphere_image_urls.append({
                "title": img_title,
                "img_url": "https://astrogeology.usgs.gov/" + link,
            })
        return hemisphere_image_urls
    finally:
        # BUG FIX: quit the browser on all exit paths, not just on success.
        browser.quit()
def scrape_info():
    """Run every scraper and bundle the results into a single dict."""
    latest_title, latest_news = scrape_news()
    return {
        "news_title": latest_title,
        "news_p": latest_news,
        "featured_image_url": scrape_image(),
        "current_weather": scrape_weather(),
        "mars_facts": scrape_fact(),
        "mars_hemisphere": scrape_hemisphere(),
    }
| 23.636364 | 114 | 0.65 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 806 | 0.238462 |
ff473bea0ab54b3db0252443c6401db2224d1f9b | 272 | py | Python | test2.py | edwkar/marrow | 8138b44eff2cbcb8f7139cab27f57e6dc263df85 | [
"MIT"
] | null | null | null | test2.py | edwkar/marrow | 8138b44eff2cbcb8f7139cab27f57e6dc263df85 | [
"MIT"
] | null | null | null | test2.py | edwkar/marrow | 8138b44eff2cbcb8f7139cab27f57e6dc263df85 | [
"MIT"
] | null | null | null | def foo(a, b):
x = a * b
y = 3
return y
def foo(a, b):
    # Demo "after" function: same shape as the one above but with assignments
    # routed through __report_assign. __report_assign, pos, v, lineno and col
    # are not defined in this file — presumably injected by an instrumentation
    # pass that rewrites the source; TODO confirm.
    x = __report_assign(7, 8, 3)
    x = __report_assign(pos, v, 3)
    y = __report_assign(lineno, col, 3)
    print("hello world")
foo(3, 2)
# Expected rewritten form of the call above; report_call/report_eval are not
# defined in this file — presumably provided by the instrumentation runtime.
report_call(0, 3, foo, (report_eval(3), report_eval(2),))
| 18.133333 | 57 | 0.566176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.047794 |
ff47954bcddeb6c75bb020c08155072c7830f063 | 6,572 | py | Python | .ipynb_checkpoints/utils_discrete-checkpoint.py | hsharsh/pinn-torch | fa563b324c286ec4425529f5ea1db03e68bec2f3 | [
"MIT"
] | 1 | 2022-01-25T04:27:33.000Z | 2022-01-25T04:27:33.000Z | utils_discrete.py | hsharsh/pinn-torch | fa563b324c286ec4425529f5ea1db03e68bec2f3 | [
"MIT"
] | null | null | null | utils_discrete.py | hsharsh/pinn-torch | fa563b324c286ec4425529f5ea1db03e68bec2f3 | [
"MIT"
] | null | null | null | # Data processing imports
import scipy.io as io
import numpy as np
from pyDOE import lhs
# Plotting imports
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import griddata
import matplotlib.gridspec as gridspec
def load_dataset(file):
    """Load a .mat solution file and return (x, t, u) with u transposed to [time, space]."""
    mat = io.loadmat(file)
    return mat['x'], mat['t'], mat['usol'].T
# Inference
def preprocess_data_discrete_inference(file, idx_t0, idx_t1, q=500, N=250, noise=0.0):
    """Prepare training/test data for discrete-time PINN inference.

    Args:
        file: path to the .mat dataset readable by load_dataset.
        idx_t0, idx_t1: time indices of the data snapshot and target snapshot.
        q: number of IRK stages (selects the Butcher tableau file to load).
        N: number of spatial points sampled at the initial snapshot.
        noise: relative noise level added to the sampled values.

    Returns:
        (x, t, u_exact, T, lb, ub, dt, x0, u0, x1, test_X, test_u, IRK_weights)
    """
    x, t, u_exact = load_dataset(file)
    _, T = np.meshgrid(x, t)
    # The full spatial grid doubles as the test set at the target snapshot.
    test_X = x
    test_u = u_exact[idx_t1, :]
    # Spatial domain bounds.
    lb, ub = test_X.min(0), test_X.max(0)
    dt = t[idx_t1] - t[idx_t0]
    # Randomly sample N spatial points at the initial snapshot.
    sample = np.random.choice(x.shape[0], N, replace=False)
    x0 = x[sample, :]
    u0 = u_exact[idx_t0:idx_t0 + 1, sample].T
    u0 = u0 + noise * np.std(u0) * np.random.randn(*u0.shape)
    # Boundary points for the target snapshot.
    x1 = np.vstack([lb, ub])
    butcher = np.float32(np.loadtxt(f'IRK_weights/Butcher_IRK{q}.txt', ndmin=2))
    IRK_weights = np.reshape(butcher[:q ** 2 + q], (q + 1, q))
    return x, t, u_exact, T, lb, ub, dt, x0, u0, x1, test_X, test_u, IRK_weights
def plot_results_discrete_inference(x, t, x0, u0, u_exact, test_X, u1_pred, idx_t0, idx_t1, lb, ub):
    """Visualize a discrete-time inference result.

    Top row: heatmap of the exact solution u(t, x) with white vertical lines
    marking the two snapshot times t[idx_t0] and t[idx_t1].
    Bottom row: left — exact solution at t[idx_t0] with the sampled training
    data; right — exact vs. predicted solution at t[idx_t1] (the prediction
    is taken from the last column of u1_pred).

    Args:
        x, t: spatial and temporal grids of the dataset.
        x0, u0: sampled training points/values at the initial snapshot.
        u_exact: exact solution, indexed as [time, space].
        test_X: spatial points at which the prediction is evaluated.
        u1_pred: network prediction; u1_pred[:, -1] is plotted.
        idx_t0, idx_t1: snapshot time indices.
        lb, ub: spatial bounds used for the slice-plot x-limits.
    """
    fig = plt.figure(figsize = (10, 9.5))
    ax = plt.gca()
    ax.axis('off')
    fig.patch.set_facecolor('white')
    ####### Row 0: h(t,x) ##################
    gs0 = gridspec.GridSpec(1, 2)
    gs0.update(top=1-0.06, bottom=1-1/2 + 0.1, left=0.15, right=0.85, wspace=0)
    ax = plt.subplot(gs0[:, :])
    h = ax.imshow(u_exact.T, interpolation='nearest', cmap='rainbow',
                  extent=[t.min(), t.max(), test_X.min(), test_X.max()],
                  origin='lower', aspect='auto')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    # White vertical lines marking the two snapshot times on the heatmap.
    line = np.linspace(x.min(), x.max(), 2)[:,None]
    ax.plot(t[idx_t0]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ax.plot(t[idx_t1]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_title('$u(t,x)$', fontsize = 10)
    ####### Row 1: h(t,x) slices ##################
    gs1 = gridspec.GridSpec(1, 2)
    gs1.update(top=1-1/2-0.05, bottom=0.15, left=0.15, right=0.85, wspace=0.5)
    # Left panel: exact solution and sampled data at the initial snapshot.
    ax = plt.subplot(gs1[0, 0])
    ax.plot(x,u_exact[idx_t0,:], 'b-', linewidth = 2)
    ax.plot(x0, u0, 'rx', linewidth = 2, label = 'Data')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = %.2f$' % (t[idx_t0]), fontsize = 10)
    ax.set_xlim([lb-0.1, ub+0.1])
    ax.legend(loc='upper center', bbox_to_anchor=(0.8, -0.3), ncol=2, frameon=False)
    # Right panel: exact vs. predicted solution at the target snapshot.
    ax = plt.subplot(gs1[0, 1])
    ax.plot(x,u_exact[idx_t1,:], 'b-', linewidth = 2, label = 'Exact')
    ax.plot(test_X, u1_pred[:,-1], 'r--', linewidth = 2, label = 'Prediction')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = %.2f$' % (t[idx_t1]), fontsize = 10)
    ax.set_xlim([lb-0.1, ub+0.1])
    ax.legend(loc='upper center', bbox_to_anchor=(0.1, -0.3), ncol=2, frameon=False)
    plt.show()
# Identification
def preprocess_data_discrete_identification(file, idx_t0, idx_t1, N0=250, N1=250, noise=0.0):
    """Prepare training data for discrete-time PINN parameter identification.

    Args:
        file: path to the .mat dataset readable by load_dataset.
        idx_t0, idx_t1: time indices of the two training snapshots.
        N0, N1: number of spatial points sampled at each snapshot.
        noise: relative noise level added to the sampled values.

    Returns:
        (x, t, u_exact, lb, ub, dt, q, x0, u0, x1, u1, IRK_alphas, IRK_betas)
    """
    x, t, u_exact = load_dataset(file)
    # Spatial domain bounds.
    lb, ub = x.min(0), x.max(0)
    dt = t[idx_t1] - t[idx_t0]
    # Number of IRK stages needed given this time step (machine-eps heuristic).
    q = int(np.ceil(0.5 * np.log(np.finfo(float).eps) / np.log(dt)))

    def _sample(idx_t, n):
        # Sample n random spatial points of the snapshot at time index idx_t
        # and perturb the values with the requested relative noise.
        rows = np.random.choice(x.shape[0], n, replace=False)
        xs = x[rows, :]
        us = u_exact[idx_t:idx_t + 1, rows].T
        return xs, us + noise * np.std(us) * np.random.randn(*us.shape)

    x0, u0 = _sample(idx_t0, N0)
    x1, u1 = _sample(idx_t1, N1)
    butcher = np.float32(np.loadtxt(f'IRK_weights/Butcher_IRK{q}.txt', ndmin=2))
    IRK_weights = np.reshape(butcher[:q ** 2 + q], (q + 1, q))
    IRK_alphas = IRK_weights[:-1, :]
    IRK_betas = IRK_weights[-1:, :]
    return x, t, u_exact, lb, ub, dt, q, x0, u0, x1, u1, IRK_alphas, IRK_betas
def plot_results_discrete_identification(x, t, x0, x1, u_exact, u0, u1, idx_t0, idx_t1, lb, ub, lambda_1, lambda_2):
    """Visualize a discrete-time identification result.

    Top row: heatmap of the exact solution with the two snapshot times marked.
    Middle row: exact solution and sampled training data at each snapshot.
    Bottom row: text panel comparing the identified parameters lambda_1 and
    lambda_2 against the reference PDE printed in the label.

    Args:
        x, t: spatial and temporal grids of the dataset.
        x0, u0 / x1, u1: sampled training points/values at the two snapshots.
        u_exact: exact solution, indexed as [time, space].
        idx_t0, idx_t1: snapshot time indices.
        lb, ub: spatial bounds (arrays; [0] is used for the heatmap extent).
        lambda_1, lambda_2: identified PDE parameters to display.
    """
    fig = plt.figure(figsize = (10, 9.5))
    ax = plt.gca()
    ax.axis('off')
    fig.patch.set_facecolor('white')
    gs0 = gridspec.GridSpec(1, 2)
    gs0.update(top=1-0.06, bottom=1-1/3+0.05, left=0.15, right=0.85, wspace=0)
    ax = plt.subplot(gs0[:, :])
    h = ax.imshow(u_exact.T, interpolation='nearest', cmap='rainbow',
                  extent=[t.min(),t.max(), lb[0], ub[0]],
                  origin='lower', aspect='auto')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    # White vertical lines marking the two snapshot times on the heatmap.
    line = np.linspace(x.min(), x.max(), 2)[:,None]
    ax.plot(t[idx_t0]*np.ones((2,1)), line, 'w-', linewidth = 1.0)
    ax.plot(t[idx_t1]*np.ones((2,1)), line, 'w-', linewidth = 1.0)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_title('$u(t,x)$', fontsize = 10)
    gs1 = gridspec.GridSpec(1, 2)
    gs1.update(top=1-1/3-0.1, bottom=1-2/3, left=0.15, right=0.85, wspace=0.5)
    # Left panel: snapshot at t[idx_t0]. (Title typo "trainng" is a runtime
    # string and is preserved as-is.)
    ax = plt.subplot(gs1[0, 0])
    ax.plot(x, u_exact[idx_t0,:][:,None], 'b', linewidth = 2, label = 'Exact')
    ax.plot(x0, u0, 'rx', linewidth = 2, label = 'Data')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = %.2f$\n%d trainng data' % (t[idx_t0], u0.shape[0]), fontsize = 10)
    # Right panel: snapshot at t[idx_t1].
    ax = plt.subplot(gs1[0, 1])
    ax.plot(x, u_exact[idx_t1,:][:,None], 'b', linewidth = 2, label = 'Exact')
    ax.plot(x1, u1, 'rx', linewidth = 2, label = 'Data')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = %.2f$\n%d trainng data' % (t[idx_t1], u1.shape[0]), fontsize = 10)
    ax.legend(loc='upper center', bbox_to_anchor=(-0.3, -0.3), ncol=2, frameon=False)
    # Bottom panel: identified parameters vs. the reference PDE.
    gs2 = gridspec.GridSpec(1, 2)
    gs2.update(top=1-2/3-0.05, bottom=0, left=0.15, right=0.85, wspace=0.0)
    ax = plt.subplot(gs2[0, 0])
    ax.axis('off')
    ax.text(0.5,0.5,f'Correct PDE: $u_t + u u_x - 0.0031831 u_{{xx}} = 0$ \n$\lambda_1$: {lambda_1:.5f} \t\t $\lambda_2$: {lambda_2:.5f}')
    plt.show()